5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # BUG FIX: 'rlim' was referenced below without ever being bound, which
    # raises NameError at import time; derive it from the loop variable.
    # Lowercased to match the '<resource>_min/_soft/_hard' rspec key style
    # used in set_resources() -- TODO confirm against the full source.
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""
    # account type tag used by the accounts registry to select this class
    TYPE = 'sliver.VServer'
    # serializes the expensive disk-usage scans across slivers (see module docstring)
    _init_disk_info_sem = BoundedSemaphore()

    def __init__(self, rec):
        # NOTE(review): this excerpt is missing lines; 'name' is presumably
        # bound from rec (e.g. rec['name']) on a line not shown here -- confirm
        logger.verbose ('sliver_vs: %s init'%name)
        # first chance: try to bind to an already-existing vserver
        # NOTE(review): the 'try:' opening the handler below is not visible here
        logger.log("sliver_vs: %s: first chance..."%name)
        vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
            # (re)create the vserver, then retry binding to it
            self.create(name, rec)
            logger.log("sliver_vs: %s: second chance..."%name)
            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        self.slice_id = rec['slice_id']
        # lazily flipped to True by set_resources() -- see module docstring
        self.disk_usage_initialized = False
    def create(name, rec = None):
        """Build a brand-new vserver for slice 'name' from its reference image.

        NOTE(review): this excerpt is missing several original lines
        (vref lookup from rec, error returns, the 'extra' flag binding);
        the comments below flag each visible gap.
        """
        logger.verbose('sliver_vs: %s: create'%name)
        # NOTE(review): missing lines here presumably fetch 'vref' from rec and
        # guard the error path below -- confirm against the full source
        logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
        # band-aid for short period as old API doesn't have GetSliceFamily function
        vref = "planetlab-f8-i386"

        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
        # vref is of the form 'planetlab-f8-i386'; only the arch part is used below
        (x,y,arch)=vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right

        def personality (arch):
            # helper: map an arch string onto the vserver personality setting
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
            # NOTE(review): the 'return personality' line is not visible in this excerpt
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
            # NOTE(review): the line assigning 'extra' (a vuseradd flag) is not visible here
        # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuseradd', extra, '-t', vref, name, ], timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))

    # NOTE(review): the 'def destroy(name):' header is not visible in this excerpt;
    # the two lines below are the body of destroy
        # logger.log_call(['/usr/sbin/vuserdel', name, ])
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
    def configure(self, rec):
        """Push the rspec and initscript carried by 'rec' onto this sliver."""
        # in case we update nodemanager..
        self.install_and_enable_vinit()

        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): lines missing from this excerpt -- presumably the
            # new rspec is applied here (e.g. self.set_resources()); confirm

        new_initscript = rec['initscript']
        if new_initscript != self.initscript:
            self.initscript = new_initscript
            # not used anymore, we always check against the installed script
            #self.initscriptchanged = True
            self.refresh_slice_vinit()

        accounts.Account.configure(self, rec) # install ssh keys
149 # unconditionnally install and enable the generic vinit script
150 # mimicking chkconfig for enabling the generic vinit script
151 # this is hardwired for runlevel 3
152 def install_and_enable_vinit (self):
153 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
154 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
155 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
156 rc3_target="../init.d/vinit"
158 code=file(vinit_source).read()
159 if tools.replace_file_with_string(vinit_script,code,chmod=0755):
160 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
161 # create symlink for runlevel 3
162 if not os.path.islink(rc3_link):
164 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
165 os.symlink(rc3_target,rc3_link)
167 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
169 def rerun_slice_vinit(self):
170 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
171 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
172 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
    # this one checks for the existence of the slice initscript
    # install or remove the slice inistscript, as instructed by the initscript tag
    def refresh_slice_vinit(self):
        # NOTE(review): the line binding 'code' (the initscript contents) is
        # not visible in this excerpt -- confirm against the full source
        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
        # replace_file_with_string returns True when it actually changed the file
        if tools.replace_file_with_string(sliver_initscript,code,remove_if_empty=True,chmod=0755):
            # NOTE(review): a guard distinguishing install vs removal (e.g.
            # 'if code:') is missing from this excerpt
            logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
            if self.is_running():
                # Only need to rerun the initscript if the vserver is
                # already running. If the vserver isn't running, then the
                # initscript will automatically be started by
                # /etc/rc.d/vinit when the vserver is started.
                self.rerun_slice_vinit()
            # NOTE(review): the 'else:' branch header is missing from this excerpt
                logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
    # bind mount root side dir to sliver side
    # needs to be done before sliver starts
    def expose_ssh_dir (self):
        # NOTE(review): the 'try:' opening the handler closed by log_exc below
        # is not visible in this excerpt
        root_ssh="/home/%s/.ssh"%self.name
        sliver_ssh="/vservers/%s/home/%s/.ssh"%(self.name,self.name)
        # any of both might not exist yet
        for path in [root_ssh,sliver_ssh]:
            if not os.path.exists (path):
                # NOTE(review): the directory-creation line is missing here
            if not os.path.isdir (path):
                # NOTE(review): the bail-out line is missing here
        # mount only if not already mounted
        mounts=file('/proc/mounts').read()
        if mounts.find(sliver_ssh)<0:
            # read-only bind mount, so the sliver cannot alter the keys
            subprocess.call("mount --bind -o ro %s %s"%(root_ssh,sliver_ssh),shell=True)
            logger.log("expose_ssh_dir: %s mounted into slice %s"%(root_ssh,self.name))
        # best-effort: failures are logged, not raised
        logger.log_exc("expose_ssh_dir with slice %s failed"%self.name)
    def start(self, delay=0):
        """Start the vserver (after 'delay' seconds), unless the sliver is disabled."""
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
            # NOTE(review): the early-return / else line is missing from this excerpt
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))

        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'omf_control' in self.rspec['tags']:
            self.expose_ssh_dir()
        # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        # NOTE(review): the 'if child_pid == 0:' branch header is missing from
        # this excerpt; the next two statements presumably run in the child
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        # NOTE(review): the child-exit and parent 'else:' lines are missing;
        # the parent reaps the intermediate child here
        os.waitpid(child_pid, 0)
        # NOTE(review): the 'def stop(self):' header is not visible in this
        # excerpt; the two lines below are its body
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)
238 def is_running(self):
239 return vserver.VServer.is_running(self)
    def set_resources(self):
        """Apply the current rspec to the vserver: disk quota, rlimits,
        capabilities, sysctls, CPU scheduling, IP addresses and enabled state.

        NOTE(review): this excerpt is missing a number of original lines
        (several try/except headers and loop-variable bindings); the comments
        below flag each visible gap.
        """
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize disk scans across slivers -- they strain the disks
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            # never shrink the limit below current usage (disk_blocks)
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        # NOTE(review): the 'except' header is missing from this excerpt
            logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): the line binding 'type' (presumably derived from
            # 'limit') is missing from this excerpt -- confirm
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # NOTE(review): an 'if update:' guard is missing from this excerpt
            logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # write sysctl.* rspec entries under /etc/vservers/<guest>/sysctl/<id>/
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                # NOTE(review): the lines binding 'count' and opening the try
                # closed by the error logging below are missing from this excerpt
                # /etc/vservers/<guest>/sysctl/<id>/
                dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                os.makedirs(dirname, 0755)
                setting = open("%s/setting" % dirname, "w")
                # NOTE(review): lstrip strips a *character set*, not a prefix --
                # this would also eat leading 's','y','c','t','l','.' characters
                # of the remaining key; flagging for review
                setting.write("%s\n" % key.lstrip("sysctl."))
                value = open("%s/value" % dirname, "w")
                value.write("%s\n" % self.rspec[key])
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                # NOTE(review): the 'except' header binding 'e' is missing here
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            # NOTE(review): the guards selecting reservation vs share (and the
            # zero fallbacks) are missing from this excerpt
            logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            # honor the isolate_loopback tag: '1' means do not add the shared loopback
            if 'isolate_loopback' in self.rspec['tags']:
                add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
            self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)

            #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
            #self.setname(self.slice_id)
            #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
            # record the slice id on disk for PlanetFlow
            # NOTE(review): the 'try:' opening the handlers below is missing here
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
            # NOTE(review): the 'except' headers binding 'e' are missing here
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)

            if self.enabled == False:
                # NOTE(review): the lines stopping the disabled sliver are missing here

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    # NOTE(review): the 'stopcount' initialization and the stop
                    # call inside the loop are missing from this excerpt
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1

        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)