5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
36 # special constant that tells vserver to keep its existing settings
37 KEEP_LIMIT = vserver.VC_LIM_KEEP
39 # populate the sliver/vserver specific default allocations table,
40 # which is used to look for slice attributes
41 DEFAULT_ALLOCATION = {}
# NOTE(review): the loop binds 'rlimit' but the assignments below use 'rlim',
# which is undefined in this excerpt; a line between 42 and 44 (not shown)
# presumably does something like 'rlim = rlimit.lower()' -- confirm against
# the full file before changing anything here.
42 for rlimit in vserver.RLIMITS.keys():
# one min/soft/hard knob per rlimit name; defaulting every entry to
# KEEP_LIMIT means unspecified slice attributes leave the vserver's
# existing limits untouched
44 DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
45 DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
46 DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
48 class Sliver_VS(account.Account, vserver.VServer):
49 """This class wraps vserver.VServer to make its interface closer to what we need."""
# account-type string; presumably used by the account layer to dispatch
# slivers of this type to this class -- confirm against account.py
52 TYPE = 'sliver.VServer'
# class-wide semaphore serializing the expensive disk-usage initialization
# (acquired/released in set_resources around init_disk_info)
53 _init_disk_info_sem = BoundedSemaphore()
55 def __init__(self, rec):
# Bind to the existing vserver for this sliver; if the first
# VServer.__init__ attempt fails, recreate the vserver and retry once
# ("second chance").
# NOTE(review): this excerpt is missing lines -- e.g. the assignment of
# 'name' (presumably from rec) and the 'try:' that must precede the
# 'except' at line 61. Comments describe only what is visible.
57 logger.verbose ('sliver_vs: %s init'%name)
59 logger.log("sliver_vs: %s: first chance..."%name)
60 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
61 except Exception, err:
# NoSuchVServer just means the vserver must be created; anything else
# indicates a broken vserver/config and is logged before recreating
62 if not isinstance(err, vserver.NoSuchVServer):
63 # Probably a bad vserver or vserver configuration file
64 logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
65 logger.log('sliver_vs: %s: recreating bad vserver' % name)
67 self.create(name, rec)
68 logger.log("sliver_vs: %s: second chance..."%name)
69 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
72 self.slice_id = rec['slice_id']
# disk usage is computed lazily in set_resources() -- see the module
# docstring about not redundantly straining the disks
73 self.disk_usage_initialized = False
79 def create(name, rec = None):
# Create a fresh vserver for slice 'name' from the reference image named
# by the slice's 'vref' attribute, via /usr/sbin/vuseradd.
# NOTE(review): takes 'name' rather than 'self', so this is presumably a
# @staticmethod whose decorator line is not in this excerpt; several other
# lines (the vref lookup, error returns, try/except) are also missing.
80 logger.verbose('sliver_vs: %s: create'%name)
83 logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
85 # band-aid for short period as old API doesn't have GetSliceFamily function
87 vref = "planetlab-f8-i386"
89 # used to look in /etc/planetlab/family,
90 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
91 # which for legacy is still exposed here as the 'vref' key
93 # check the template exists -- there's probably a better way..
94 if not os.path.isdir ("/vservers/.vref/%s"%vref):
95 logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
# split the family string, e.g. "planetlab-f8-i386" -> arch "i386"
100 (x,y,arch)=vref.split('-')
101 # mh, this of course applies when 'vref' is e.g. 'netflow'
102 # and that's not quite right
# local helper: map an arch string to the vserver personality setting
106 def personality (arch):
107 personality="linux32"
108 if arch.find("64")>=0:
109 personality="linux64"
# build the vuseradd command line; run under 'bash -x' so the invocation
# is traced in the logs
114 command += ['/bin/bash','-x',]
115 command += ['/usr/sbin/vuseradd', ]
116 if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
118 # the vsliver image to use
119 command += [ '-t', vref, ]
122 # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
123 logger.log_call(command, timeout=15*60)
124 # export slicename to the slice in /etc/slicename
125 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
126 file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
127 # set personality: only if needed (if arch's differ)
128 if tools.root_context_arch() != arch:
129 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
130 logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
# NOTE(review): lines 134-135 are the body of the destroy() method; its
# 'def' line (and any decorator) is not part of this excerpt. Tears down
# the sliver's vserver via /usr/sbin/vuserdel, traced under 'bash -x'.
134 # logger.log_call(['/usr/sbin/vuserdel', name, ])
135 logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
137 def configure(self, rec):
# Apply a new slice record: refresh the generic vinit script, pick up
# rspec and initscript changes, then delegate to Account.configure for
# ssh keys. NOTE(review): some lines are missing from this excerpt.
138 # in case we update nodemanager..
139 self.install_and_enable_vinit()
140 new_rspec = rec['_rspec'] is read below; only replace self.rspec on change
141 new_rspec = rec['_rspec']
142 if new_rspec != self.rspec:
143 self.rspec = new_rspec
146 new_initscript = rec['initscript']
147 if new_initscript != self.initscript:
148 self.initscript = new_initscript
149 # not used anymore, we always check against the installed script
150 #self.initscriptchanged = True
# sync the in-sliver vinit.slice script with the (possibly new) initscript
151 self.refresh_slice_vinit()
153 account.Account.configure(self, rec) # install ssh keys
155 # unconditionnally install and enable the generic vinit script
156 # mimicking chkconfig for enabling the generic vinit script
157 # this is hardwired for runlevel 3
158 def install_and_enable_vinit (self):
# Install the generic vinit rc script into the sliver and enable it for
# runlevel 3 by symlinking S99vinit -> ../init.d/vinit (chkconfig-style).
# NOTE(review): the try/except around the symlink creation is missing
# from this excerpt (an 'except' presumably precedes line 173).
159 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
160 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
161 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
162 rc3_target="../init.d/vinit"
# replace_file_with_string returns truthy only when the file changed,
# so the install is logged only when something was actually written
164 code=file(vinit_source).read()
165 if tools.replace_file_with_string(vinit_script,code,chmod=0755):
166 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
167 # create symlink for runlevel 3
168 if not os.path.islink(rc3_link):
170 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
171 os.symlink(rc3_target,rc3_link)
# NOTE(review): this format string has two %s placeholders but a single
# argument (rc3_link), which would itself raise TypeError if reached --
# flagging rather than fixing, since surrounding lines are missing here
173 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
def rerun_slice_vinit(self):
    """Re-run the slice initscript inside the (already running) sliver.

    Invoked by refresh_slice_vinit() after installing a changed
    /etc/rc.d/init.d/vinit.slice, so the new script takes effect without
    restarting the whole vserver.
    """
    restart_command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
    logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, restart_command))
    # appending '&' under shell=True backgrounds the restart so the
    # nodemanager is not blocked waiting on the slice's initscript
    subprocess.call(restart_command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
180 # this one checks for the existence of the slice initscript
181 # install or remove the slice inistscript, as instructed by the initscript tag
182 def refresh_slice_vinit(self):
# Sync the in-sliver /etc/rc.d/init.d/vinit.slice with the slice's
# initscript tag; install/remove it as needed, and re-run it if the
# sliver is already up.
# NOTE(review): lines are missing from this excerpt -- in particular the
# assignment that sets 'code' (presumably from self.initscript) and the
# if/else structure around the two log branches at 187 and 195.
184 sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
# remove_if_empty=True means an empty initscript tag uninstalls the file
185 if tools.replace_file_with_string(sliver_initscript,code,remove_if_empty=True,chmod=0755):
187 logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
188 if self.is_running():
189 # Only need to rerun the initscript if the vserver is
190 # already running. If the vserver isn't running, then the
191 # initscript will automatically be started by
192 # /etc/rc.d/vinit when the vserver is started.
193 self.rerun_slice_vinit()
195 logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
197 def start(self, delay=0):
# Start the sliver's vserver (unless disabled), refreshing the vinit
# scripts first, and forking so VServer.start runs in a child process.
# NOTE(review): lines are missing from this excerpt -- e.g. an early
# return after the 'not enabled' log, the sleep(delay), and the
# 'if child_pid == 0:'/'else:' branches around the fork.
198 if self.rspec['enabled'] <= 0:
199 logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
201 logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
203 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
204 self.install_and_enable_vinit()
205 # expose .ssh for omf_friendly slivers
206 if 'omf_control' in self.rspec['tags']:
207 self.expose_ssh_dir()
208 # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
209 self.refresh_slice_vinit()
210 child_pid = os.fork()
212 # VServer.start calls fork() internally,
213 # so just close the nonstandard fds and fork once to avoid creating zombies
214 tools.close_nonstandard_fds()
215 vserver.VServer.start(self)
# parent reaps the child so no zombie is left behind
218 os.waitpid(child_pid, 0)
# NOTE(review): lines 221-222 are the body of the stop() method; its
# 'def' line (220) is not part of this excerpt. Logs and delegates the
# actual shutdown to the vserver.VServer base class.
221 logger.log('sliver_vs: %s: stopping' % self.name)
222 vserver.VServer.stop(self)
def is_running(self):
    """Report whether this sliver's vserver context is currently running.

    Pure delegation to the vserver.VServer base class.
    """
    running = vserver.VServer.is_running(self)
    return running
227 def set_resources(self):
# Push the sliver's rspec onto the running vserver: disk quota, rlimits,
# capabilities, per-guest sysctls, CPU scheduler parameters, and IP
# addresses; finally record the slice_id under /etc/vservers/<name>.
# NOTE(review): many lines are missing from this excerpt (several
# 'try:'/'except'/'else:' headers, the assignment defining 'type' after
# line 247, the 'count' counter for sysctls, 'stopcount' init, etc.);
# comments below describe only what is visible.
228 disk_max = self.rspec['disk_max']
229 logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
230 try: # if the sliver is over quota, .set_disk_limit will throw an exception
231 if not self.disk_usage_initialized:
232 self.vm_running = False
# serialize the disk scan across slivers -- see _init_disk_info_sem
233 Sliver_VS._init_disk_info_sem.acquire()
234 logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
235 # init_disk_info is inherited from VServer
236 try: self.init_disk_info()
237 finally: Sliver_VS._init_disk_info_sem.release()
238 logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
239 self.disk_usage_initialized = True
# never set the limit below current usage, or the sliver is instantly over quota
240 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
# NOTE(review): the 'except' line introducing this handler is missing
242 logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
244 # get/set the min/soft/hard values for all of the vserver
245 # related RLIMITS. Note that vserver currently only
246 # implements support for hard limits.
247 for limit in vserver.RLIMITS.keys():
# NOTE(review): 'type' is undefined in this excerpt; the missing line 248
# presumably derives it from 'limit' (e.g. limit.lower()) -- confirm
249 minimum = self.rspec['%s_min'%type]
250 soft = self.rspec['%s_soft'%type]
251 hard = self.rspec['%s_hard'%type]
252 update = self.set_rlimit(limit, hard, soft, minimum)
# presumably logged only when 'update' indicates a change (the guarding
# line is missing here)
254 logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
255 % (self.name, type, hard, soft, minimum))
257 self.set_capabilities_config(self.rspec['capabilities'])
258 if self.rspec['capabilities']:
259 logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
261 cpu_pct = self.rspec['cpu_pct']
262 cpu_share = self.rspec['cpu_share']
# translate rspec keys of the form 'sysctl.<name>' into the per-guest
# /etc/vservers/<guest>/sysctl/<id>/{setting,value} file pairs
265 for key in self.rspec.keys():
266 if key.find('sysctl.') == 0:
267 sysctl=key.split('.')
269 # /etc/vservers/<guest>/sysctl/<id>/
# NOTE(review): 'count' is undefined in this excerpt -- presumably a
# counter initialized/incremented on missing lines
270 dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
272 os.makedirs(dirname, 0755)
275 setting = open("%s/setting" % dirname, "w")
# NOTE(review): lstrip("sysctl.") strips *characters* from the set
# {s,y,c,t,l,.}, not the literal prefix -- it can eat leading letters of
# the sysctl name (e.g. 'sysctl.tcp...'); worth verifying upstream
276 setting.write("%s\n" % key.lstrip("sysctl."))
278 value = open("%s/value" % dirname, "w")
279 value.write("%s\n" % self.rspec[key])
283 logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
# NOTE(review): the 'except' introducing this error branch is missing;
# 'e' is the caught exception
285 logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
286 logger.log("sliver_vs: %s: error = %s"%(self.name,e))
289 if self.rspec['enabled'] > 0:
291 logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
296 logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
300 self.set_sched_config(cpu_pct, cpu_share)
301 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
302 if self.rspec['ip_addresses'] != '0.0.0.0':
303 logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
304 (self.name, self.rspec['ip_addresses']))
# honor the isolate_loopback tag: only add the loopback address when the
# slice is NOT asking for an isolated loopback
306 if 'isolate_loopback' in self.rspec['tags']:
307 add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
308 self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)
310 #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
311 #self.setname(self.slice_id)
312 #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
# record the slice id for external consumers (e.g. PlanetFlow)
314 vserver_config_path = '/etc/vservers/%s'%self.name
315 if not os.path.exists (vserver_config_path):
316 os.makedirs (vserver_config_path)
317 file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
318 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
# NOTE(review): the two 'except' lines introducing these handlers
# (presumably IOError then a catch-all) are missing from this excerpt
320 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
322 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
325 if self.enabled == False:
329 if False: # Does not work properly yet.
330 if self.have_limits_changed():
331 logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
# stop-with-timeout loop; 'stopcount' initialization and the stop/sleep
# calls inside the loop are on missing lines
333 while self.is_running() and stopcount > 0:
337 stopcount = stopcount - 1
# pairs with 'if self.rspec['enabled'] > 0:' at line 289
340 else: # tell vsh to disable remote login by setting CPULIMIT to 0
341 logger.log('sliver_vs: %s: disabling remote login' % self.name)
342 self.set_sched_config(0, 0)