5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
# the util-vserver-pl module

# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # slice attribute keys use the lowercased rlimit name
    # (fix: `rlim` was referenced below without ever being bound -> NameError)
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(account.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""
    # account type string used by the account machinery to select this class
    TYPE = 'sliver.VServer'
    # class-wide semaphore: serializes the disk-usage initialization across
    # slivers, since scanning usage strains the disks (see set_resources)
    _init_disk_info_sem = BoundedSemaphore()
    def __init__(self, rec):
        """Wrap VServer.__init__ with a second chance: if the first attempt
        fails, recreate the vserver and retry.

        NOTE(review): this copy is truncated -- the binding of `name` and the
        `try:` matching the `except` below are missing; confirm against the
        upstream NodeManager source.
        """
        logger.verbose ('sliver_vs: %s init'%name)
            logger.log("sliver_vs: %s: first chance..."%name)
            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
            # rebuild the vserver, then retry initialization
            self.create(name, rec)
            logger.log("sliver_vs: %s: second chance..."%name)
            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        # stash the slice identity; disk accounting is deferred until
        # set_resources runs (see the disk_usage_initialized flag)
        self.slice_id = rec['slice_id']
        self.disk_usage_initialized = False
    # Build a brand-new vserver for this sliver via /usr/sbin/vuseradd.
    # NOTE(review): this copy is truncated -- the decorator (no `self`, so
    # presumably @staticmethod), the `vref` lookup the first branch reports on,
    # the `command` list initialization, several try/except lines, and the
    # `def destroy` header for the trailing vuserdel call are all missing.
    # Confirm against the upstream NodeManager source before editing further.
    def create(name, rec = None):
        logger.verbose('sliver_vs: %s: create'%name)
            logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
            # band-aid for short period as old API doesn't have GetSliceFamily function
            vref = "planetlab-f8-i386"
        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key
        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
        # parse the target architecture out of the vref name (e.g. planetlab-f8-i386)
        (x,y,arch)=vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right
        # map an arch string onto the personality handed to the vserver tools
        # NOTE(review): the `return personality` line is not visible in this copy
        def personality (arch):
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
        # NOTE(review): `command` is extended below but its initialization
        # (command=[]) is missing; -x makes bash trace the call into the logs
            command += ['/bin/bash','-x',]
        command += ['/usr/sbin/vuseradd', ]
        # NOTE(review): the body of this isolate_loopback branch is missing
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
        # the vsliver image to use
        command += [ '-t', vref, ]
        # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(command, timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))

        # NOTE(review): the two lines below belong to a destroy() method whose
        # `def` line is missing from this copy
        # logger.log_call(['/usr/sbin/vuserdel', name, ])
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
    def configure(self, rec):
        """Refresh the sliver from an updated account record: generic vinit
        script, rspec, slice initscript, and ssh keys."""
        # in case we update nodemanager..
        self.install_and_enable_vinit()

        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): a line is missing here in this copy -- presumably
            # a self.set_resources() call; verify against upstream

        new_initscript = rec['initscript']
        if new_initscript != self.initscript:
            self.initscript = new_initscript
            # not used anymore, we always check against the installed script
            #self.initscriptchanged = True
            self.refresh_slice_vinit()

        account.Account.configure(self, rec) # install ssh keys
    # unconditionally install and enable the generic vinit script
    # mimicking chkconfig for enabling the generic vinit script
    # this is hardwired for runlevel 3
    def install_and_enable_vinit (self):
        vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
        vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
        rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
        rc3_target="../init.d/vinit"
        # copy the master vinit script into the sliver's filesystem; the log
        # only fires when replace_file_with_string reports a change (True)
        code=file(vinit_source).read()
        if tools.replace_file_with_string(vinit_script,code,chmod=0755):
            logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
        # create symlink for runlevel 3
        if not os.path.islink(rc3_link):
            # NOTE(review): the try:/except: lines around the symlink creation
            # are missing in this copy, leaving the log_exc below orphaned
            logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
            os.symlink(rc3_target,rc3_link)
            # NOTE(review): this format string has two %s placeholders but a
            # single argument -- would raise TypeError if actually evaluated
            logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
176 def rerun_slice_vinit(self):
177 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
178 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
179 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
    # this one checks for the existence of the slice initscript
    # install or remove the slice initscript, as instructed by the initscript tag
    def refresh_slice_vinit(self):
        """Sync /etc/rc.d/init.d/vinit.slice inside the sliver with the slice
        initscript, restarting it when the sliver is already running.

        NOTE(review): truncated copy -- the binding of `code` (the initscript
        body) and the `else:` branch owning the removal log are missing.
        """
        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
        if tools.replace_file_with_string(sliver_initscript,code,remove_if_empty=True,chmod=0755):
            logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
            if self.is_running():
                # Only need to rerun the initscript if the vserver is
                # already running. If the vserver isn't running, then the
                # initscript will automatically be started by
                # /etc/rc.d/vinit when the vserver is started.
                self.rerun_slice_vinit()
            # NOTE(review): this log belongs to a missing `else:` branch
            logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
    def start(self, delay=0):
        """Start the sliver unless it is disabled.

        NOTE(review): truncated copy -- the `else:` separating the not-enabled
        log from the start path, and the child/parent branches around
        os.fork() (`if child_pid == 0:` / `os._exit` / `else:`) are missing;
        confirm against upstream before editing.
        """
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))

        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'omf_control' in self.rspec['tags']:
            self.expose_ssh_dir()
        # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        os.waitpid(child_pid, 0)
        # NOTE(review): the `def stop(self):` header for this method body is
        # missing from this copy -- these two lines stop the vserver context
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)
225 def is_running(self):
226 return vserver.VServer.is_running(self)
    def set_resources(self):
        """Push the rspec down into the vserver: disk quota, rlimits,
        capabilities, sysctl settings, CPU scheduler and IP configuration.

        NOTE(review): this copy is truncated -- several try/except/else lines
        and a few variable bindings are missing; the notes below flag each
        gap.  Confirm against the upstream NodeManager source before editing.
        """
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize disk scans across slivers -- they strain the disks
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            # never set the limit below the blocks currently in use
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
            # NOTE(review): the `except` line for the enclosing try is missing here
            logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): `type` is read below but its binding (the
            # lowercased limit name) is missing from this copy
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # NOTE(review): an `if update:` guard for this log appears to be missing
            logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # materialize sysctl.* rspec entries under /etc/vservers/<guest>/sysctl/<id>/
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                # /etc/vservers/<guest>/sysctl/<id>/
                # NOTE(review): `count` is read here but its initialization and
                # increment are missing from this copy
                dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                os.makedirs(dirname, 0755)
                setting = open("%s/setting" % dirname, "w")
                # NOTE(review): lstrip strips a *character set*, not a prefix --
                # keys whose remainder starts with s/y/c/t/l/. lose extra chars
                setting.write("%s\n" % key.lstrip("sysctl."))
                value = open("%s/value" % dirname, "w")
                value.write("%s\n" % self.rspec[key])
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                # NOTE(review): the two logs below belong to a missing `except` clause
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            # NOTE(review): upstream appears to guard these logs with
            # `if cpu_pct > 0:` / `if cpu_share > 0:` plus else-branches
            # zeroing the values -- those lines are missing here
            logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            # NOTE(review): the default binding of `add_loopback` (when the tag
            # is absent) is missing from this copy
            if 'isolate_loopback' in self.rspec['tags']:
                add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
            self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)

            #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
            #self.setname(self.slice_id)
            #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
            # record the slice id under /etc/vservers/<name>/slice_id
            # NOTE(review): the `try:` opening this section is missing here
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
            # NOTE(review): the two logs below belong to missing `except` clauses
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)

            # NOTE(review): the body of this branch (presumably enabling and
            # starting the sliver) is missing from this copy
            if self.enabled == False:

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    # NOTE(review): `stopcount` initialization and the loop
                    # body (stop + sleep) are missing from this copy
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1

        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)