3 There are a couple of tricky things going on here. First, the kernel
4 needs disk usage information in order to enforce the quota. However,
5 determining disk usage redundantly strains the disks. Thus, the
6 Sliver_VS.disk_usage_initialized flag is used to determine whether
7 this initialization has been made.
9 Second, it's not currently possible to set the scheduler parameters
10 for a sliver unless that sliver has a running process. /bin/vsh helps
11 us out by reading the configuration file so that it can set the
12 appropriate limits after entering the sliver context. Making the
13 syscall that actually sets the parameters gives a harmless error if no
14 process is running. Thus we keep vm_running on when setting scheduler
15 parameters so that set_sched_params() always makes the syscall, and we
16 don't have to guess if there is a running process or not.
30 from threading import BoundedSemaphore
# Module-wide semaphore shared by all Sliver_VS instances; it serializes the
# expensive disk-usage scan (see Sliver_VS._init_disk_info_sem and
# set_resources below) so concurrent slivers don't hammer the disks.
33 globalsem = BoundedSemaphore()
35 # special constant that tells vserver to keep its existing settings
36 KEEP_LIMIT = vserver.VC_LIM_KEEP
38 # populate the sliver/vserver specific default allocations table,
39 # which is used to look for slice attributes
40 DEFAULT_ALLOCATION = {}
41 for rlimit in vserver.RLIMITS.keys():
# NOTE(review): the line that defines 'rlim' (original line 42, presumably a
# lowercased form of 'rlimit') is elided from this listing -- confirm against
# the complete source; as shown, 'rlim' would be unbound here.
43 DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
44 DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
45 DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
47 class Sliver_VS(accounts.Account, vserver.VServer):
48 """This class wraps vserver.VServer to make its interface closer to what we need."""
# Account-type tag: identifies this class to the accounts machinery.
51 TYPE = 'sliver.VServer'
# Class-wide lock so only one sliver at a time runs the costly
# init_disk_info() scan (acquired/released in set_resources).
52 _init_disk_info_sem = globalsem
54 def __init__(self, rec):
# Build the sliver wrapper from an account record 'rec' -- a dict read
# below for at least 'name', 'vref' and 'slice_id'.
55 logger.verbose ('initing Sliver_VS with name=%s'%rec['name'])
# NOTE(review): the enclosing 'try:' line (original line 56) is elided
# from this listing; the 'except' clause below pairs with it.
57 vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')
58 except Exception, err:
59 if not isinstance(err, vserver.NoSuchVServer):
60 # Probably a bad vserver or vserver configuration file
61 logger.log_exc(self.name)
62 logger.log('%s: recreating bad vserver' % rec['name'])
# Wipe and recreate the vserver, then retry the base-class init.
63 self.destroy(rec['name'])
64 self.create(rec['name'], rec['vref'])
65 vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')
# (original lines 66-69 are elided from this listing)
70 self.slice_id = rec['slice_id']
# Deferred until set_resources(); see the module header for why disk
# usage is computed lazily and only once.
71 self.disk_usage_initialized = False
72 self.initscriptchanged = False
77 def create(name, vref = None):
# Create a new vserver for slice 'name', picking a template ("vref")
# by projecting the slice's wishes onto the node's default slicefamily.
78 logger.verbose('Sliver_VS:create - name=%s'%name)
82 ### locating the right slicefamily
83 # this is a first draft, and more a proof of concept thing
84 # the idea is to parse vref for dash-separated wishes,
85 # and to project these against the defaults
86 # so e.g. if the default slice family (as found in /etc/planetlab/slicefamily)
87 # is planetlab-f8-i386, then here is what we get
88 # vref=x86_64 -> vuseradd -t planetlab-f8-x86_64
89 # vref=centos5 -> vuseradd -t planetlab-centos5-i386
90 # vref=centos5-onelab -> vuseradd -t onelab-centos5-i386
91 # vref=planetflow -> vuseradd -t planetflow-f8-i386
92 # vref=x86_64-planetflow -> vuseradd -t planetflow-f8-x86_64
# NOTE(review): surrounding try/except scaffolding (original lines
# 79-81, 93-94, 97) is elided from this listing.
95 default=file("/etc/planetlab/slicefamily").read().strip()
96 (pldistro,fcdistro,arch) = default.split("-")
98 known_archs = [ 'i386', 'x86_64' ]
99 known_fcdistros = [ 'centos5', 'f8', 'f9', 'f10', 'f11', 'f12' ]
100 # from the slice attribute: cut dashes and try to figure the meaning
101 slice_wishes = vref.split("-")
102 for wish in slice_wishes:
103 if wish in known_archs:
# NOTE(review): the assignments performed in these branches (original
# lines 104, 106-110, presumably overriding arch/fcdistro/pldistro)
# are elided -- confirm against the complete source.
105 elif wish in known_fcdistros:
111 refname="-".join( (pldistro,fcdistro,arch) )
113 # check the template exists -- there's probably a better way..
114 if not os.path.isdir ("/vservers/.vref/%s"%refname):
115 logger.log("%s (%s) : vref %s not found, using default %s"%(
116 name,vref,refname,default))
# NOTE(review): the line resetting refname to the default (original
# line 117) is elided from this listing.
118 # reset so arch is right
119 (pldistro,fcdistro,arch) = default.split("-")
120 # could check again, but as we have /etc/slicefamily
121 # there's probably no /vservers/.vref/default
# Fallback paths when /etc/planetlab/slicefamily is absent or an
# unexpected error occurs (their except/assignment lines, original
# 122-134, are partially elided here).
124 # have not found slicefamily
125 logger.log("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
130 logger.log("%s (%s) : unexpected error follows - using 'default'"%(name,vref))
131 logger.log(traceback.format_exc())
# Local helper: map an arch string onto the vserver personality value.
135 def personality (arch):
136 personality="linux32"
137 if arch.find("64")>=0:
138 personality="linux64"
# NOTE(review): the 'return personality' line (original line 139) is
# elided from this listing.
141 logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
142 # export slicename to the slice in /etc/slicename
143 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
144 # set personality: only if needed (if arch's differ)
145 if tools.root_context_arch() != arch:
146 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch))
147 logger.log('%s: set personality to %s'%(name,personality(arch)))
def destroy(name):
    """Delete the vserver account for *name* by invoking vuserdel."""
    logger.log_call('/usr/sbin/vuserdel', name)
152 def configure(self, rec):
# Apply a fresh account record: update the rspec, refresh the slice
# initscript, then delegate to Account.configure for ssh keys.
153 new_rspec = rec['_rspec']
154 if new_rspec != self.rspec:
155 self.rspec = new_rspec
# NOTE(review): the body of this branch continues on elided lines
# (original 156-157) -- presumably re-applying resources; confirm.
158 new_initscript = rec['initscript']
159 if new_initscript != self.initscript:
160 self.initscript = new_initscript
161 # not used anymore, we always check against the installed script
162 #self.initscriptchanged = True
163 self.refresh_slice_vinit()
165 accounts.Account.configure(self, rec) # install ssh keys
167 # unconditionnally install and enable the generic vinit script
168 # mimicking chkconfig for enabling the generic vinit script
169 # this is hardwired for runlevel 3
170 def install_and_enable_vinit (self):
# Paths inside the sliver's filesystem for the generic vinit script
# and its runlevel-3 "S99" activation symlink.
171 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
172 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
173 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
174 rc3_target="../init.d/vinit"
# Copy the script in only when its contents actually changed.
176 body=file(vinit_source).read()
177 if tools.replace_file_with_string(vinit_script,body,chmod=0755):
178 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
179 # create symlink for runlevel 3
180 if not os.path.islink(rc3_link):
# NOTE(review): the 'try:'/'except:' lines wrapping this symlink
# creation (original lines 181, 184) are elided from this listing.
182 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
183 os.symlink(rc3_target,rc3_link)
# NOTE(review): single '%' with a bare string here would raise unless the
# format expects exactly one value -- as written, '%rc3_link' does not fill
# both the %s placeholders; confirm against the complete source.
185 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
187 def rerun_slice_vinit(self):
# Restart the slice's own initscript inside the sliver context; the
# trailing '&' plus shell=True backgrounds it so we don't block.
188 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit.slice restart %s" % (self.name, self.name)
# (original line 189 is elided from this listing)
190 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
191 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
193 # this one checks for the existence of the slice initscript
194 # install or remove the slice inistscript, as instructed by the initscript tag
195 def refresh_slice_vinit(self):
# NOTE(review): the line defining 'body' (original line 196, presumably
# 'body=self.initscript') is elided from this listing -- confirm.
197 sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
# replace_file_with_string returns truthy only when the file content
# actually changed; remove_if_empty drops the script when the tag is unset.
198 if tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755):
200 logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
201 if self.is_running():
202 # Only need to rerun the initscript if the vserver is
203 # already running. If the vserver isn't running, then the
204 # initscript will automatically be started by
205 # /etc/rc.d/vinit when the vserver is started.
206 self.rerun_slice_vinit()
# NOTE(review): the branch header for this removal message (original
# line 207, presumably an 'else'/empty-body case) is elided.
208 logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
210 def start(self, delay=0):
# Start the sliver after 'delay' seconds unless it is disabled.
211 if self.rspec['enabled'] <= 0:
212 logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
# NOTE(review): the 'else:' (and any sleep for 'delay', original lines
# 213, 215) are elided from this listing.
214 logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
216 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
217 self.install_and_enable_vinit()
218 # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
219 self.refresh_slice_vinit()
220 child_pid = os.fork()
# NOTE(review): the 'if child_pid == 0:' guard and the child's exit
# (original lines 221, 226-227) are elided; as listed, only the child
# branch body and the parent's waitpid survive.
222 # VServer.start calls fork() internally,
223 # so just close the nonstandard fds and fork once to avoid creating zombies
224 tools.close_nonstandard_fds()
225 vserver.VServer.start(self)
228 os.waitpid(child_pid, 0)
# NOTE(review): the 'def stop(self):' header (original line 230) is elided
# from this listing; the two lines below are its body -- log, then delegate
# the actual shutdown to the VServer base class.
231 logger.log('%s: stopping' % self.name)
232 vserver.VServer.stop(self)
def is_running(self):
    """Report whether this sliver's vserver context is currently running."""
    return vserver.VServer.is_running(self)
237 def set_resources(self):
# Push the current rspec onto the vserver: disk quota, rlimits,
# capabilities, sysctls, scheduler share, IP addresses, slice_id file.
238 disk_max = self.rspec['disk_max']
239 logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
240 try: # if the sliver is over quota, .set_disk_limit will throw an exception
241 if not self.disk_usage_initialized:
242 self.vm_running = False
# Serialize the disk scan across all slivers (see module header).
243 Sliver_VS._init_disk_info_sem.acquire()
244 logger.log('%s: computing disk usage: beginning' % self.name)
245 try: self.init_disk_info()
246 finally: Sliver_VS._init_disk_info_sem.release()
247 logger.log('%s: computing disk usage: ended' % self.name)
248 self.disk_usage_initialized = True
# Never set the limit below current usage (self.disk_blocks).
249 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
# NOTE(review): the 'except' header for this handler (original line
# 250) is elided from this listing.
251 logger.log('%s: failed to set max disk usage' % self.name)
252 logger.log_exc(self.name)
254 # get/set the min/soft/hard values for all of the vserver
255 # related RLIMITS. Note that vserver currently only
256 # implements support for hard limits.
257 for limit in vserver.RLIMITS.keys():
# NOTE(review): the line defining 'type' (original line 258, presumably
# a lowercased form of 'limit') is elided -- confirm.
259 minimum = self.rspec['%s_min'%type]
260 soft = self.rspec['%s_soft'%type]
261 hard = self.rspec['%s_hard'%type]
262 update = self.set_rlimit(limit, hard, soft, minimum)
# NOTE(review): the 'if update:' guard (original line 263) is elided.
264 logger.log('%s: setting rlimit %s to (%d, %d, %d)'
265 % (self.name, type, hard, soft, minimum))
267 self.set_capabilities_config(self.rspec['capabilities'])
268 if self.rspec['capabilities']:
269 logger.log('%s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
271 cpu_pct = self.rspec['cpu_pct']
272 cpu_share = self.rspec['cpu_share']
# Write one /etc/vservers/<guest>/sysctl/<id>/{setting,value} pair per
# 'sysctl.*' key found in the rspec.
275 for key in self.rspec.keys():
276 if key.find('sysctl.') == 0:
277 sysctl=key.split('.')
# NOTE(review): the line defining 'count' (original line 278) is
# elided from this listing -- confirm how the <id> is generated.
279 # /etc/vservers/<guest>/sysctl/<id>/
280 dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
# NOTE(review): the 'try:' wrapping makedirs (original line 281) and
# its handler (283-284) are elided.
282 os.makedirs(dirname, 0755)
285 setting = open("%s/setting" % dirname, "w")
# NOTE(review): lstrip("sysctl.") strips *characters* from the set
# {s,y,c,t,l,.}, not the literal prefix -- keys whose remainder starts
# with any of those characters get over-stripped; confirm intent.
286 setting.write("%s\n" % key.lstrip("sysctl."))
288 value = open("%s/value" % dirname, "w")
289 value.write("%s\n" % self.rspec[key])
# NOTE(review): the close() calls / 'except' header binding 'e'
# (original lines 290-292, 294) are elided from this listing.
293 logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
295 logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
296 logger.log("sliver_vs: %s: error = %s"%(self.name,e))
299 if self.rspec['enabled'] > 0:
# NOTE(review): the branch headers selecting between cpu reservation
# and plain share (original lines 300, 302-305, 307-309) are elided.
301 logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
306 logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
310 self.set_sched_config(cpu_pct, cpu_share)
311 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
312 if self.rspec['ip_addresses'] != '0.0.0.0':
313 logger.log('%s: setting IP address(es) to %s' % \
314 (self.name, self.rspec['ip_addresses']))
315 self.set_ipaddresses_config(self.rspec['ip_addresses'])
# Record the slice_id in the vserver config dir for other tools.
# NOTE(review): the 'try:' (original line 317) and the 'except' headers
# binding 'e' (323, 325) are elided from this listing.
318 vserver_config_path = '/etc/vservers/%s'%self.name
319 if not os.path.exists (vserver_config_path):
320 os.makedirs (vserver_config_path)
321 file('%s/slice_id'%vserver_config_path, 'w').write("%d"%self.slice_id)
322 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
324 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
326 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
328 if self.enabled == False:
# NOTE(review): the body of this disabled-branch (original lines
# 329-331, presumably setting enabled and stopping) is elided.
332 if False: # Does not work properly yet.
333 if self.have_limits_changed():
334 logger.log('%s: limits have changed --- restarting' % self.name)
# NOTE(review): the initialization of 'stopcount' and the loop body
# (original lines 335, 337-339, 341-342) are elided from this listing.
336 while self.is_running() and stopcount > 0:
340 stopcount = stopcount - 1
343 else: # tell vsh to disable remote login by setting CPULIMIT to 0
344 logger.log('%s: disabling remote login' % self.name)
# CPULIMIT 0 keeps the config present but makes vsh refuse logins.
345 self.set_sched_config(0, 0)