3 There are a couple of tricky things going on here. First, the kernel
4 needs disk usage information in order to enforce the quota. However,
5 determining disk usage redundantly strains the disks. Thus, the
6 Sliver_VS.disk_usage_initialized flag is used to determine whether
7 this initialization has been made.
9 Second, it's not currently possible to set the scheduler parameters
10 for a sliver unless that sliver has a running process. /bin/vsh helps
11 us out by reading the configuration file so that it can set the
12 appropriate limits after entering the sliver context. Making the
13 syscall that actually sets the parameters gives a harmless error if no
14 process is running. Thus we keep vm_running on when setting scheduler
15 parameters so that set_sched_params() always makes the syscall, and we
16 don't have to guess if there is a running process or not.
from threading import BoundedSemaphore

# Serializes the expensive per-sliver disk-usage initialization across all
# Sliver_VS instances (see Sliver_VS._init_disk_info_sem).
globalsem = BoundedSemaphore()

# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # NOTE(review): a line is elided from this listing between the loop header
    # and the body; it presumably derived 'rlim' from 'rlimit' (e.g.
    # rlim = rlimit.lower()) -- confirm against the full source, otherwise
    # 'rlim' below is a NameError.
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""

    # Account-type tag used by the accounts registry to select this class.
    TYPE = 'sliver.VServer'
    # Class-wide semaphore: only one sliver at a time computes disk usage,
    # so concurrent slivers don't redundantly strain the disks.
    _init_disk_info_sem = globalsem
    def __init__(self, rec):
        """Bind to the existing vserver for slice rec['name'], recreating it if broken.

        rec: slice record dict; reads 'name' and 'slice_id' (and 'vref' when
        the vserver has to be recreated).
        """
        logger.verbose ('initing Sliver_VS with name=%s'%rec['name'])
        # NOTE(review): the 'try:' opening this handler is elided from this listing.
        vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc(self.name)
                logger.log('%s: recreating bad vserver' % rec['name'])
                self.destroy(rec['name'])
            # Either the vserver never existed or it was destroyed above:
            # build a fresh one and bind to it.
            self.create(rec['name'], rec['vref'])
            vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')

        # NOTE(review): some attribute initializations are elided from this
        # listing between the handler above and the lines below.
        self.slice_id = rec['slice_id']
        # Disk usage is computed lazily on the first set_resources() call,
        # since scanning the disk is expensive (see module header).
        self.disk_usage_initialized = False
        self.initscriptchanged = False
    def create(name, vref = None):
        """Create a new vserver for slice 'name' from the template named by 'vref'.

        NOTE(review): the decorator line is elided from this listing; in
        context this behaves as a @staticmethod (called without instance
        binding from __init__) -- confirm against the full source.
        """
        logger.verbose('Sliver_VS:create - name=%s'%name)
        # (elided lines here in this listing)
        ### locating the right slicefamily
        # this is a first draft, and more a proof of concept thing
        # the idea is to parse vref for dash-separated wishes,
        # and to project these against the defaults
        # so e.g. if the default slice family (as found in /etc/planetlab/slicefamily)
        # is planetlab-f8-i386, then here is what we get
        # vref=x86_64 -> vuseradd -t planetlab-f8-x86_64
        # vref=centos5 -> vuseradd -t planetlab-centos5-i386
        # vref=centos5-onelab -> vuseradd -t onelab-centos5-i386
        # vref=planetflow -> vuseradd -t planetflow-f8-i386
        # vref=x86_64-planetflow -> vuseradd -t planetflow-f8-x86_64
        # NOTE(review): a 'try:' guarding the slicefamily read is elided here.
        default=file("/etc/planetlab/slicefamily").read().strip()
        (pldistro,fcdistro,arch) = default.split("-")
        # (elided lines)
        known_archs = [ 'i386', 'x86_64' ]
        known_fcdistros = [ 'centos5', 'f8', 'f9', 'f10', 'f11', 'f12' ]
        # from the slice attribute: cut dashes and try to figure the meaning
        slice_wishes = vref.split("-")
        for wish in slice_wishes:
            if wish in known_archs:
                # (elided: presumably arch=wish)
            elif wish in known_fcdistros:
                # (elided: presumably fcdistro=wish, plus an else branch for pldistro)
        refname="-".join( (pldistro,fcdistro,arch) )

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%refname):
            logger.log("%s (%s) : vref %s not found, using default %s"%(
                       name,vref,refname,default))
            # (elided: presumably refname falls back to the default family)
            # reset so arch is right
            (pldistro,fcdistro,arch) = default.split("-")
            # could check again, but as we have /etc/slicefamily
            # there's probably no /vservers/.vref/default
        # NOTE(review): an 'except IOError:' clause is elided here.
            # have not found slicefamily
            logger.log("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
        # NOTE(review): a catch-all 'except:' clause is elided here.
            logger.log("%s (%s) : unexpected error follows - using 'default'"%(name,vref))
            logger.log(traceback.format_exc())
            # (elided lines)

        # Map a requested architecture to the vserver personality string.
        def personality (arch):
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
            # NOTE(review): 'return personality' is elided from this listing.

        logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch))
            logger.log('%s: set personality to %s'%(name,personality(arch)))
150 def destroy(name): logger.log_call('/usr/sbin/vuserdel', name)
    def configure(self, rec):
        """Apply an updated slice record: rspec, slice initscript, ssh keys."""
        # in case we update nodemanager..
        self.install_and_enable_vinit()

        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): line(s) elided here in this listing -- presumably
            # the new rspec is pushed (e.g. self.set_resources()); confirm.

        new_initscript = rec['initscript']
        if new_initscript != self.initscript:
            self.initscript = new_initscript
            # not used anymore, we always check against the installed script
            #self.initscriptchanged = True
            self.refresh_slice_vinit()

        accounts.Account.configure(self, rec) # install ssh keys
    # unconditionally install and enable the generic vinit script
    # mimicking chkconfig for enabling the generic vinit script
    # this is hardwired for runlevel 3
    def install_and_enable_vinit (self):
        vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
        vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
        rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
        rc3_target="../init.d/vinit"
        # Install the script body (mode 0755); replace_file_with_string returns
        # True only when the on-disk content actually changed.
        body=file(vinit_source).read()
        if tools.replace_file_with_string(vinit_script,body,chmod=0755):
            logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
        # create symlink for runlevel 3
        if not os.path.islink(rc3_link):
            # NOTE(review): a 'try:' line is elided here in this listing.
            logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
            os.symlink(rc3_target,rc3_link)
            # NOTE(review): an 'except:' line is elided here in this listing.
            # NOTE(review): the format below has two %s placeholders but a
            # single argument (rc3_link, not a tuple) -- this would raise
            # TypeError if the branch is ever reached; likely should be
            # %(self.name,rc3_link). Confirm against full source before fixing.
            logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
190 def rerun_slice_vinit(self):
191 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
192 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
193 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
    # this one checks for the existence of the slice initscript
    # install or remove the slice initscript, as instructed by the initscript tag
    def refresh_slice_vinit(self):
        # NOTE(review): a line is elided here in this listing -- 'body' used
        # below is presumably set from the slice's initscript tag
        # (e.g. body=self.initscript); confirm against full source.
        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
        # replace_file_with_string returns True only on actual change;
        # remove_if_empty makes an empty body delete the installed script.
        if tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755):
            # NOTE(review): a guard line (likely 'if body:') is elided here.
            logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
            if self.is_running():
                # Only need to rerun the initscript if the vserver is
                # already running. If the vserver isn't running, then the
                # initscript will automatically be started by
                # /etc/rc.d/vinit when the vserver is started.
                self.rerun_slice_vinit()
            # NOTE(review): an 'else:' line is elided here (removal branch).
            logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
    def start(self, delay=0):
        """Start the sliver's vserver (after 'delay' seconds) unless disabled."""
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
        # NOTE(review): an 'else:' (and likely a time.sleep(delay)) is elided
        # from this listing between the two branches; the lines below belong
        # to the enabled path -- confirm nesting against the full source.
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        # NOTE(review): the 'if child_pid == 0:' guard and the child's
        # os._exit() are elided from this listing; the two lines below run in
        # the child in the full source.
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        # Parent reaps the intermediate child so no zombie is left behind.
        os.waitpid(child_pid, 0)
    # NOTE(review): the 'def stop(self):' header is elided from this listing;
    # these two statements are its body: log, then delegate shutdown to the
    # underlying VServer.
        logger.log('%s: stopping' % self.name)
        vserver.VServer.stop(self)
236 def is_running(self):
237 return vserver.VServer.is_running(self)
    def set_resources(self):
        """Push the slice's rspec into the vserver: disk quota, rlimits,
        capabilities, sysctls, CPU scheduling, IP addresses, slice_id file.

        Called with vm_running kept on so set_sched_config always makes the
        syscall (see module header)."""
        disk_max = self.rspec['disk_max']
        logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # Serialize the expensive disk scan across all slivers.
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('%s: computing disk usage: beginning' % self.name)
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('%s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            # Never set the limit below current usage (disk_blocks).
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        # NOTE(review): the 'except ...:' clause is elided from this listing.
            logger.log('%s: failed to set max disk usage' % self.name)
            logger.log_exc(self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): a line is elided here; it presumably set 'type'
            # (e.g. type = limit.lower()) -- note it shadows the builtin.
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # NOTE(review): an 'if update:' guard is elided here.
            logger.log('%s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('%s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # NOTE(review): initialization of 'count' (the per-sysctl directory
        # index used below) is elided from this listing.
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                # NOTE(review): a 'try:' line is elided here.
                # /etc/vservers/<guest>/sysctl/<id>/
                dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                os.makedirs(dirname, 0755)
                # NOTE(review): lines elided around makedirs (likely a
                # try/except for an already-existing directory).
                setting = open("%s/setting" % dirname, "w")
                # NOTE(review): lstrip("sysctl.") strips any leading chars
                # from the set {s,y,c,t,l,.} -- not the "sysctl." prefix.
                # e.g. keys whose tail starts with one of those letters get
                # truncated; latent bug, confirm before fixing.
                setting.write("%s\n" % key.lstrip("sysctl."))
                value = open("%s/value" % dirname, "w")
                value.write("%s\n" % self.rspec[key])
                # NOTE(review): file .close() calls and count increment are
                # elided from this listing.
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                # NOTE(review): an 'except IOError, e:' clause is elided here.
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            # NOTE(review): guards on cpu_pct > 0 / cpu_share > 0 (with zeroing
            # else-branches) are elided from this listing around the two logs.
            logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('%s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            self.set_ipaddresses_config(self.rspec['ip_addresses'])

            # NOTE(review): a 'try:' line is elided before recording slice_id.
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
                file('%s/slice_id'%vserver_config_path, 'w').write("%d"%self.slice_id)
                logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
            # NOTE(review): 'except IOError, e:' elided here.
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
            # NOTE(review): 'except Exception, e:' elided here.
            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)

            if self.enabled == False:
                # NOTE(review): body elided (presumably self.enabled = True
                # followed by self.start()).

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('%s: limits have changed --- restarting' % self.name)
                    # NOTE(review): stopcount initialization and the stop/sleep
                    # statements inside the loop are elided from this listing.
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1

        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('%s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)