5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
36 # special constant that tells vserver to keep its existing settings
37 KEEP_LIMIT = vserver.VC_LIM_KEEP
# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # BUGFIX: the loop body referenced an undefined name 'rlim' while the
    # loop variable was 'rlimit' (NameError at import time).  Derive it here,
    # lowercased to match the '<limit>_min/_soft/_hard' rspec keys that
    # set_resources() reads back.
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
# NOTE(review): this excerpt is a numbered paste with missing lines; the stray
# leading numbers below are original file line numbers, not code.
# Sliver_VS mixes the nodemanager Account lifecycle (accounts.Account) with the
# low-level container operations (vserver.VServer).
48 class Sliver_VS(accounts.Account, vserver.VServer):
49 """This class wraps vserver.VServer to make its interface closer to what we need."""
52 TYPE = 'sliver.VServer'
# Class-wide semaphore: serializes the expensive disk-usage scan so only one
# sliver at a time runs init_disk_info() (acquired/released in set_resources).
53 _init_disk_info_sem = BoundedSemaphore()
# Two-chance construction: try to attach to the existing vserver; on failure
# (other than the vserver simply not existing) log it, then (re)create the
# vserver from rec['vref'] and attach again.
# NOTE(review): several lines are missing from this excerpt (e.g. the
# 'name = rec[...]' binding and the 'try:' opening the first chance) --
# do not trust control flow as shown here.
55 def __init__(self, rec):
57 logger.verbose ('sliver_vs: %s init'%name)
59 logger.log("sliver_vs: %s: first chance..."%name)
60 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
61 except Exception, err:
62 if not isinstance(err, vserver.NoSuchVServer):
63 # Probably a bad vserver or vserver configuration file
64 logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
65 logger.log('sliver_vs: %s: recreating bad vserver' % name)
# recreate, then retry attaching to the (now fresh) vserver
67 self.create(name, rec['vref'])
68 logger.log("sliver_vs: %s: second chance..."%name)
69 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
73 self.slice_id = rec['slice_id']
# lazily initialized by set_resources() under _init_disk_info_sem
74 self.disk_usage_initialized = False
# Build a brand-new vserver for slice 'name' from reference image 'vref'
# (a GetSliceFamily string like 'planetlab-f8-i386'), then seed
# /etc/slicename and /etc/slicefamily inside it and set the guest
# personality when the image arch differs from the root context arch.
# NOTE(review): takes no 'self' -- presumably decorated @staticmethod on a
# line missing from this excerpt; guards/returns between the visible lines
# are also missing (e.g. the early return when the template dir is absent).
80 def create(name, vref = None):
81 logger.verbose('sliver_vs: %s: create'%name)
83 logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
85 # band-aid for short period as old API doesn't have GetSliceFamily function
87 vref = "planetlab-f8-i386"
89 # used to look in /etc/planetlab/family,
90 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
91 # which for legacy is still exposed here as the 'vref' key
93 # check the template exists -- there's probably a better way..
94 if not os.path.isdir ("/vservers/.vref/%s"%vref):
95 logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
# split 'distro-release-arch'; only the arch part is used below
100 (x,y,arch)=vref.split('-')
101 # NOTE(review): this 3-way split does not apply when 'vref' is a short
102 # name like 'netflow' -- known limitation, would raise ValueError
# tiny helper mapping an arch string to a linux32/linux64 personality
# (its 'return personality' line is missing from this excerpt)
106 def personality (arch):
107 personality="linux32"
108 if arch.find("64")>=0:
109 personality="linux64"
112 # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
# run vuseradd through 'bash -x' so the trace lands in the log
113 logger.log_call(['/bin/bash','-x','/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
114 # export slicename to the slice in /etc/slicename
115 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
116 file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
117 # set personality: only if needed (if arch's differ)
118 if tools.root_context_arch() != arch:
119 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
120 logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
# NOTE(review): this is the body of destroy(name) -- its 'def' line is
# missing from this excerpt.  Tears the sliver down via vuserdel, traced
# through 'bash -x' like create() does.
124 # logger.log_call(['/usr/sbin/vuserdel', name, ])
125 logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
# Apply a fresh GetSlivers record: refresh the generic vinit script, pick up
# rspec/initscript changes, then let accounts.Account install ssh keys.
# NOTE(review): lines are missing between the rspec update and the
# initscript update (presumably the call that applies the new rspec).
127 def configure(self, rec):
128 # in case we update nodemanager..
129 self.install_and_enable_vinit()
131 new_rspec = rec['_rspec']
132 if new_rspec != self.rspec:
133 self.rspec = new_rspec
136 new_initscript = rec['initscript']
137 if new_initscript != self.initscript:
138 self.initscript = new_initscript
139 # not used anymore, we always check against the installed script
140 #self.initscriptchanged = True
# reflect the new initscript inside the sliver right away
141 self.refresh_slice_vinit()
143 accounts.Account.configure(self, rec) # install ssh keys
145 # unconditionally install and enable the generic vinit script
146 # mimicking chkconfig for enabling the generic vinit script
147 # this is hardwired for runlevel 3
148 def install_and_enable_vinit (self):
149 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
150 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
151 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
152 rc3_target="../init.d/vinit"
154 code=file(vinit_source).read()
155 if tools.replace_file_with_string(vinit_script,code,chmod=0755):
156 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
157 # create symlink for runlevel 3
158 if not os.path.islink(rc3_link):
160 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
161 os.symlink(rc3_target,rc3_link)
163 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
165 def rerun_slice_vinit(self):
166 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
167 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
168 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
170 # this one checks for the existence of the slice initscript
171 # install or remove the slice initscript, as instructed by the initscript tag
# Sync the slice-provided initscript into the sliver as vinit.slice: install
# it when set, remove it when empty, and rerun it if the sliver is already up.
# NOTE(review): 'code' is bound on a line missing from this excerpt
# (presumably code = self.initscript); the branch structure around the
# 'Removed obsolete initscript' log is also incomplete here.
172 def refresh_slice_vinit(self):
174 sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
# remove_if_empty=True makes an empty initscript tag delete the file
175 if tools.replace_file_with_string(sliver_initscript,code,remove_if_empty=True,chmod=0755):
177 logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
178 if self.is_running():
179 # Only need to rerun the initscript if the vserver is
180 # already running. If the vserver isn't running, then the
181 # initscript will automatically be started by
182 # /etc/rc.d/vinit when the vserver is started.
183 self.rerun_slice_vinit()
185 logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
187 # bind mount root side dir to sliver side
188 # needs to be done before sliver starts
# Bind-mount the root-side ~/.ssh read-only into the sliver so the slice
# sees its keys; must run before the sliver starts (see start()).
# NOTE(review): lines are missing from this excerpt -- the 'try:' matching
# the final log_exc, and whatever runs when a path is missing or is not a
# directory (presumably mkdir / early return).
189 def expose_ssh_dir (self):
191 root_ssh="/home/%s/.ssh"%self.name
192 sliver_ssh="/vservers/%s/home/%s/.ssh"%(self.name,self.name)
193 # any of both might not exist yet
194 for path in [root_ssh,sliver_ssh]:
195 if not os.path.exists (path):
197 if not os.path.isdir (path):
# only mount once: skip if /proc/mounts already lists the target
199 mounts=file('/proc/mounts').read()
200 if mounts.find(sliver_ssh)<0:
202 subprocess.call("mount --bind -o ro %s %s"%(root_ssh,sliver_ssh),shell=True)
203 logger.log("expose_ssh_dir: %s mounted into slice %s"%(root_ssh,self.name))
# best-effort: failures are logged, not raised
205 logger.log_exc("expose_ssh_dir with slice %s failed"%self.name)
# Start the sliver after 'delay' seconds, unless the rspec disables it.
# Refreshes the vinit machinery first, then forks so the (double-forking)
# VServer.start runs in a child we can wait on without leaving zombies.
# NOTE(review): the fork branches are incomplete in this excerpt -- the
# 'if child_pid == 0:' test, the child's exit, the sleep implementing
# 'delay', and the early return for the disabled case are all missing lines.
207 def start(self, delay=0):
208 if self.rspec['enabled'] <= 0:
209 logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
211 logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
213 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
214 self.install_and_enable_vinit()
215 # expose .ssh for omf_friendly slivers
216 if 'omf_control' in self.rspec['tags']:
217 self.expose_ssh_dir()
218 # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
219 self.refresh_slice_vinit()
220 child_pid = os.fork()
222 # VServer.start calls fork() internally,
223 # so just close the nonstandard fds and fork once to avoid creating zombies
224 tools.close_nonstandard_fds()
225 vserver.VServer.start(self)
# parent side: reap the child so it does not linger as a zombie
228 os.waitpid(child_pid, 0)
# NOTE(review): this is the body of stop() -- its 'def' line is missing from
# this excerpt.  Plain delegation to the vserver layer, with a log line.
231 logger.log('sliver_vs: %s: stopping' % self.name)
232 vserver.VServer.stop(self)
234 def is_running(self):
235 return vserver.VServer.is_running(self)
# Push the current rspec onto the live vserver: disk quota, rlimits,
# capabilities, per-slice sysctls, CPU scheduler settings, IP addresses,
# and the recorded slice_id; finally disable remote login when the sliver
# is administratively disabled.
# NOTE(review): many lines are missing from this excerpt (except-clauses,
# the binding of 'type' from 'limit', the sysctl 'count' counter, the
# restart/stop loop body) -- treat control flow shown here as indicative only.
237 def set_resources(self):
# --- disk quota -------------------------------------------------------
238 disk_max = self.rspec['disk_max']
239 logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
240 try: # if the sliver is over quota, .set_disk_limit will throw an exception
241 if not self.disk_usage_initialized:
242 self.vm_running = False
# serialize the expensive scan across slivers via the class semaphore
243 Sliver_VS._init_disk_info_sem.acquire()
244 logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
245 # init_disk_info is inherited from VServer
246 try: self.init_disk_info()
247 finally: Sliver_VS._init_disk_info_sem.release()
248 logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
249 self.disk_usage_initialized = True
# never set the limit below current usage (disk_blocks)
250 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
252 logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
# --- rlimits ----------------------------------------------------------
254 # get/set the min/soft/hard values for all of the vserver
255 # related RLIMITS. Note that vserver currently only
256 # implements support for hard limits.
# 'type' below is presumably limit.lower(), bound on a missing line
257 for limit in vserver.RLIMITS.keys():
259 minimum = self.rspec['%s_min'%type]
260 soft = self.rspec['%s_soft'%type]
261 hard = self.rspec['%s_hard'%type]
262 update = self.set_rlimit(limit, hard, soft, minimum)
264 logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
265 % (self.name, type, hard, soft, minimum))
# --- capabilities -----------------------------------------------------
267 self.set_capabilities_config(self.rspec['capabilities'])
268 if self.rspec['capabilities']:
269 logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
# --- cpu settings read here, applied further down ---------------------
271 cpu_pct = self.rspec['cpu_pct']
272 cpu_share = self.rspec['cpu_share']
# --- per-slice sysctls: one setting/value dir per 'sysctl.*' rspec key
275 for key in self.rspec.keys():
276 if key.find('sysctl.') == 0:
277 sysctl=key.split('.')
279 # /etc/vservers/<guest>/sysctl/<id>/
# 'count' is bound/incremented on lines missing from this excerpt
280 dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
282 os.makedirs(dirname, 0755)
285 setting = open("%s/setting" % dirname, "w")
# NOTE(review): lstrip("sysctl.") strips *characters*, not the prefix --
# it would also eat leading c/l/s/t/y/. from the setting name; suspect bug
286 setting.write("%s\n" % key.lstrip("sysctl."))
288 value = open("%s/value" % dirname, "w")
289 value.write("%s\n" % self.rspec[key])
293 logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
294 logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
296 logger.log("sliver_vs: %s: error = %s"%(self.name,e))
# --- scheduler + network, only for enabled slivers --------------------
299 if self.rspec['enabled'] > 0:
301 logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
306 logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
310 self.set_sched_config(cpu_pct, cpu_share)
311 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
312 if self.rspec['ip_addresses'] != '0.0.0.0':
313 logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
314 (self.name, self.rspec['ip_addresses']))
315 self.set_ipaddresses_config(self.rspec['ip_addresses'])
317 #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
318 #self.setname(self.slice_id)
319 #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
# record slice_id under /etc/vservers/<name>/ for other tools to read
321 vserver_config_path = '/etc/vservers/%s'%self.name
322 if not os.path.exists (vserver_config_path):
323 os.makedirs (vserver_config_path)
324 file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
325 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
327 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
329 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
# --- disabled slivers -------------------------------------------------
332 if self.enabled == False:
# dead code kept by the authors: limit-change restart logic, disabled
336 if False: # Does not work properly yet.
337 if self.have_limits_changed():
338 logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
# 'stopcount' init and loop body are on lines missing from this excerpt
340 while self.is_running() and stopcount > 0:
344 stopcount = stopcount - 1
347 else: # tell vsh to disable remote login by setting CPULIMIT to 0
348 logger.log('sliver_vs: %s: disabling remote login' % self.name)
349 self.set_sched_config(0, 0)