5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
36 # special constant that tells vserver to keep its existing settings
# Sentinel passed to set_rlimit() meaning "leave the current value alone".
36 # special constant that tells vserver to keep its existing settings
37 KEEP_LIMIT = vserver.VC_LIM_KEEP
39 # populate the sliver/vserver specific default allocations table,
40 # which is used to look for slice attributes
41 DEFAULT_ALLOCATION = {}
42 for rlimit in vserver.RLIMITS.keys():
# NOTE(review): the loop body references 'rlim', not the loop variable
# 'rlimit'; a line is missing from this view (original line 43) --
# presumably 'rlim = rlimit.lower()' -- TODO confirm against the full file.
# Each RLIMIT contributes three keys (min/soft/hard), all defaulting to
# KEEP_LIMIT so that unspecified slice attributes do not alter limits.
44 DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
45 DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
46 DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
48 class Sliver_VS(accounts.Account, vserver.VServer):
49     """This class wraps vserver.VServer to make its interface closer to what we need."""
# Account-type tag used by the accounts registry to select this class.
52 TYPE = 'sliver.VServer'
# Serializes the expensive disk-usage scan across all slivers so that
# concurrent set_resources() calls do not strain the disks (see module
# docstring); acquired/released in set_resources().
53 _init_disk_info_sem = BoundedSemaphore()
# Construct the sliver wrapper for slice record 'rec'.
# First tries to attach to an existing vserver; on failure (other than a
# plain "no such vserver") it logs, recreates the vserver from rec['vref'],
# and retries once ("second chance").
# NOTE(review): several lines are missing from this view (original 56, 58,
# 66, 70-72) -- presumably the 'name = rec[...]' binding, the opening
# 'try:', and the accounts.Account initialization -- TODO confirm.
55 def __init__(self, rec):
57 logger.verbose ('sliver_vs: %s init'%name)
59 logger.log("sliver_vs: %s: first chance..."%name)
60 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
61 except Exception, err:
# NoSuchVServer is the expected "needs creating" case; anything else
# indicates a corrupt vserver or config file, so log the traceback.
62 if not isinstance(err, vserver.NoSuchVServer):
63 # Probably a bad vserver or vserver configuration file
64 logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
65 logger.log('sliver_vs: %s: recreating bad vserver' % name)
67 self.create(name, rec['vref'])
68 logger.log("sliver_vs: %s: second chance..."%name)
69 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
# Remember the PLC slice id (recorded to disk later in set_resources)
# and defer the costly disk scan until the first set_resources() call.
73 self.slice_id = rec['slice_id']
74 self.disk_usage_initialized = False
# Create a fresh vserver for slice 'name' from reference image 'vref'.
# NOTE(review): takes 'name' rather than 'self', so this is presumably a
# @staticmethod whose decorator line (original 79) is missing from this
# view -- TODO confirm. Other gaps: original 82, 84, 86, 88, 92, 96-99,
# 103-105, 110-111, 121 (likely the no-vref guard, early returns, and the
# 'return personality' of the nested helper).
80 def create(name, vref = None):
81 logger.verbose('sliver_vs: %s: create'%name)
83 logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
85 # band-aid for short period as old API doesn't have GetSliceFamily function
87 vref = "planetlab-f8-i386"
89 # used to look in /etc/planetlab/family,
90 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
91 # which for legacy is still exposed here as the 'vref' key
93 # check the template exists -- there's probably a better way..
94 if not os.path.isdir ("/vservers/.vref/%s"%vref):
95 logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
# vref is expected to look like "<distro>-<release>-<arch>", e.g.
# "planetlab-f8-i386"; only the arch component is used below.
100 (x,y,arch)=vref.split('-')
101 # mh, this of course applies when 'vref' is e.g. 'netflow'
102 # and that's not quite right
# Map an arch string to the vserver personality setting; any arch
# containing "64" is treated as 64-bit.
# NOTE(review): the helper's 'return personality' line (original 110)
# is missing from this view -- TODO confirm.
106 def personality (arch):
107 personality="linux32"
108 if arch.find("64")>=0:
109 personality="linux64"
# Build the vserver via vuseradd from the reference image; run through
# 'bash -x' so the call is traced in the logs. Long timeout: image copy.
112 # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
113 logger.log_call(['/bin/bash','-x','/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
114 # export slicename to the slice in /etc/slicename
115 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
116 file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
117 # set personality: only if needed (if arch's differ)
118 if tools.root_context_arch() != arch:
119 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
120 logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
# NOTE(review): these lines are the body of the sliver-destroy routine;
# its 'def' line (original 123, presumably 'def destroy(name):' with a
# @staticmethod decorator) is missing from this view -- TODO confirm.
# Removes the vserver via vuserdel, traced through 'bash -x'.
124 # logger.log_call(['/usr/sbin/vuserdel', name, ])
125 logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
# Apply an updated slice record: adopt a changed rspec, refresh the slice
# initscript if it changed, then delegate to accounts.Account.configure
# to (re)install ssh keys.
# NOTE(review): original lines 131-132 are missing from this view --
# presumably the rspec-changed branch also calls self.set_resources() --
# TODO confirm against the full file.
127 def configure(self, rec):
128 new_rspec = rec['_rspec']
129 if new_rspec != self.rspec:
130 self.rspec = new_rspec
133 new_initscript = rec['initscript']
134 if new_initscript != self.initscript:
135 self.initscript = new_initscript
136 # not used anymore, we always check against the installed script
137 #self.initscriptchanged = True
138 self.refresh_slice_vinit()
140 accounts.Account.configure(self, rec) # install ssh keys
142 # unconditionally install and enable the generic vinit script
143 # mimicking chkconfig for enabling the generic vinit script
144 # this is hardwired for runlevel 3
145 def install_and_enable_vinit (self):
146 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
147 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
148 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
149 rc3_target="../init.d/vinit"
151 body=file(vinit_source).read()
152 if tools.replace_file_with_string(vinit_script,body,chmod=0755):
153 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
154 # create symlink for runlevel 3
155 if not os.path.islink(rc3_link):
157 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
158 os.symlink(rc3_target,rc3_link)
160 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
# Restart the slice's own initscript (vinit.slice) inside the running
# vserver context. The command is backgrounded with a trailing '&' and
# detached from our stdio so a hung initscript cannot block nodemanager.
# NOTE(review): original lines 164 and 167 are missing from this view --
# presumably blank lines -- TODO confirm against the full file.
162 def rerun_slice_vinit(self):
163 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit.slice restart %s" % (self.name, self.name)
165 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
166 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
168 # this one checks for the existence of the slice initscript
169 # install or remove the slice initscript, as instructed by the initscript tag
# Sync the per-slice initscript into the sliver as vinit.slice: install it
# when set, remove it when empty (remove_if_empty=True), and if the file
# actually changed while the vserver is running, rerun it immediately.
# NOTE(review): 'body' is bound on a line missing from this view (original
# 171) -- presumably 'body = self.initscript' -- and original 174/182
# (likely blank / 'else:') are also missing -- TODO confirm.
170 def refresh_slice_vinit(self):
172 sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
173 if tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755):
175 logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
176 if self.is_running():
177 # Only need to rerun the initscript if the vserver is
178 # already running. If the vserver isn't running, then the
179 # initscript will automatically be started by
180 # /etc/rc.d/vinit when the vserver is started.
181 self.rerun_slice_vinit()
183 logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
185 # bind mount root side dir to sliver side
186 # needs to be done before sliver starts
# Bind-mount the slice user's root-side ~/.ssh read-only into the sliver
# so the keys are visible inside; skipped if /proc/mounts shows the mount
# already exists. Must run before the sliver starts (see comment above).
# NOTE(review): original lines 188, 194, 196, 199, 202 are missing from
# this view -- presumably the enclosing 'try:'/'except:' pair, directory
# creation for missing paths, and handling for a path that exists but is
# not a directory -- TODO confirm against the full file.
187 def expose_ssh_dir (self):
189 root_ssh="/home/%s/.ssh"%self.name
190 sliver_ssh="/vservers/%s/home/%s/.ssh"%(self.name,self.name)
191 # any of both might not exist yet
192 for path in [root_ssh,sliver_ssh]:
193 if not os.path.exists (path):
195 if not os.path.isdir (path):
197 mounts=file('/proc/mounts').read()
198 if mounts.find(sliver_ssh)<0:
200 subprocess.call("mount --bind -o ro %s %s"%(root_ssh,sliver_ssh),shell=True)
201 logger.log("expose_ssh_dir: %s mounted into slice %s"%(root_ssh,self.name))
203 logger.log_exc("expose_ssh_dir with slice %s failed"%self.name)
# Start the sliver after an optional delay, refreshing the generic vinit
# and slice initscripts first; disabled slivers are not started.
# NOTE(review): original lines 208, 210, 219, 224-225 are missing from
# this view -- presumably the 'else:' for the enabled check, the
# time.sleep(delay), and the fork child/parent branch ('if child_pid==0:'
# ... 'os._exit' / 'else:') -- TODO confirm against the full file.
205 def start(self, delay=0):
206 if self.rspec['enabled'] <= 0:
207 logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
209 logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
211 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
212 self.install_and_enable_vinit()
213 # expose .ssh for omf_friendly slivers
214 if 'omf_control' in self.rspec['tags']:
215 self.expose_ssh_dir()
216 # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
217 self.refresh_slice_vinit()
# Fork so the blocking VServer.start happens in a child; the parent
# reaps it with waitpid below to avoid leaving a zombie.
218 child_pid = os.fork()
220 # VServer.start calls fork() internally,
221 # so just close the nonstandard fds and fork once to avoid creating zombies
222 tools.close_nonstandard_fds()
223 vserver.VServer.start(self)
226 os.waitpid(child_pid, 0)
# NOTE(review): these lines are the body of the stop routine; its 'def'
# line (original 228, presumably 'def stop(self):') is missing from this
# view -- TODO confirm. Logs, then delegates shutdown to vserver.VServer.
229 logger.log('sliver_vs: %s: stopping' % self.name)
230 vserver.VServer.stop(self)
def is_running(self):
    """Report whether this sliver's vserver context is currently up.

    Pure delegation to the underlying vserver.VServer implementation;
    kept as an explicit method so callers go through the Account-style
    interface of this class.
    """
    return vserver.VServer.is_running(self)
# Push the current rspec into the kernel/vserver configuration:
#   1. disk quota (with a one-time, semaphore-guarded disk-usage scan),
#   2. per-RLIMIT min/soft/hard limits,
#   3. capabilities,
#   4. per-slice sysctl settings under /etc/vservers/<name>/sysctl/<n>/,
#   5. CPU reservation/share (or CPULIMIT=0 to disable remote login when
#      the sliver is disabled -- see module docstring on vm_running),
#   6. IP addresses, and the slice_id file used by PlanetFlow.
# NOTE(review): this view has many gaps (original 249, 251, 256, 261,
# 268, 271-272, 276, 279-282, 285, 288-296, 298, 300-307, 314, 318, 324,
# 326-329, 331-344 are absent) -- notably the bindings of 'type'
# (presumably 'type = limit.lower()'), 'count' (sysctl dir counter),
# 'stopcount', and the various try:/except:/else: lines -- TODO confirm
# each against the full file before relying on the exact control flow.
235 def set_resources(self):
236 disk_max = self.rspec['disk_max']
237 logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
238 try: # if the sliver is over quota, .set_disk_limit will throw an exception
239 if not self.disk_usage_initialized:
240 self.vm_running = False
# Serialize the expensive scan across slivers (class-level semaphore).
241 Sliver_VS._init_disk_info_sem.acquire()
242 logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
243 # init_disk_info is inherited from VServer
244 try: self.init_disk_info()
245 finally: Sliver_VS._init_disk_info_sem.release()
246 logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
247 self.disk_usage_initialized = True
# Never set the limit below current usage, or the sliver would be
# instantly over quota.
248 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
250 logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
252 # get/set the min/soft/hard values for all of the vserver
253 # related RLIMITS. Note that vserver currently only
254 # implements support for hard limits.
255 for limit in vserver.RLIMITS.keys():
257 minimum = self.rspec['%s_min'%type]
258 soft = self.rspec['%s_soft'%type]
259 hard = self.rspec['%s_hard'%type]
260 update = self.set_rlimit(limit, hard, soft, minimum)
# Presumably only logged when set_rlimit reports a change ('if update:'
# on a missing line) -- TODO confirm.
262 logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
263 % (self.name, type, hard, soft, minimum))
265 self.set_capabilities_config(self.rspec['capabilities'])
266 if self.rspec['capabilities']:
267 logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
269 cpu_pct = self.rspec['cpu_pct']
270 cpu_share = self.rspec['cpu_share']
# Write one sysctl.<key> rspec entry per numbered directory under
# /etc/vservers/<name>/sysctl/.
273 for key in self.rspec.keys():
274 if key.find('sysctl.') == 0:
275 sysctl=key.split('.')
277 # /etc/vservers/<guest>/sysctl/<id>/
278 dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
280 os.makedirs(dirname, 0755)
# NOTE(review): lstrip("sysctl.") strips any of the characters
# 's','y','c','t','l','.' from the left, not the literal prefix;
# harmless only if the remaining key never starts with one of
# those characters -- flagging, not changing, in this view.
283 setting = open("%s/setting" % dirname, "w")
284 setting.write("%s\n" % key.lstrip("sysctl."))
286 value = open("%s/value" % dirname, "w")
287 value.write("%s\n" % self.rspec[key])
291 logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
293 logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
294 logger.log("sliver_vs: %s: error = %s"%(self.name,e))
297 if self.rspec['enabled'] > 0:
299 logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
304 logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
308 self.set_sched_config(cpu_pct, cpu_share)
309 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
310 if self.rspec['ip_addresses'] != '0.0.0.0':
311 logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
312 (self.name, self.rspec['ip_addresses']))
313 self.set_ipaddresses_config(self.rspec['ip_addresses'])
315 #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
316 #self.setname(self.slice_id)
317 #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
# Record the slice id on disk for PlanetFlow; failures are logged but
# not fatal (the surrounding try/except lines are missing from this view).
319 vserver_config_path = '/etc/vservers/%s'%self.name
320 if not os.path.exists (vserver_config_path):
321 os.makedirs (vserver_config_path)
322 file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
323 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
325 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
327 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
330 if self.enabled == False:
334 if False: # Does not work properly yet.
335 if self.have_limits_changed():
336 logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
# Bounded wait for the sliver to stop before restarting; 'stopcount'
# is initialized on a line missing from this view -- TODO confirm.
338 while self.is_running() and stopcount > 0:
342 stopcount = stopcount - 1
345 else: # tell vsh to disable remote login by setting CPULIMIT to 0
346 logger.log('sliver_vs: %s: disabling remote login' % self.name)
347 self.set_sched_config(0, 0)