5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
25 from threading import BoundedSemaphore
28 # the util-vserver-pl module
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # RLIMITS keys are upper-case resource names (e.g. 'NPROC'); the
    # slice-attribute names use the lower-case form (e.g. 'nproc_hard').
    # Binding 'rlim' here fixes the NameError in the loop body below.
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""

    # account-type tag identifying this class to the accounts registry
    TYPE = 'sliver.VServer'
    # serializes init_disk_info() across slivers -- the disk scan is
    # expensive, so only one may run at a time (see set_resources)
    _init_disk_info_sem = BoundedSemaphore()
    def __init__(self, rec):
        """Bind this object to the on-disk vserver for the slice described
        by rec; if wrapping the existing vserver fails, (re)create it and
        wrap it again.

        NOTE(review): this chunk elides several lines -- the binding of
        'name' (presumably rec['name']) and the 'try:' that the first
        VServer.__init__ call sits inside; confirm against the full source.
        """
        logger.verbose ('sliver_vs: %s init'%name)
        # first chance: wrap the vserver as it already exists on disk
        logger.log("sliver_vs: %s: first chance..."%name)
        vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        # (the matching 'try:' for this handler is elided above)
        except Exception, err:
            # NoSuchVServer is the normal case for a brand-new sliver;
            # anything else indicates a corrupt vserver or config file
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
            # second chance: (re)create the vserver, then wrap it again
            self.create(name, rec['vref'])
            logger.log("sliver_vs: %s: second chance..."%name)
            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')

        self.slice_id = rec['slice_id']
        # defer the expensive disk scan until set_resources() needs it
        # (see module docstring)
        self.disk_usage_initialized = False
    # NOTE(review): upstream this is a @staticmethod; the decorator line is
    # elided from this chunk.
    def create(name, vref = None):
        """Build a brand-new vserver for slice 'name' from reference image
        'vref', then seed /etc/slicename and /etc/slicefamily inside it and
        -- when the image architecture differs from the host's -- write the
        guest personality file.

        NOTE(review): several lines are elided from this chunk: the
        'if not vref:' guard, the early 'return's after fatal errors,
        and the 'return personality' at the end of the nested helper.
        """
        logger.verbose('sliver_vs: %s: create'%name)
        # (presumably under an elided 'if not vref:' guard)
        logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
        # band-aid for short period as old API doesn't have GetSliceFamily function
        vref = "planetlab-f8-i386"

        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
            # (an early 'return' is elided here)

        # derive the guest architecture from the vref name, e.g. planetlab-f8-i386
        (x,y,arch)=vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right

        def personality (arch):
            # helper: map an architecture string onto a setarch personality
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
            # (the 'return personality' line is elided from this chunk)

        # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
    # NOTE(review): the enclosing 'def destroy(name):' header (a
    # @staticmethod upstream) is elided from this chunk; these lines are
    # its body -- tear the vserver down via vuserdel, traced with bash -x.
        # logger.log_call(['/usr/sbin/vuserdel', name, ])
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
126 def configure(self, rec):
127 new_rspec = rec['_rspec']
128 if new_rspec != self.rspec:
129 self.rspec = new_rspec
132 new_initscript = rec['initscript']
133 if new_initscript != self.initscript:
134 self.initscript = new_initscript
135 # not used anymore, we always check against the installed script
136 #self.initscriptchanged = True
137 self.refresh_slice_vinit()
139 accounts.Account.configure(self, rec) # install ssh keys
141 # unconditionnally install and enable the generic vinit script
142 # mimicking chkconfig for enabling the generic vinit script
143 # this is hardwired for runlevel 3
144 def install_and_enable_vinit (self):
145 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
146 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
147 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
148 rc3_target="../init.d/vinit"
150 body=file(vinit_source).read()
151 if tools.replace_file_with_string(vinit_script,body,chmod=0755):
152 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
153 # create symlink for runlevel 3
154 if not os.path.islink(rc3_link):
156 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
157 os.symlink(rc3_target,rc3_link)
159 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
    # this one checks for the existence of the slice initscript
    # install or remove the slice initscript, as instructed by the initscript tag
    def refresh_slice_vinit(self):
        """Sync the slice-provided initscript into the sliver as
        /etc/rc.d/init.d/vinit.slice, removing the file when the tag is
        empty.

        NOTE(review): the line binding 'body' (presumably self.initscript)
        and the 'if body:' / 'else:' selecting between the two log
        messages below are elided from this chunk.
        """
        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
        if tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755):
            logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
            logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
    # bind mount root side dir to sliver side
    # needs to be done before sliver starts
    def expose_ssh_dir (self):
        """Bind-mount /home/<slice>/.ssh read-only into the sliver's home
        directory so the sliver can see the account's ssh keys.

        NOTE(review): this chunk elides the enclosing 'try:' matching the
        log_exc at the bottom, plus the mkdir/error bodies under the two
        path checks.
        """
        root_ssh="/home/%s/.ssh"%self.name
        sliver_ssh="/vservers/%s/home/%s/.ssh"%(self.name,self.name)
        # any of both might not exist yet
        for path in [root_ssh,sliver_ssh]:
            if not os.path.exists (path):
                # (directory-creation body elided)
            if not os.path.isdir (path):
                # (error-handling body elided)
        # skip the mount when /proc/mounts shows it is already in place
        mounts=file('/proc/mounts').read()
        if mounts.find(sliver_ssh)<0:
            subprocess.call("mount --bind -o ro %s %s"%(root_ssh,sliver_ssh),shell=True)
            logger.log("expose_ssh_dir: %s mounted into slice %s"%(root_ssh,self.name))
        # (the 'except:' header for this handler is elided)
        logger.log_exc("expose_ssh_dir with slice %s failed"%self.name)
    def start(self, delay=0):
        """Start the sliver (unless its rspec disables it): refresh the
        vinit machinery, optionally expose .ssh for OMF, then fork a child
        that hands off to VServer.start.

        NOTE(review): control-flow lines are elided from this chunk -- the
        'else:' (or 'return') after the disabled check, any delay sleep,
        and the 'if child_pid == 0:' / 'else:' pair around the fork.
        """
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'omf_control' in self.rspec['tags']:
            self.expose_ssh_dir()
        # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        # (elided: 'if child_pid == 0:' -- the next lines run in the child)
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        # (elided: the child's 'os._exit(0)' and the parent 'else:' branch)
        os.waitpid(child_pid, 0)
    # NOTE(review): the 'def stop(self):' header is elided from this
    # chunk; these two lines are its body -- log, then delegate the
    # actual shutdown to VServer.stop.
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)
219 def is_running(self):
220 return vserver.VServer.is_running(self)
    def set_resources(self):
        """Push the current rspec into the vserver: disk quota, rlimits,
        capabilities, per-guest sysctls, CPU scheduler settings, IP
        addresses, and the recorded slice_id for PlanetFlow.

        NOTE(review): this chunk elides many lines ('except' headers,
        loop-variable bindings, guards and 'else:' branches); each gap is
        flagged inline below.
        """
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize the expensive disk scan across all slivers
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        # (the 'except' header for this handler is elided)
            logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # (elided: binding of 'type' -- presumably limit.lower())
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # (elided: 'if update:' guard around this log)
            logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # translate sysctl.* rspec tags into util-vserver per-guest sysctl files
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                # /etc/vservers/<guest>/sysctl/<id>/
                # (elided: initialization/increment of 'count')
                dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                # (elided: 'try:' around the directory/file writes)
                os.makedirs(dirname, 0755)
                setting = open("%s/setting" % dirname, "w")
                # NOTE(review): lstrip("sysctl.") strips *characters*, not a
                # prefix -- it can also eat leading letters of the setting name
                setting.write("%s\n" % key.lstrip("sysctl."))
                value = open("%s/value" % dirname, "w")
                value.write("%s\n" % self.rspec[key])
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                # (elided: the 'except ...  e:' header for these two logs)
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            # (elided: guards selecting between reservation and share)
            logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            self.set_ipaddresses_config(self.rspec['ip_addresses'])

            #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
            #self.setname(self.slice_id)
            #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
            # (elided: 'try:' around recording the slice id)
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
            # (elided: the 'except ... e:' header for the next log)
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
            # (elided: the 'except ... e:' header for the next log)
            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)

            if self.enabled == False:
                # (elided: body re-enabling the sliver)

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    # (elided: 'stopcount' initialization and the stop/sleep calls)
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1

        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)