There are a couple of tricky things going on here. First, the kernel
needs disk usage information in order to enforce the quota. However,
computing disk usage from scratch is expensive and strains the disks,
so it should only be done once. The Sliver_VS.disk_usage_initialized
flag records whether this initialization has already been done.
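
As an illustration, the initialization dance in set_resources() below
boils down to this (a sketch of this module's own code):

    if not self.disk_usage_initialized:
        Sliver_VS._init_disk_info_sem.acquire()    # one scan at a time
        try: self.init_disk_info()                 # expensive disk scan
        finally: Sliver_VS._init_disk_info_sem.release()
        self.disk_usage_initialized = True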

Second, it's not currently possible to set the scheduler parameters
for a sliver unless that sliver has a running process. /bin/vsh helps
us out by reading the configuration file so that it can set the
appropriate limits after entering the sliver context. Making the
syscall that actually sets the parameters gives a harmless error if no
process is running. Thus we keep vm_running on when setting scheduler
parameters so that set_sched_params() always makes the syscall, and we
don't have to guess if there is a running process or not.
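
Schematically the trick looks like this (a sketch only; the actual
set_sched_params() logic lives in the util-vserver-pl wrapper and may
differ in detail):

    self.vm_running = True                      # assume a process is running
    self.set_sched_config(cpu_pct, cpu_share)   # always issues the syscall;
                                                # harmless error if nothing runs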

import os, subprocess, time
from threading import BoundedSemaphore

# the util-vserver-pl module
import vserver

import logger
import tools
from account import Account
from initscript import Initscript

# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
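# e.g. if vserver.RLIMITS has a key 'NPROC', the table ends up with
# 'nproc_min', 'nproc_soft' and 'nproc_hard', all set to KEEP_LIMIT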

class Sliver_VS(vserver.VServer, Account, Initscript):
    """This class wraps vserver.VServer to make its interface closer to what we need."""

    TYPE = 'sliver.VServer'
    # serializes the expensive init_disk_info() scans across all slivers
    _init_disk_info_sem = BoundedSemaphore()

    def __init__(self, rec):
        name = rec['name']
        logger.verbose ('sliver_vs: %s init'%name)
        try:
            logger.log("sliver_vs: %s: first chance..."%name)
            vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
            Account.__init__ (self, name)
            Initscript.__init__ (self, name)
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
                self.destroy(name)
            self.create(name, rec)
            vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
            Account.__init__ (self, name)
            Initscript.__init__ (self, name)

        self.rspec = {}
        self.slice_id = rec['slice_id']
        self.disk_usage_initialized = False
        self.enabled = True
        # xxx this almost certainly is wrong...
        self.configure(rec)

    @staticmethod
    def create(name, rec = None):
        logger.verbose('sliver_vs: %s: create'%name)
        vref = rec['vref']
        if vref is None:
            logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
            # band-aid for short period as old API doesn't have GetSliceFamily function
            vref = "planetlab-f8-i386"

        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
            return

        # guess the target arch from names like 'planetlab-f8-i386'
        try:
            (x,y,arch) = vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right
        except ValueError:
            arch = 'i386'

        def personality (arch):
            personality = "linux32"
            if arch.find("64") >= 0:
                personality = "linux64"
            return personality
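        # e.g. personality('x86_64') -> 'linux64', personality('i386') -> 'linux32'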

        command = []
        # be verbose
        command += ['/bin/bash','-x',]
        command += ['/usr/sbin/vuseradd', ]
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
            command += ['-i',]
        # the vsliver image to use
        command += [ '-t', vref, ]
        command += [ name, ]
        logger.log_call(command, timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if archs differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))

    @staticmethod
    def destroy(name):
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])

    def configure(self, rec):
        # in case we update nodemanager..
        self.install_and_enable_vinit()

        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            self.set_resources()

        # do the configure part from Initscript
        Initscript.configure(self,rec)
        # install ssh keys
        Account.configure(self, rec)

    def start(self, delay=0):
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
            return
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
        time.sleep(delay)
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'omf_control' in self.rspec['tags']:
            self.expose_ssh_dir()
        # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        if child_pid == 0:
            # VServer.start calls fork() internally,
            # so just close the nonstandard fds and fork once to avoid creating zombies
            tools.close_nonstandard_fds()
            vserver.VServer.start(self)
            os._exit(0)
        else:
            os.waitpid(child_pid, 0)

    def stop(self):
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)

    def is_running(self):
        return vserver.VServer.is_running(self)

    # this one seems to belong in Initscript at first sight,
    # but actually depends on the underlying vm techno
    # so let's keep it here
    def rerun_slice_vinit(self):
        command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
        logger.log("sliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
        subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)

    def set_resources(self):
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try:    # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        except:
            logger.log_exc('sliver_vs: failed to set max disk usage', name=self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS.  Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            type = limit.lower()
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            if update:
                logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                           % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        count = 1
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl = key.split('.')
                try:
                    # /etc/vservers/<guest>/sysctl/<id>/
                    dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                    try:
                        os.makedirs(dirname, 0755)
                    except OSError:
                        pass
                    setting = open("%s/setting" % dirname, "w")
                    # write the key minus its 'sysctl.' prefix (lstrip would strip a character set, not a prefix)
                    setting.write("%s\n" % key[len("sysctl."):])
                    setting.close()
                    value = open("%s/value" % dirname, "w")
                    value.write("%s\n" % self.rspec[key])
                    value.close()
                    count += 1
                    logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                except IOError, e:
                    logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                    logger.log("sliver_vs: %s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            if cpu_pct > 0:
                logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            else: cpu_pct = 0
            if cpu_share > 0:
                logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            else: cpu_share = 0
            self.set_sched_config(cpu_pct, cpu_share)
            # if the IP address isn't set (even to 0.0.0.0), the sliver won't be able to use the network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            add_loopback = True
            if 'isolate_loopback' in self.rspec['tags']:
                add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
            self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)
274 #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
275 #self.setname(self.slice_id)
276 #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
278 vserver_config_path = '/etc/vservers/%s'%self.name
279 if not os.path.exists (vserver_config_path):
280 os.makedirs (vserver_config_path)
281 file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
282 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
284 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
286 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)

            if self.enabled == False:
                self.enabled = True
                self.start()

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    stopcount = 10
                    while self.is_running() and stopcount > 0:
                        self.stop()
                        time.sleep(1)
                        stopcount = stopcount - 1
                    self.start()

        else:   # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)
            self.stop()