5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has been made.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
34 from account import Account
35 from initscript import Initscript
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # NOTE(review): 'rlim' is bound on a line not visible in this listing
    # (presumably rlim = rlimit.lower()) -- confirm against the full file.
    DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
class Sliver_VS(vserver.VServer, Account, Initscript):
    """This class wraps vserver.VServer to make its interface closer to what we need."""

    # presumably the account-type tag used to look this class up -- verify against caller
    TYPE = 'sliver.VServer'
    # serializes the expensive disk-usage scan across slivers (acquired in
    # set_resources around init_disk_info) so concurrent scans don't strain the disks
    _init_disk_info_sem = BoundedSemaphore()
    def __init__(self, rec):
        # NOTE(review): this listing is incomplete -- the binding of 'name'
        # (presumably name = rec['name']) and the opening try: of the
        # first-chance block are not visible here.
        logger.verbose ('sliver_vs: %s init'%name)
        # first chance: assume the vserver already exists on disk
        logger.log("sliver_vs: %s: first chance..."%name)
        vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
        Account.__init__ (self, name)
        Initscript.__init__ (self, name)
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s", name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
            # second chance: (re)create the vserver image, then initialize again
            self.create(name, rec)
            vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
            Account.__init__ (self, name)
            Initscript.__init__ (self, name)

        self.slice_id = rec['slice_id']
        # disk usage is computed lazily in set_resources() -- see module docstring
        self.disk_usage_initialized = False
    # xxx this almost certainly is wrong...
    # NOTE(review): listing is incomplete here -- the @staticmethod decorator
    # and several body lines (vref lookup from rec, early returns, the
    # initialization of 'command') are not visible in this view.
    def create(name, rec = None):
        # Build a fresh vserver image for slice 'name' by running /usr/sbin/vuseradd.
        logger.verbose('sliver_vs: %s: create'%name)
        # band-aid for short period as old API doesn't have GetSliceFamily function
        vref = "planetlab-f8-i386"
        logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name, vref))

        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key

        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name, vref))

        # compute guest personality
        (x, y, arch) = vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right

        # map the vref arch string onto a vserver personality name
        def personality (arch):
            return "linux64" if arch.find("64") >= 0 else "linux32"

        command += ['/bin/bash', '-x', ]
        command += ['/usr/sbin/vuseradd', ]
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
        # the vsliver image to use
        command += [ '-t', vref, ]
        logger.log_call(command, timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name, personality(arch)))
        # NOTE(review): the 'def destroy' header is not visible in this
        # listing; these lines are the tail of the (static) destroy method.
        # need to umount before we trash, otherwise we end up with sequels in
        # /vservers/slicename/ (namely in home/ )
        # also because this is a static method we cannot check for 'omf_control'
        # but it is no big deal as umount_ssh_dir checks before it umounts..
        Account.umount_ssh_dir(name)
        logger.log("sliver_vs: destroying %s"%name)
        logger.log_call(['/bin/bash', '-x', '/usr/sbin/vuserdel', name, ])
    def configure(self, rec):
        # Apply a new sliver record: refresh vinit, record the new rspec,
        # then run the Initscript and Account configuration steps.
        # in case we update nodemanager..
        self.install_and_enable_vinit()

        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): listing incomplete -- whatever follows the rspec
            # change (presumably self.set_resources()) is not visible here.

        # do the configure part from Initscript
        # i.e. install slice initscript if defined
        Initscript.configure(self, rec)
        # generic account-level configuration (keys etc.) from Account
        Account.configure(self, rec)
    # remember configure() always gets called *before* start()
    # in particular the slice initscript
    # is expected to be in place already at this point
    def start(self, delay=0):
        # Start the sliver's vserver; no-op when the slice is not enabled.
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
            # NOTE(review): listing incomplete -- the early return after this
            # log line is not visible here.
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'omf_control' in self.rspec['tags']:
            Account.mount_ssh_dir(self.name)
        child_pid = os.fork()
        # NOTE(review): the 'if child_pid == 0:' / else branch structure is not
        # visible in this listing; the lines below are the two fork branches.
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        # parent: reap the intermediate child
        os.waitpid(child_pid, 0)
        # NOTE(review): the 'def stop' header is not visible in this listing;
        # these lines are the body, delegating the actual stop to VServer.
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)
185 def is_running(self):
186 return vserver.VServer.is_running(self)
188 # this one seems to belong in Initscript at first sight,
189 # but actually depends on the underlying vm techno
190 # so let's keep it here
191 def rerun_slice_vinit(self):
192 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
193 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
194 subprocess.call(command + "&", stdin=open('/dev/null', 'r'),
195 stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
    def set_resources(self):
        # Apply self.rspec to the vserver: disk quota, rlimits, capabilities,
        # sysctls, CPU scheduler settings and IP addresses.
        # NOTE(review): this listing is heavily truncated -- try/except
        # headers, loop counters and several branches are missing; the visible
        # lines are kept verbatim with the gaps flagged below.
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize the disk scan across slivers (see class attribute)
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            # never shrink below current usage, or set_disklimit would fail
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
            # NOTE(review): the 'except' header for the next line is not visible here
            logger.log_exc('sliver_vs: failed to set max disk usage', name=self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): 'type' is bound on a line not visible here
            # (presumably type = limit.lower()) -- confirm against full file.
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s'
                       % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # write pending sysctl settings under the guest's config dir
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl = key.split('.')
                # /etc/vservers/<guest>/sysctl/<id>/
                # NOTE(review): 'count' is bound on lines not visible here
                dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                os.makedirs(dirname, 0755)
                setting = open("%s/setting" % dirname, "w")
                # BUG(review): lstrip("sysctl.") strips a *character set*, not
                # the prefix -- keys whose remainder starts with s/y/c/t/l/.
                # get extra characters removed; likely meant key[len('sysctl.'):]
                setting.write("%s\n" % key.lstrip("sysctl."))
                value = open("%s/value" % dirname, "w")
                value.write("%s\n" % self.rspec[key])
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name, key, self.rspec[key]))
                # NOTE(review): the except header for the two log lines below
                # is not visible in this listing
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name, key, self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name, e))

        if self.rspec['enabled'] > 0:
            logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            # see module docstring: vm_running is kept on so this always
            # makes the syscall even when no process runs yet
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
            # add_loopback defaults on an invisible line; isolate_loopback=="1" disables it
            if 'isolate_loopback' in self.rspec['tags']:
                add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
            self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)

            #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
            #self.setname(self.slice_id)
            #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
            # record the slice id so PlanetFlow can attribute traffic
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id, self.name))
            # NOTE(review): the except header for the line below is not visible
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name, str(e)))
            except Exception as e:
                logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e), name=self.name)

            # NOTE(review): the body of this branch (presumably enabling and
            # starting the sliver) is not visible in this listing
            if self.enabled == False:

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    # NOTE(review): 'stopcount' init and the stop/sleep calls
                    # inside this loop are not visible in this listing
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1

        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)