5 There are a couple of tricky things going on here. First, the kernel
6 needs disk usage information in order to enforce the quota. However,
7 determining disk usage redundantly strains the disks. Thus, the
8 Sliver_VS.disk_usage_initialized flag is used to determine whether
9 this initialization has already been performed.
11 Second, it's not currently possible to set the scheduler parameters
12 for a sliver unless that sliver has a running process. /bin/vsh helps
13 us out by reading the configuration file so that it can set the
14 appropriate limits after entering the sliver context. Making the
15 syscall that actually sets the parameters gives a harmless error if no
16 process is running. Thus we keep vm_running on when setting scheduler
17 parameters so that set_sched_params() always makes the syscall, and we
18 don't have to guess if there is a running process or not.
# NOTE(review): this listing is line-sampled; gaps in the embedded line
# numbers mark elided source lines (e.g. the other imports around here).
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
34 from account import Account
35 from initscript import Initscript
37 # special constant that tells vserver to keep its existing settings
38 KEEP_LIMIT = vserver.VC_LIM_KEEP
40 # populate the sliver/vserver specific default allocations table,
41 # which is used to look for slice attributes
42 DEFAULT_ALLOCATION = {}
43 for rlimit in vserver.RLIMITS.keys():
# NOTE(review): `rlim` below is presumably derived from `rlimit` on the
# elided line 44 (likely a lowercased copy) -- confirm against full source.
45     DEFAULT_ALLOCATION["{}_min".format(rlim)] = KEEP_LIMIT
46     DEFAULT_ALLOCATION["{}_soft".format(rlim)] = KEEP_LIMIT
47     DEFAULT_ALLOCATION["{}_hard".format(rlim)] = KEEP_LIMIT
49 class Sliver_VS(vserver.VServer, Account, Initscript):
50 """This class wraps vserver.VServer to make its interface closer to what we need."""
# Account-type tag used by the nodemanager to dispatch on sliver flavor.
53 TYPE = 'sliver.VServer'
# Class-wide semaphore serializing the expensive init_disk_info() scans
# (acquired in set_resources) so concurrent slivers don't strain the disks.
54 _init_disk_info_sem = BoundedSemaphore()
56 def __init__(self, rec):
# NOTE(review): elided lines 57/59 presumably bind `name` (from rec) and
# open the try: that the except on line 64 closes -- confirm in full source.
58 logger.verbose ('sliver_vs: {} init'.format(name))
60 logger.log("sliver_vs: {}: first chance...".format(name))
# First chance: assume the vserver already exists on disk.
61 vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
62 Account.__init__ (self, name)
63 Initscript.__init__ (self, name)
# Python-2 comma-style except; the rest of the file uses the
# `except Exception as e` form (line 305) -- inconsistent but equivalent.
64 except Exception, err:
65 if not isinstance(err, vserver.NoSuchVServer):
66 # Probably a bad vserver or vserver configuration file
67 logger.log_exc("sliver_vs:__init__ (first chance)", name=name)
68 logger.log('sliver_vs: {}: recreating bad vserver'.format(name))
# Second chance: (re)create the vserver from scratch, then retry the
# same three-way initialization.
70 self.create(name, rec)
71 vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
72 Account.__init__ (self, name)
73 Initscript.__init__ (self, name)
76 self.slice_id = rec['slice_id']
# Deferred: actual disk scan happens lazily in set_resources() under
# _init_disk_info_sem (see module docstring).
77 self.disk_usage_initialized = False
79 # xxx this almost certainly is wrong...
# Build a fresh vserver image for slice `name` via /usr/sbin/vuseradd,
# then stamp slicename/slicefamily/personality into the guest.
# NOTE(review): the @staticmethod decorator (elided line ~82) and several
# body lines (85-87, 91-92, 96, 100-102, 104, 108-110, 113-115, 119,
# 122-123) are not visible in this sampled listing.
83 def create(name, rec = None):
84 logger.verbose('sliver_vs: {}: create'.format(name))
88 # band-aid for short period as old API doesn't have GetSliceFamily function
89 vref = "planetlab-f8-i386"
90 logger.log("sliver_vs: {}: ERROR - no vref attached, using hard-wired default {}"
93 # used to look in /etc/planetlab/family,
94 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
95 # which for legacy is still exposed here as the 'vref' key
97 # check the template exists -- there's probably a better way..
98 if not os.path.isdir ("/vservers/.vref/{}".format(vref)):
99 logger.log ("sliver_vs: {}: ERROR Could not create sliver - vreference image {} not found"
103 # compute guest personality
# vref naming convention assumed here: <distro>-<release>-<arch>.
105 (x, y, arch) = vref.split('-')
106 # mh, this of course applies when 'vref' is e.g. 'netflow'
107 # and that's not quite right
# Local helper: map an arch string to the vserver personality setting.
111 def personality (arch):
112 return "linux64" if arch.find("64") >= 0 else "linux32"
116 command += ['/bin/bash', '-x', ]
117 command += ['/usr/sbin/vuseradd', ]
118 if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
120 # the vsliver image to use
121 command += [ '-t', vref, ]
# vuseradd builds the whole guest filesystem; allow up to 15 minutes.
124 logger.log_call(command, timeout=15*60)
125 # export slicename to the slice in /etc/slicename
126 with open('/vservers/{}/etc/slicename'.format(name), 'w') as slicenamefile:
127 slicenamefile.write(name)
128 with open('/vservers/{}/etc/slicefamily'.format(name), 'w') as slicefamilyfile:
129 slicefamilyfile.write(vref)
130 # set personality: only if needed (if arch's differ)
131 if tools.root_context_arch() != arch:
132 with open('/etc/vservers/{}/personality'.format(name), 'w') as personalityfile:
133 personalityfile.write(personality(arch)+"\n")
134 logger.log('sliver_vs: {}: set personality to {}'.format(name, personality(arch)))
# NOTE(review): the enclosing `def destroy(name)` header (elided line ~137)
# is not visible in this sampled listing; these lines are its body.
138 # need to umount before we trash, otherwise we end up with sequels in
139 # /vservers/slicename/ (namely in home/ )
140 # also because this is a static method we cannot check for 'omf_control'
141 # but it is no big deal as umount_ssh_dir checks before it umounts..
142 Account.umount_ssh_dir(name)
143 logger.log("sliver_vs: destroying {}".format(name))
# vuserdel tears down the guest filesystem and configuration.
144 logger.log_call(['/bin/bash', '-x', '/usr/sbin/vuserdel', name, ])
# Refresh this sliver's configuration from an updated GetSlivers record.
147 def configure(self, rec):
148 # in case we update nodemanager..
149 self.install_and_enable_vinit()
# Only store the new rspec when it actually changed.
151 new_rspec = rec['_rspec']
152 if new_rspec != self.rspec:
153 self.rspec = new_rspec
156 # do the configure part from Initscript
157 # i.e. install slice initscript if defined
158 Initscript.configure(self, rec)
160 Account.configure(self, rec)
162 # remember configure() always gets called *before* start()
163 # in particular the slice initscript
164 # is expected to be in place already at this point
# Start the sliver VM (in the child of a fork); no-op when disabled.
# NOTE(review): several lines are elided here (168, 170, 177, 182-183),
# including the child/parent branch of the fork -- confirm in full source.
165 def start(self, delay=0):
166 if self.rspec['enabled'] <= 0:
167 logger.log('sliver_vs: not starting {}, is not enabled'.format(self.name))
169 logger.log('sliver_vs: {}: starting in {} seconds'.format(self.name, delay))
171 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
172 self.install_and_enable_vinit()
173 # expose .ssh for omf_friendly slivers
174 if 'omf_control' in self.rspec['tags']:
175 Account.mount_ssh_dir(self.name)
176 child_pid = os.fork()
178 # VServer.start calls fork() internally,
179 # so just close the nonstandard fds and fork once to avoid creating zombies
180 tools.close_nonstandard_fds()
181 vserver.VServer.start(self)
# Parent: reap the child so it does not linger as a zombie.
184 os.waitpid(child_pid, 0)
# NOTE(review): the enclosing `def stop(self)` header (elided line ~186)
# is not visible in this sampled listing; these lines are its body.
187 logger.log('sliver_vs: {}: stopping'.format(self.name))
188 vserver.VServer.stop(self)
# Thin delegation: the running check lives in the underlying VServer.
190 def is_running(self):
191 return vserver.VServer.is_running(self)
193 # this one seems to belong in Initscript at first sight,
194 # but actually depends on the underlying vm techno
195 # so let's keep it here
# Restart the slice's vinit initscript inside the guest, fire-and-forget.
196 def rerun_slice_vinit(self):
# NOTE(review): the `.format(self.name)` continuation of this string
# (elided line 198) is not visible in this sampled listing.
197 command = "/usr/sbin/vserver {} exec /etc/rc.d/init.d/vinit restart"\
199 logger.log("vsliver_vs: {}: Rerunning slice initscript: {}"
200 .format(self.name, command))
# shell=True because the trailing "&" backgrounds the restart; stdout and
# stderr are discarded to /dev/null.
201 subprocess.call(command + "&", stdin=open('/dev/null', 'r'),
202 stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
# Apply the rspec to the running sliver: disk quota, rlimits, capabilities,
# CPU scheduling, sysctls, IP addresses, slice_id bookkeeping, and finally
# enable/disable remote login. NOTE(review): this sampled listing elides
# many lines (219, 221, 226, 231, 234, 239, 242-243, 247, 250, 252-253,
# 258-259, 262, 266-267, 269, 272-275, 278-280, 286, 290, 294, 302,
# 307-312, 316, 318-320, 322-323), including several bindings referenced
# below -- confirm against the full source before changing anything.
204 def set_resources(self):
205 disk_max = self.rspec['disk_max']
206 logger.log('sliver_vs: {}: setting max disk usage to {} KiB'
207 .format(self.name, disk_max))
208 try: # if the sliver is over quota, .set_disk_limit will throw an exception
209 if not self.disk_usage_initialized:
# vm_running is forced off while the usage scan runs (see module docstring
# for why vm_running is juggled around scheduler/limit syscalls).
210 self.vm_running = False
# Serialize the disk scan across slivers -- it is expensive.
211 Sliver_VS._init_disk_info_sem.acquire()
212 logger.log('sliver_vs: {}: computing disk usage: beginning'.format(self.name))
213 # init_disk_info is inherited from VServer
214 try: self.init_disk_info()
215 finally: Sliver_VS._init_disk_info_sem.release()
216 logger.log('sliver_vs: {}: computing disk usage: ended'.format(self.name))
217 self.disk_usage_initialized = True
# Never set the limit below current usage (disk_blocks).
218 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
220 logger.log_exc('sliver_vs: failed to set max disk usage', name=self.name)
222 # get/set the min/soft/hard values for all of the vserver
223 # related RLIMITS. Note that vserver currently only
224 # implements support for hard limits.
225 for limit in vserver.RLIMITS.keys():
# NOTE(review): `type` used below is presumably derived from `limit` on
# the elided line 226 (shadowing the builtin) -- confirm in full source.
227 minimum = self.rspec['{}_min'.format(type)]
228 soft = self.rspec['{}_soft'.format(type)]
229 hard = self.rspec['{}_hard'.format(type)]
230 update = self.set_rlimit(limit, hard, soft, minimum)
232 logger.log('sliver_vs: {}: setting rlimit {} to ({}, {}, {})'
233 .format(self.name, type, hard, soft, minimum))
235 self.set_capabilities_config(self.rspec['capabilities'])
236 if self.rspec['capabilities']:
237 logger.log('sliver_vs: {}: setting capabilities to {}'
238 .format(self.name, self.rspec['capabilities']))
240 cpu_pct = self.rspec['cpu_pct']
241 cpu_share = self.rspec['cpu_share']
# Materialize any 'sysctl.*' rspec keys as vserver sysctl config dirs.
244 for key in self.rspec.keys():
245 if key.find('sysctl.') == 0:
246 sysctl = key.split('.')
248 # /etc/vservers/<guest>/sysctl/<id>/
# NOTE(review): `count` is presumably a running id initialized/incremented
# on elided lines -- confirm in full source.
249 dirname = "/etc/vservers/{}/sysctl/{}".format(self.name, count)
# Python-2 octal literal (0755); Python 3 would need 0o755.
251 os.makedirs(dirname, 0755)
254 with open("{}/setting".format(dirname), "w") as setting:
# NOTE(review): lstrip("sysctl.") strips a *character set*, not the
# prefix -- it would also eat leading 's','y','c','t','l','.' from the
# setting name; flagged for verification against the full source.
255 setting.write("{}\n".format(key.lstrip("sysctl.")))
256 with open("{}/value".format(dirname), "w") as value:
257 value.write("{}\n".format(self.rspec[key]))
260 logger.log("sliver_vs: {}: writing {}={}"
261 .format(self.name, key, self.rspec[key]))
# NOTE(review): `e` below comes from an except clause on an elided line.
263 logger.log("sliver_vs: {}: could not set {}={}"
264 .format(self.name, key, self.rspec[key]))
265 logger.log("sliver_vs: {}: error = {}".format(self.name, e))
268 if self.rspec['enabled'] > 0:
270 logger.log('sliver_vs: {}: setting cpu reservation to {}%'
271 .format(self.name, cpu_pct))
276 logger.log('sliver_vs: {}: setting cpu share to {}'
277 .format(self.name, cpu_share))
281 self.set_sched_config(cpu_pct, cpu_share)
282 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
283 if self.rspec['ip_addresses'] != '0.0.0.0':
284 logger.log('sliver_vs: {}: setting IP address(es) to {}'
285 .format(self.name, self.rspec['ip_addresses']))
# Loopback is isolated unless the tag explicitly says "1".
287 if 'isolate_loopback' in self.rspec['tags']:
288 add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
289 self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)
291 #logger.log("sliver_vs: {}: Setting name to {}".format(self.name, self.slice_id))
292 #self.setname(self.slice_id)
293 #logger.log("sliver_vs: {}: Storing slice id of {} for PlanetFlow".format(self.name, self.slice_id))
# Record slice_id on disk so PlanetFlow can map traffic back to the slice.
295 vserver_config_path = '/etc/vservers/{}'.format(self.name)
296 if not os.path.exists (vserver_config_path):
297 os.makedirs (vserver_config_path)
298 with open('{}/slice_id'.format(vserver_config_path), 'w') as sliceidfile:
299 sliceidfile.write("{}\n".format(self.slice_id))
300 logger.log("sliver_vs: Recorded slice id {} for slice {}"
301 .format(self.slice_id, self.name))
# NOTE(review): the except header binding `e` here is on an elided line.
303 logger.log("sliver_vs: Could not record slice_id for slice {}. Error: {}"
304 .format(self.name, str(e)))
305 except Exception as e:
306 logger.log_exc("sliver_vs: Error recording slice id: {}".format(e), name=self.name)
309 if self.enabled == False:
# Dead code kept deliberately by the original authors:
313 if False: # Does not work properly yet.
314 if self.have_limits_changed():
315 logger.log('sliver_vs: {}: limits have changed --- restarting'.format(self.name))
# NOTE(review): `stopcount` is initialized on an elided line (~316).
317 while self.is_running() and stopcount > 0:
321 stopcount = stopcount - 1
324 else: # tell vsh to disable remote login by setting CPULIMIT to 0
325 logger.log('sliver_vs: {}: disabling remote login'.format(self.name))
326 self.set_sched_config(0, 0)