10 from pwd import getpwnam
11 from string import Template
13 # vsys probably should not be a plugin
14 # the thing is, the right way to handle stuff would be that
15 # if slivers get created by doing a,b,c
16 # then they should be deleted by doing c,b,a
17 # the current ordering model for vsys plugins completely fails to capture that
18 from plugins.vsys import removeSliverFromVsys, startService as vsysStartService
23 import plnode.bwlimit as bwlimit
24 from initscript import Initscript
25 from account import Account
26 from sliver_libvirt import Sliver_Libvirt
class Sliver_LXC(Sliver_Libvirt, Initscript):
    """This class wraps LXC commands"""

    # login shell enforced on slice unix accounts (see useradd in create())
    SHELL = '/usr/sbin/vsh'

    # Need to add a tag at myplc to actually use this account
    # NOTE(review): code below also references Sliver_LXC.TYPE and BTRFS_TIMEOUT,
    # which are not visible in this extract -- presumably defined on elided lines.

    # reference images (one btrfs subvolume per vref) are snapshotted from here
    REF_IMG_BASE_DIR = '/vservers/.lvref'
    # each sliver gets its rootfs under CON_BASE_DIR/<slicename>
    CON_BASE_DIR = '/vservers'
def __init__(self, rec):
    """Initialize both parent layers from the account record *rec*.

    rec -- nodemanager account record; rec['name'] is the slice name
           (create()/destroy() below use the same name convention).
    """
    # BUG FIX: the visible body used `name` without defining it (NameError);
    # Initscript needs the slice name, which the record carries.
    name = rec['name']
    Sliver_Libvirt.__init__(self, rec)
    Initscript.__init__(self, name)
def configure(self, rec):
    """Apply the account record *rec* to this sliver.

    Order matters: the libvirt-level configuration runs first, then the
    generic vinit service is refreshed, then the Initscript part is applied.
    Remember configure() always gets called *before* start().
    """
    logger.log('========== sliver_lxc.configure {}'.format(self.name))
    Sliver_Libvirt.configure(self, rec)

    # in case we update nodemanager..
    # (re-install the generic /etc/init.d/vinit so it tracks our code)
    self.install_and_enable_vinit()
    # do the configure part from Initscript
    # (drops/refreshes the slice-provided initscript inside the container)
    Initscript.configure(self, rec)
55 # remember configure() always gets called *before* start()
56 # in particular the slice initscript
57 # is expected to be in place already at this point
def start(self, delay=0):
    """Start the sliver's container unless its rspec marks it disabled.

    delay -- forwarded to Sliver_Libvirt.start (seconds, default 0).

    configure() is guaranteed to have run before this, so the slice
    initscript is expected to be in place already at this point.
    """
    logger.log('==================== sliver_lxc.start {}'.format(self.name))
    if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
        logger.log('sliver_lxc: not starting {}, is not enabled'.format(self.name))
        # BUG FIX: without this return the code logged "not starting" but
        # then fell through and started the domain anyway
        return
    # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
    self.install_and_enable_vinit()
    # expose .ssh for omf_friendly slivers
    if 'tags' in self.rspec and 'omf_control' in self.rspec['tags']:
        Account.mount_ssh_dir(self.name)
    # logger.log("NM is exiting for debug - just about to start {}".format(self.name))
    Sliver_Libvirt.start(self, delay)
def rerun_slice_vinit(self):
    """This is called at startup, and whenever the initscript code changes

    Enters the container's namespace (self.name is the libvirt domain name)
    to make systemd reload its unit files, then restarts vinit.service so
    the new initscript takes effect.
    """
    logger.log("sliver_lxc.rerun_slice_vinit {}".format(self.name))
    # BUG FIX: both commands ended in a dangling backslash continuation and the
    # '{}' placeholder was never filled in -- restore the .format(self.name)
    plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl --system daemon-reload"\
        .format(self.name)
    command = plain.split()
    logger.log_call(command, timeout=3)
    plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl restart vinit.service"\
        .format(self.name)
    command = plain.split()
    logger.log_call(command, timeout=3)
# NOTE(review): this extract is missing lines throughout this function
# (try:/except: wrappers, .format(...) continuation lines, assignments such as
# slices_gid); each gap is flagged below. create() takes `name` rather than
# `self` -- presumably decorated @staticmethod on an elided line. TODO confirm
# against the full file before treating the flat structure here as authoritative.
def create(name, rec=None):
    """
    Create dirs, copy fs image, lxc_create
    """
    logger.verbose('sliver_lxc: {} create'.format(name))
    conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

    # NOTE(review): the code reading the vref tag from rec is elided; as shown
    # vref is unconditionally this hard-wired default
    vref = "lxc-f18-x86_64"
    logger.log("sliver_libvirt: {}: WARNING - no vref attached, using hard-wired default {}"
    # NOTE(review): the `.format(name, vref))` continuation of the call above is elided

    # compute guest arch from vref
    # essentially we want x86_64 (default) or i686 here for libvirt
    # NOTE(review): originally wrapped in try/except (elided)
    (x, y, arch) = vref.split('-')
    arch = "x86_64" if arch.find("64") >= 0 else "i686"

    # Get the type of image from vref myplc tags specified as:
    # NOTE(review): surrounding tag-handling code elided; only the lookup remains
    tags = rec['rspec']['tags']

    refImgDir = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, vref)
    containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)

    # check the template exists -- there's probably a better way..
    if not os.path.isdir(refImgDir):
        logger.log('sliver_lxc: {}: ERROR Could not create sliver - reference image {} not found'
        # NOTE(review): `.format(name, vref))` continuation elided
        logger.log('sliver_lxc: %s: ERROR Expected reference image in {}'.format(name, refImgDir))
        # NOTE(review): message above mixes '%s' with '{}' -- .format() leaves
        # the '%s' literal in the log line; probably meant '{}'
        # NOTE(review): an early `return` likely followed here (elided)

    # this hopefully should be fixed now
    # # in fedora20 we have some difficulty in properly cleaning up /vservers/<slicename>
    # # also note that running e.g. btrfs subvolume create /vservers/.lvref/image /vservers/foo
    # # behaves differently, whether /vservers/foo exists or not:
    # # if /vservers/foo does not exist, it creates /vservers/foo
    # # but if it does exist, then it creates /vservers/foo/image !!
    # # so we need to check the expected container rootfs does not exist yet
    # # this hopefully could be removed in a future release
    # if os.path.exists (containerDir):
    #     logger.log("sliver_lxc: {}: WARNING cleaning up pre-existing {}".format(name, containerDir))
    #     command = ['btrfs', 'subvolume', 'delete', containerDir]
    #     logger.log_call(command, BTRFS_TIMEOUT)
    #     if os.path.exists (containerDir):
    #         logger.log('sliver_lxc: {}: ERROR Could not create sliver - could not clean up empty {}'
    #                    .format(name, containerDir))

    # Snapshot the reference image fs
    # this assumes the reference image is in its own subvolume
    command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir]
    if not logger.log_call(command, timeout=BTRFS_TIMEOUT):
        logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at', containerDir)
        # NOTE(review): an early `return` likely followed here (elided)
    command = ['chmod', '755', containerDir]
    logger.log_call(command)

    # TODO: set quotas...

    # Set hostname. A valid hostname cannot have '_'
    #with open(os.path.join(containerDir, 'etc/hostname'), 'w') as f:
    #    print >>f, name.replace('_', '-')

    # Add slices group if not already present
    # NOTE(review): try/except wrapper elided -- groupadd presumably runs only
    # when getgrnam raises KeyError; confirm against full file
    group = grp.getgrnam('slices')
    command = ['/usr/sbin/groupadd', 'slices']
    logger.log_call(command)

    # Add unix account (TYPE is specified in the subclass)
    command = ['/usr/sbin/useradd', '-g', 'slices', '-s', Sliver_LXC.SHELL, name, '-p', '*']
    logger.log_call(command)
    command = ['mkdir', '/home/{}/.ssh'.format(name)]
    logger.log_call(command)

    # Create PK pair keys to connect from the host to the guest without
    # password... maybe remove the need for authentication inside the
    # guest
    command = ['su', '-s', '/bin/bash', '-c',
               'ssh-keygen -t rsa -N "" -f /home/{}/.ssh/id_rsa'.format(name)]
    logger.log_call(command)

    command = ['chown', '-R', '{}.slices'.format(name), '/home/{}/.ssh'.format(name)]
    logger.log_call(command)

    # copy the host-side public key into the guest's authorized_keys
    command = ['mkdir', '{}/root/.ssh'.format(containerDir)]
    logger.log_call(command)

    command = ['cp', '/home/{}/.ssh/id_rsa.pub'.format(name),
               '{}/root/.ssh/authorized_keys'.format(containerDir)]
    logger.log_call(command)

    logger.log("creating /etc/slicename file in {}".format(os.path.join(containerDir, 'etc/slicename')))
    # NOTE(review): try: wrapper elided; also `file()` is Python 2 only
    file(os.path.join(containerDir, 'etc/slicename'), 'w').write(name)
    # NOTE(review): the log_exc lines below belong to elided except: handlers
    logger.log_exc("exception while creating /etc/slicename")
    file(os.path.join(containerDir, 'etc/slicefamily'), 'w').write(vref)
    logger.log_exc("exception while creating /etc/slicefamily")

    # NOTE(review): try: wrapper elided around the lookup below
    uid = getpwnam(name).pw_uid
    # keyerror will happen if user id was not created successfully
    logger.log_exc("exception while getting user id")

    logger.log("uid is {}".format(uid))
    command = ['mkdir', '{}/home/{}'.format(containerDir, name)]
    logger.log_call(command)
    command = ['chown', name, '{}/home/{}'.format(containerDir, name)]
    logger.log_call(command)
    etcpasswd = os.path.join(containerDir, 'etc/passwd')
    etcgroup = os.path.join(containerDir, 'etc/group')
    if os.path.exists(etcpasswd):
        # create all accounts with gid=1001 - i.e. 'slices' like it is in the root context
        # NOTE(review): the `slices_gid = 1001` assignment is elided in this extract
        logger.log("adding user {name} id {uid} gid {slices_gid} to {etcpasswd}"
                   .format(**(locals()))
        # NOTE(review): closing paren / try: wrapper elided here
        with open(etcpasswd, 'a') as passwdfile:
            passwdfile.write("{name}:x:{uid}:{slices_gid}::/home/{name}:/bin/bash\n"
            # NOTE(review): `.format(**locals()))` continuation elided
        logger.log_exc("exception while updating {}".format(etcpasswd))
        logger.log("adding group slices with gid {slices_gid} to {etcgroup}"
        # NOTE(review): `.format(**locals()))` continuation and try: elided
        with open(etcgroup, 'a') as groupfile:
            groupfile.write("slices:x:{slices_gid}\n"
            # NOTE(review): `.format(**locals()))` continuation elided
        logger.log_exc("exception while updating {}".format(etcgroup))
    sudoers = os.path.join(containerDir, 'etc/sudoers')
    if os.path.exists(sudoers):
        # NOTE(review): try: wrapper elided; `file()` is Python 2 only
        file(sudoers, 'a').write("{} ALL=(ALL) NOPASSWD: ALL\n".format(name))
        logger.log_exc("exception while updating /etc/sudoers")

    # customizations for the user environment - root or slice uid
    # we save the whole business in /etc/planetlab.profile
    # and source this file for both root and the slice uid's .profile
    # prompt for slice owner, + LD_PRELOAD for transparently wrap bind
    pl_profile = os.path.join(containerDir, "etc/planetlab.profile")
    ld_preload_text = """# by default, we define this setting so that calls to bind(2),
# when invoked on 0.0.0.0, get transparently redirected to the public interface of this node
# see https://svn.planet-lab.org/wiki/LxcPortForwarding"""
    usrmove_path_text = """# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
    # NOTE(review): most of the shell snippet below and its closing triple-quote
    # are elided; as shown the string literal is unterminated
    usrmove_path_code = """
if ! echo $PATH | /bin/egrep -q "(^|:)$1($|:)" ; then
if [ "$2" = "after" ] ; then
pathmunge /sbin after
    with open(pl_profile, 'w') as f:
        f.write("export PS1='{}@\H \$ '\n".format(name))
        f.write("{}\n".format(ld_preload_text))
        f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
        f.write("{}\n".format(usrmove_path_text))
        f.write("{}\n".format(usrmove_path_code))

    # make sure this file is sourced from both root's and slice's .profile
    enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
    for path in [ 'root/.profile', 'home/{}/.profile'.format(name) ]:
        from_root = os.path.join(containerDir, path)
        # if dir is not yet existing let's forget it for now
        if not os.path.isdir(os.path.dirname(from_root)): continue
        # NOTE(review): try: wrapper elided; `file()` is Python 2 only
        contents = file(from_root).readlines()
        for content in contents:
            if content == enforced_line:
                # NOTE(review): body elided (upstream presumably flags the
                # line as already present and skips the append below)
        with open(from_root, "a") as user_profile:
            user_profile.write(enforced_line)
            # in case we create the slice's .profile when writing
            if from_root.find("/home") >= 0:
                command = ['chown', '{}:slices'.format(name), from_root]
                logger.log_call(command)

    # Lookup for xid and create template after the user is created so we
    # can get the correct xid based on the name of the slice
    xid = bwlimit.get_xid(name)

    # Template for libvirt sliver configuration
    template_filename_sliceimage = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, 'lxc_template.xml')
    if os.path.isfile (template_filename_sliceimage):
        logger.verbose("Using XML template {}".format(template_filename_sliceimage))
        template_filename = template_filename_sliceimage
        # NOTE(review): the `else:` header for the branch below is elided
        logger.log("Cannot find XML template {}".format(template_filename_sliceimage))

    interfaces = Sliver_Libvirt.get_interfaces_xml(rec)

    # NOTE(review): try: wrapper elided around the template expansion
    with open(template_filename) as f:
        template = Template(f.read())
        xml = template.substitute(name=name, xid=xid, interfaces=interfaces, arch=arch)
    # NOTE(review): the line below belongs to an elided except: handler
    logger.log('Failed to parse or use XML template file {}'.format(template_filename))

    # Lookup for the sliver before actually
    # defining it, just in case it was already defined.
    # NOTE(review): try: elided -- defineXML presumably runs in the except:
    # branch when lookupByName raises (domain not yet defined)
    dom = conn.lookupByName(name)
    dom = conn.defineXML(xml)
    logger.verbose('lxc_create: {} -> {}'.format(name, Sliver_Libvirt.dom_details(dom)))
# NOTE(review): the enclosing `def destroy(name)` header (and likely a
# @staticmethod decorator) is elided from this extract; the lines below are
# its body. Several try:/except: wrappers and the actual dom.destroy() /
# dom.undefine() calls are elided too -- flagged where they belong.

# umount .ssh directory - only if mounted
Account.umount_ssh_dir(name)
logger.verbose ('sliver_lxc: {} destroy'.format(name))
conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

containerDir = Sliver_LXC.CON_BASE_DIR + '/%s'%(name)

# Destroy libvirt domain
# NOTE(review): try: wrapper elided; the verbose message below belongs to the
# except: branch (domain absent), presumably followed by an early return
dom = conn.lookupByName(name)
logger.verbose('sliver_lxc.destroy: Domain %s does not exist!' % name)

# Slivers with vsys running will fail the subvolume delete
# removeSliverFromVsys return True if it stops vsys, telling us to start it again later
vsys_stopped = removeSliverFromVsys (name)

logger.log("sliver_lxc.destroy: destroying domain %s"%name)
# NOTE(review): dom.destroy() call elided; the message below is its except: branch
logger.verbose('sliver_lxc.destroy: Domain %s not running... continuing.' % name)

logger.log("sliver_lxc.destroy: undefining domain %s"%name)
# NOTE(review): dom.undefine() call elided; the message below is its except: branch
logger.verbose('sliver_lxc.destroy: Domain %s is not defined... continuing.' % name)

# Remove user after destroy domain to force logout
command = ['/usr/sbin/userdel', '-f', '-r', name]
logger.log_call(command)

# Remove rootfs of destroyed domain
command = ['/usr/bin/rm', '-rf', containerDir]
logger.log_call(command, timeout=BTRFS_TIMEOUT)

# debug dump before attempting the btrfs subvolume delete below
logger.log("-TMP-ls-l %s"%name)
command = ['ls', '-lR', containerDir]
logger.log_call(command)
logger.log("-TMP-vsys-status")
command = ['/usr/bin/systemctl', 'status', 'vsys']
logger.log_call(command)

# Remove rootfs of destroyed domain
command = ['btrfs', 'subvolume', 'delete', containerDir]
logger.log_call(command, timeout=BTRFS_TIMEOUT)

# For some reason I am seeing this :
#log_call: running command btrfs subvolume delete /vservers/inri_sl1
#log_call: ERROR: cannot delete '/vservers/inri_sl1' - Device or resource busy
#log_call: Delete subvolume '/vservers/inri_sl1'
#log_call:end command (btrfs subvolume delete /vservers/inri_sl1) returned with code 1

# something must have an open handle to a file in there, but I can't find out what it is
# the following code aims at gathering data on what is going on in the system at this point in time
# note that some time later (typically when the sliver gets re-created) the same
# attempt at deleting the subvolume does work
# also lsof never shows anything relevant; this is painful..

if not os.path.exists(containerDir):
    logger.log('sliver_lxc.destroy: %s cleanly destroyed.'%name)
    # NOTE(review): an early return -- and restarting vsys when vsys_stopped
    # is True (see vsysStartService import) -- is presumably elided here

#logger.log("-TMP-cwd %s : %s"%(name, os.getcwd()))
# also lsof never shows anything relevant; this is painful..
#logger.log("-TMP-lsof %s"%name)
#logger.log_call(command)
logger.log("-TMP-ls-l %s"%name)
command = ['ls', '-lR', containerDir]
logger.log_call(command)
logger.log("-TMP-lsof")
# NOTE(review): the assignment building the lsof command is elided, so this
# re-runs the previous `ls -lR` command as shown
logger.log_call(command)
if os.path.exists(containerDir):
    logger.log('sliver_lxc.destroy: ERROR could not cleanly destroy %s - giving up'%name)