10 from pwd import getpwnam
11 from string import Template
13 # vsys probably should not be a plugin
14 # the thing is, the right way to handle stuff would be that
15 # if slivers get created by doing a,b,c
16 # then they should be deleted by doing c,b,a
17 # the current ordering model for vsys plugins completely fails to capture that
18 from plugins.vsys import removeSliverFromVsys, startService as vsysStartService
23 import plnode.bwlimit as bwlimit
24 from initscript import Initscript
25 from account import Account
26 from sliver_libvirt import Sliver_Libvirt
class Sliver_LXC(Sliver_Libvirt, Initscript):
    """This class wraps LXC commands"""
    # login shell assigned to the slice's unix account on the host
    SHELL = '/usr/sbin/vsh'
    # Need to add a tag at myplc to actually use this account
    # where per-vref reference images (btrfs subvolumes) are stored
    REF_IMG_BASE_DIR = '/vservers/.lvref'
    # where each sliver's container rootfs is created (one subdir per slice)
    CON_BASE_DIR = '/vservers'
    def __init__(self, rec):
        # delegate libvirt-level initialization (domain handle, rspec) first
        Sliver_Libvirt.__init__(self, rec)
        # NOTE(review): 'name' is not defined in the visible code — a line
        # such as `name = rec['name']` appears to be elided from this view;
        # confirm against the full file.
        Initscript.__init__(self, name)
    def configure(self, rec):
        """Reconfigure this sliver from *rec*; always called before start()."""
        logger.log('========== sliver_lxc.configure {}'.format(self.name))
        # libvirt-level configuration first (domain/rspec bookkeeping)
        Sliver_Libvirt.configure(self, rec)
        # in case we update nodemanager..
        self.install_and_enable_vinit()
        # do the configure part from Initscript
        Initscript.configure(self, rec)
    # remember configure() always gets called *before* start()
    # in particular the slice initscript
    # is expected to be in place already at this point
    def start(self, delay=0):
        """Start the sliver's libvirt/LXC domain (unless it is disabled)."""
        logger.log('==================== sliver_lxc.start {}'.format(self.name))
        if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
            logger.log('sliver_lxc: not starting {}, is not enabled'.format(self.name))
            # NOTE(review): an early `return` is expected right here and is
            # elided from this view — without it a disabled sliver would
            # still be started below; confirm against the full file.
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # expose .ssh for omf_friendly slivers
        if 'tags' in self.rspec and 'omf_control' in self.rspec['tags']:
            Account.mount_ssh_dir(self.name)
        # logger.log("NM is exiting for debug - just about to start {}".format(self.name))
        # actual domain startup is handled by the libvirt base class
        Sliver_Libvirt.start(self, delay)
    def rerun_slice_vinit(self):
        """This is called at startup, and whenever the initscript code changes

        Enters the container's namespace via virsh to reload systemd units
        and restart the vinit service inside the guest.
        """
        logger.log("sliver_lxc.rerun_slice_vinit {}".format(self.name))
        # NOTE(review): both `plain = "..."` lines below end with a backslash
        # continuation whose continuation line (presumably `.format(self.name)`)
        # is elided from this view — confirm against the full file.
        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl --system daemon-reload"\
        command = plain.split()
        logger.log_call(command, timeout=3)
        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl restart vinit.service"\
        command = plain.split()
        logger.log_call(command, timeout=3)
    def create(name, rec=None):
        """Create dirs, copy fs image, lxc_create

        Builds a new sliver: snapshots the reference image into a btrfs
        subvolume, creates host and guest unix accounts, wires up ssh keys,
        seeds /etc/slicename & friends, writes the profile customizations,
        and finally defines the libvirt domain from an XML template.

        NOTE(review): this view of the function is missing lines — try/except
        keywords, else branches, return statements, and the continuation
        lines of several multi-line calls are elided. Code lines are kept
        exactly as visible; confirm the gaps against the full file.
        """
        logger.verbose('sliver_lxc: {} create'.format(name))
        # shared libvirt connection for this virtualization TYPE
        conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

        # NOTE(review): the logic that normally extracts 'vref' from rec is
        # elided here; only the hard-wired fallback and its warning remain.
        vref = "lxc-f24-x86_64"
        logger.log("sliver_libvirt: {}: WARNING - no vref attached, using hard-wired default {}"

        # compute guest arch from vref
        # essentially we want x86_64 (default) or i686 here for libvirt
        (x, y, arch) = vref.split('-')
        arch = "x86_64" if arch.find("64") >= 0 else "i686"

        # Get the type of image from vref myplc tags specified as:
        tags = rec['rspec']['tags']

        # source reference image and destination container rootfs
        refImgDir = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, vref)
        containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)

        # check the template exists -- there's probably a better way..
        if not os.path.isdir(refImgDir):
            logger.log('sliver_lxc: {}: ERROR Could not create sliver - reference image {} not found'
            logger.log('sliver_lxc: {}: ERROR Expected reference image in {}'.format(name, refImgDir))

        # during some time this fragment had been commented out
        # but we're seeing cases where this code might actually be useful, so..
        # this hopefully should be fixed now
        # # in fedora20 we have some difficulty in properly cleaning up /vservers/<slicename>
        # # also note that running e.g. btrfs subvolume create /vservers/.lvref/image /vservers/foo
        # # behaves differently, whether /vservers/foo exists or not:
        # # if /vservers/foo does not exist, it creates /vservers/foo
        # # but if it does exist, then it creates /vservers/foo/image !!
        # # so we need to check the expected container rootfs does not exist yet
        # # this hopefully could be removed in a future release
        if os.path.exists (containerDir):
            logger.log("sliver_lxc: {}: WARNING cleaning up pre-existing {}".format(name, containerDir))
            command = ['btrfs', 'subvolume', 'delete', containerDir]
            logger.log_call(command, BTRFS_TIMEOUT)
        # bail out if the cleanup above did not actually remove the dir
        if os.path.exists (containerDir):
            logger.log('sliver_lxc: {}: ERROR Could not create sliver - could not clean up empty {}'
                       .format(name, containerDir))

        # Snapshot the reference image fs
        # this assumes the reference image is in its own subvolume
        command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir]
        if not logger.log_call(command, timeout=BTRFS_TIMEOUT):
            logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at {}'
                       .format(containerDir))
        # make the rootfs world-traversable
        command = ['chmod', '755', containerDir]
        logger.log_call(command)

        # TODO: set quotas...

        # Set hostname. A valid hostname cannot have '_'
        #with open(os.path.join(containerDir, 'etc/hostname'), 'w') as f:
        #    print >>f, name.replace('_', '-')

        # Add slices group if not already present
        # NOTE(review): this lookup + groupadd pair was presumably wrapped in
        # try/except KeyError; the keywords are elided from this view.
        group = grp.getgrnam('slices')
        command = ['/usr/sbin/groupadd', 'slices']
        logger.log_call(command)

        # Add unix account (TYPE is specified in the subclass)
        command = ['/usr/sbin/useradd', '-g', 'slices', '-s', Sliver_LXC.SHELL, name, '-p', '*']
        logger.log_call(command)
        command = ['mkdir', '/home/{}/.ssh'.format(name)]
        logger.log_call(command)

        # Create PK pair keys to connect from the host to the guest without
        # password... maybe remove the need for authentication inside the
        command = ['su', '-s', '/bin/bash', '-c',
                   'ssh-keygen -t rsa -N "" -f /home/{}/.ssh/id_rsa'.format(name)]
        logger.log_call(command)

        # hand ownership of the keypair to the slice account
        command = ['chown', '-R', '{}:slices'.format(name), '/home/{}/.ssh'.format(name)]
        logger.log_call(command)

        # install the public key as root's authorized_keys inside the guest
        command = ['mkdir', '{}/root/.ssh'.format(containerDir)]
        logger.log_call(command)

        command = ['cp', '/home/{}/.ssh/id_rsa.pub'.format(name),
                   '{}/root/.ssh/authorized_keys'.format(containerDir)]
        logger.log_call(command)

        logger.log("creating /etc/slicename file in {}".format(os.path.join(containerDir, 'etc/slicename')))
        # NOTE(review): the try/except wrappers and `with` bodies for these
        # two files are elided; the log_exc lines are the except arms.
        with open(os.path.join(containerDir, 'etc/slicename'), 'w') as f:
        logger.log_exc("exception while creating /etc/slicename")
        with open(os.path.join(containerDir, 'etc/slicefamily'), 'w') as f:
        logger.log_exc("exception while creating /etc/slicefamily")

        # resolve the uid just created by useradd above
        uid = getpwnam(name).pw_uid
        # keyerror will happen if user id was not created successfully
        logger.log_exc("exception while getting user id")

        logger.log("uid is {}".format(uid))

        # create and own the slice home dir inside the guest rootfs
        command = ['mkdir', '{}/home/{}'.format(containerDir, name)]
        logger.log_call(command)
        command = ['chown', name, '{}/home/{}'.format(containerDir, name)]
        logger.log_call(command)

        # mirror the account into the guest's /etc/passwd and /etc/group
        etcpasswd = os.path.join(containerDir, 'etc/passwd')
        etcgroup = os.path.join(containerDir, 'etc/group')
        if os.path.exists(etcpasswd):
            # create all accounts with gid=1001 - i.e. 'slices' like it is in the root context
            # NOTE(review): slices_gid is referenced below but its assignment
            # (presumably `slices_gid = 1001`) is elided from this view.
            logger.log("adding user {name} id {uid} gid {slices_gid} to {etcpasswd}"
                       .format(**(locals())))
            with open(etcpasswd, 'a') as passwdfile:
                passwdfile.write("{name}:x:{uid}:{slices_gid}::/home/{name}:/bin/bash\n"
            logger.log_exc("exception while updating {}".format(etcpasswd))
            logger.log("adding group slices with gid {slices_gid} to {etcgroup}"
            with open(etcgroup, 'a') as groupfile:
                groupfile.write("slices:x:{slices_gid}\n"
            logger.log_exc("exception while updating {}".format(etcgroup))
        # give the slice passwordless sudo inside the guest
        sudoers = os.path.join(containerDir, 'etc/sudoers')
        if os.path.exists(sudoers):
            with open(sudoers, 'a') as f:
                f.write("{} ALL=(ALL) NOPASSWD: ALL\n".format(name))
            logger.log_exc("exception while updating /etc/sudoers")

        # customizations for the user environment - root or slice uid
        # we save the whole business in /etc/planetlab.profile
        # and source this file for both root and the slice uid's .profile
        # prompt for slice owner, + LD_PRELOAD for transparently wrap bind
        pl_profile = os.path.join(containerDir, "etc/planetlab.profile")
        ld_preload_text = """# by default, we define this setting so that calls to bind(2),
# when invoked on 0.0.0.0, get transparently redirected to the public interface of this node
# see https://svn.planet-lab.org/wiki/LxcPortForwarding"""
        usrmove_path_text = """# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
        # NOTE(review): the shell snippet below is missing several lines,
        # including its closing triple-quote — confirm against the full file.
        usrmove_path_code = """
if ! echo $PATH | /bin/egrep -q "(^|:)$1($|:)" ; then
    if [ "$2" = "after" ] ; then
pathmunge /sbin after
        with open(pl_profile, 'w') as f:
            f.write("export PS1='{}@\H \$ '\n".format(name))
            f.write("{}\n".format(ld_preload_text))
            f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
            f.write("{}\n".format(usrmove_path_text))
            f.write("{}\n".format(usrmove_path_code))

        # make sure this file is sourced from both root's and slice's .profile
        enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
        for path in [ 'root/.profile', 'home/{}/.profile'.format(name) ]:
            from_root = os.path.join(containerDir, path)
            # if dir is not yet existing let's forget it for now
            if not os.path.isdir(os.path.dirname(from_root)): continue
            # NOTE(review): the scan below presumably sets a 'found' flag and
            # skips appending when the line is already present; those lines
            # are elided from this view.
            with open(from_root) as f:
                contents = f.readlines()
                for content in contents:
                    if content == enforced_line:
            with open(from_root, "a") as user_profile:
                user_profile.write(enforced_line)
                # in case we create the slice's .profile when writing
                if from_root.find("/home") >= 0:
                    command = ['chown', '{}:slices'.format(name), from_root]
                    logger.log_call(command)

        # Lookup for xid and create template after the user is created so we
        # can get the correct xid based on the name of the slice
        xid = bwlimit.get_xid(name)

        # Template for libvirt sliver configuration
        template_filename_sliceimage = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, 'lxc_template.xml')
        if os.path.isfile(template_filename_sliceimage):
            logger.verbose("Using XML template {}".format(template_filename_sliceimage))
            template_filename = template_filename_sliceimage
        # NOTE(review): this log line is presumably the `else:` branch of the
        # check above; the keyword is elided from this view.
        logger.log("Cannot find XML template {}".format(template_filename_sliceimage))

        # per-sliver network interfaces rendered as XML for the template
        interfaces = Sliver_Libvirt.get_interfaces_xml(rec)

        with open(template_filename) as f:
            template = Template(f.read())
            xml = template.substitute(name=name, xid=xid, interfaces=interfaces, arch=arch)
        logger.log('Failed to parse or use XML template file {}'.format(template_filename))

        # Lookup for the sliver before actually
        # defining it, just in case it was already defined.
        dom = conn.lookupByName(name)
        dom = conn.defineXML(xml)
        logger.verbose('lxc_create: {} -> {}'.format(name, Sliver_Libvirt.dom_details(dom)))
        # NOTE(review): the `def destroy(name):` header (presumably a
        # @staticmethod like create) and several try/except keywords are
        # elided from this view; code lines are kept exactly as visible.
        # umount .ssh directory - only if mounted
        Account.umount_ssh_dir(name)
        logger.verbose ('sliver_lxc: {} destroy'.format(name))
        conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

        containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)

        # Destroy libvirt domain
        # NOTE(review): lookupByName was presumably in a try block whose
        # except arm is the "does not exist" log line below.
        dom = conn.lookupByName(name)
        logger.verbose('sliver_lxc.destroy: Domain {} does not exist!'.format(name))

        # Slivers with vsys running will fail the subvolume delete
        # removeSliverFromVsys return True if it stops vsys, telling us to start it again later
        vsys_stopped = removeSliverFromVsys (name)

        # NOTE(review): the dom.destroy()/dom.undefine() calls and their
        # try/except wrappers are elided; only the log lines remain visible.
        logger.log("sliver_lxc.destroy: destroying domain {}".format(name))
        logger.verbose("sliver_lxc.destroy: Domain {} not running... continuing.".format(name))
        logger.log("sliver_lxc.destroy: undefining domain {}".format(name))
        logger.verbose('sliver_lxc.destroy: Domain {} is not defined... continuing.'.format(name))

        # Remove user after destroy domain to force logout
        command = ['/usr/sbin/userdel', '-f', '-r', name]
        logger.log_call(command)

        # Remove rootfs of destroyed domain
        command = ['/usr/bin/rm', '-rf', containerDir]
        logger.log_call(command, timeout=BTRFS_TIMEOUT)

        # temporary debug dumps: list the leftover rootfs and vsys status
        logger.log("-TMP-ls-l {}".format(name))
        command = ['ls', '-lR', containerDir]
        logger.log_call(command)
        logger.log("-TMP-vsys-status")
        command = ['/usr/bin/systemctl', 'status', 'vsys']
        logger.log_call(command)

        # Remove rootfs of destroyed domain
        command = ['btrfs', 'subvolume', 'delete', containerDir]
        logger.log_call(command, timeout=BTRFS_TIMEOUT)

        # For some reason I am seeing this :
        #log_call: running command btrfs subvolume delete /vservers/inri_sl1
        #log_call: ERROR: cannot delete '/vservers/inri_sl1' - Device or resource busy
        #log_call: Delete subvolume '/vservers/inri_sl1'
        #log_call:end command (btrfs subvolume delete /vservers/inri_sl1) returned with code 1

        # something must have an open handle to a file in there, but I can't find out what it is
        # the following code aims at gathering data on what is going on in the system at this point in time
        # note that some time later (typically when the sliver gets re-created) the same
        # attempt at deleting the subvolume does work
        # also lsof never shows anything relevant; this is painful..

        # success path: the subvolume delete actually removed the rootfs
        if not os.path.exists(containerDir):
            logger.log('sliver_lxc.destroy: {} cleanly destroyed.'.format(name))

        #logger.log("-TMP-cwd {} : {}".format(name, os.getcwd()))
        # also lsof never shows anything relevant; this is painful..
        #logger.log("-TMP-lsof {}".format(name))
        #logger.log_call(command)
        logger.log("-TMP-ls-l {}".format(name))
        command = ['ls', '-lR', containerDir]
        logger.log_call(command)
        logger.log("-TMP-lsof")
        # NOTE(review): the lsof command assignment is elided here, so this
        # call re-runs the previous 'ls -lR' command — confirm.
        logger.log_call(command)
        if os.path.exists(containerDir):
            logger.log('sliver_lxc.destroy: ERROR could not cleanly destroy {} - giving up'.format(name))