diff --git a/sliver_lxc.py b/sliver_lxc.py
index b3a9c7c..5892eed 100644
--- a/sliver_lxc.py
+++ b/sliver_lxc.py
@@ -10,6 +10,13 @@ import grp
 from pwd import getpwnam
 from string import Template
 
+# vsys probably should not be a plugin
+# the thing is, the right way to handle this would be that
+# if slivers get created by doing a,b,c
+# then they should be deleted by doing c,b,a
+# the current ordering model for vsys plugins completely fails to capture that
+from plugins.vsys import removeSliverFromVsys, startService as vsysStartService
+
 import libvirt
 
 import logger
@@ -18,6 +25,8 @@ from initscript import Initscript
 from account import Account
 from sliver_libvirt import Sliver_Libvirt
 
+BTRFS_TIMEOUT = 15*60
+
 class Sliver_LXC(Sliver_Libvirt, Initscript):
     """This class wraps LXC commands"""
 
@@ -29,20 +38,25 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
     REF_IMG_BASE_DIR = '/vservers/.lvref'
     CON_BASE_DIR = '/vservers'
 
-    def __init__ (self, rec):
+    def __init__(self, rec):
         name=rec['name']
-        Sliver_Libvirt.__init__ (self,rec)
-        Initscript.__init__ (self,name)
+        Sliver_Libvirt.__init__(self,rec)
+        Initscript.__init__(self,name)
 
-    def configure (self, rec):
-        Sliver_Libvirt.configure (self,rec)
+    def configure(self, rec):
+        logger.log('========== sliver_lxc.configure {}'.format(self.name))
+        Sliver_Libvirt.configure(self, rec)
 
         # in case we update nodemanager..
        self.install_and_enable_vinit()
         # do the configure part from Initscript
-        Initscript.configure(self,rec)
+        Initscript.configure(self, rec)
+        # remember that configure() always gets called *before* start()
+        # in particular the slice initscript
+        # is expected to be in place already at this point
 
     def start(self, delay=0):
+        logger.log('==================== sliver_lxc.start {}'.format(self.name))
         if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
             logger.log('sliver_lxc: not starting %s, is not enabled'%self.name)
             return
@@ -51,24 +65,44 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
         # expose .ssh for omf_friendly slivers
         if 'tags' in self.rspec and 'omf_control' in self.rspec['tags']:
             Account.mount_ssh_dir(self.name)
-        Sliver_Libvirt.start (self, delay)
+        Sliver_Libvirt.start(self, delay)
         # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
         self.refresh_slice_vinit()
 
-    def rerun_slice_vinit (self):
-        """This is called whenever the initscript code changes"""
-        # xxx - todo - not sure exactly how to:
-        # (.) invoke something in the guest
-        # (.) which options of systemctl should be used to trigger a restart
-        # should not prevent the first run from going fine hopefully
-        logger.log("WARNING: sliver_lxc.rerun_slice_vinit not implemented yet")
-
+    def rerun_slice_vinit(self):
+        """This is called at startup, and whenever the initscript code changes"""
+        logger.log("sliver_lxc.rerun_slice_vinit {}".format(self.name))
+        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl --system daemon-reload"\
+            .format(self.name)
+        command = plain.split()
+        logger.log_call(command, timeout=3)
+        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl restart vinit.service"\
+            .format(self.name)
+        command = plain.split()
+        logger.log_call(command, timeout=3)
+
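The two virsh lxc-enter-namespace invocations above differ only in the systemctl arguments handed to the guest. A minimal sketch of a shared helper (an illustration only, not part of this commit; it assumes the logger.log_call interface used throughout this file):

    import logger

    def run_in_guest(guest_name, guest_command, timeout=3):
        # lxc-enter-namespace executes a command inside the container's
        # namespaces; --noseclabel skips security relabeling, and the bare
        # '--' separates virsh options from the guest argv
        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} {}"\
            .format(guest_name, guest_command)
        logger.log_call(plain.split(), timeout=timeout)

    # usage, mirroring rerun_slice_vinit above:
    #   run_in_guest(self.name, "/usr/bin/systemctl --system daemon-reload")
    #   run_in_guest(self.name, "/usr/bin/systemctl restart vinit.service")

Like the original, this naive split() would mishandle quoted arguments, so it is only suitable for the simple commands shown here.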
     @staticmethod
     def create(name, rec=None):
-        ''' Create dirs, copy fs image, lxc_create '''
-        logger.verbose ('sliver_lxc: %s create'%(name))
+        '''
+        Create dirs, copy fs image, lxc_create
+        '''
+        logger.verbose('sliver_lxc: %s create' % name)
         conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)
 
+        vref = rec['vref']
+        if vref is None:
+            vref = "lxc-f18-x86_64"
+            logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+        # compute guest arch from vref
+        # essentially we want x86_64 (default) or i686 here for libvirt
+        try:
+            (x, y, arch) = vref.split('-')
+            arch = "x86_64" if arch.find("64")>=0 else "i686"
+        except:
+            arch = 'x86_64'
+
         # Get the type of image from vref myplc tags specified as:
         # pldistro = lxc
         # fcdistro = squeeze
@@ -81,10 +115,8 @@
         if arch == 'i386':
             arch = 'i686'
 
-        vref = rec['vref']
-        if vref is None:
-            vref = "lxc-f14-x86_64"
-            logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+
         refImgDir = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, vref)
         containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)
 
@@ -95,60 +127,31 @@
             logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name,refImgDir))
             return
 
-        # Snapshot the reference image fs (assume the reference image is in its own
-        # subvolume)
+# this hopefully should be fixed now
+#        # in fedora20 we have some difficulty in properly cleaning up /vservers/
+#        # also note that running e.g. btrfs subvolume create /vservers/.lvref/image /vservers/foo
+#        # behaves differently, whether /vservers/foo exists or not:
+#        # if /vservers/foo does not exist, it creates /vservers/foo
+#        # but if it does exist, then it creates /vservers/foo/image !!
+#        # so we need to check the expected container rootfs does not exist yet
+#        # this hopefully could be removed in a future release
+#        if os.path.exists (containerDir):
+#            logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name,containerDir))
+#            command = ['btrfs', 'subvolume', 'delete', containerDir]
+#            logger.log_call(command, BTRFS_TIMEOUT)
+#            # re-check
+#            if os.path.exists (containerDir):
+#                logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name,containerDir))
+#                return
+
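The commented-out block above works around the btrfs quirk spelled out in its own comments: creating or snapshotting a subvolume into a path that already exists nests the new subvolume inside it instead of failing. A standalone sketch of that same guard (illustrative only; it reuses the BTRFS_TIMEOUT constant and logger module from this file):

    import os
    import logger

    BTRFS_TIMEOUT = 15*60

    def ensure_no_stale_rootfs(name, containerDir):
        # a leftover rootfs would make 'btrfs subvolume snapshot' create
        # containerDir/image instead of containerDir, so delete it first
        if os.path.exists(containerDir):
            logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s" % (name, containerDir))
            logger.log_call(['btrfs', 'subvolume', 'delete', containerDir], timeout=BTRFS_TIMEOUT)
        # report whether the path is now free for the snapshot
        return not os.path.exists(containerDir)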
+        # Snapshot the reference image fs
+        # this assumes the reference image is in its own subvolume
         command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir]
-        if not logger.log_call(command, timeout=15*60):
+        if not logger.log_call(command, timeout=BTRFS_TIMEOUT):
             logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at', containerDir)
             return
         command = ['chmod', '755', containerDir]
-        logger.log_call(command, timeout=15*60)
-
-        # customizations for the user environment - root or slice uid
-        # we save the whole business in /etc/planetlab.profile
-        # and source this file for both root and the slice uid's .profile
-        # prompt for slice owner, + LD_PRELOAD for transparently wrap bind
-        pl_profile=os.path.join(containerDir,"etc/planetlab.profile")
-        ld_preload_msg="""# by default, we define this setting so that calls to bind(2),
-# when invoked on 0.0.0.0, get transparently redirected to the public interface of this node
-# see https://svn.planet-lab.org/wiki/LxcPortForwarding"""
-        usrmove_path_msg="""# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
-        usrmove_path_code="""
-pathmunge () {
-    if ! echo $PATH | /bin/egrep -q "(^|:)$1($|:)" ; then
-        if [ "$2" = "after" ] ; then
-            PATH=$PATH:$1
-        else
-            PATH=$1:$PATH
-        fi
-    fi
-}
-pathmunge /bin after
-pathmunge /sbin after
-unset pathmunge
-"""
-        with open(pl_profile,'w') as f:
-            f.write("export PS1='%s@\H \$ '\n"%(name))
-            f.write("%s\n"%ld_preload_msg)
-            f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
-            f.write("%s\n"%usrmove_path_msg)
-            f.write("%s\n"%usrmove_path_code)
-
-        # make sure this file is sourced from both root's and slice's .profile
-        enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
-        for path in [ 'root/.profile', 'home/%s/.profile'%name ]:
-            from_root=os.path.join(containerDir,path)
-            # if dir is not yet existing let's forget it for now
-            if not os.path.isdir(os.path.dirname(from_root)): continue
-            found=False
-            try:
-                contents=file(from_root).readlines()
-                for content in contents:
-                    if content==enforced_line: found=True
-            except IOError: pass
-            if not found:
-                with open(from_root,"a") as user_profile:
-                    user_profile.write(enforced_line)
+        logger.log_call(command)
 
         # TODO: set quotas...
 
@@ -161,28 +164,28 @@ unset pathmunge
             group = grp.getgrnam('slices')
         except:
             command = ['/usr/sbin/groupadd', 'slices']
-            logger.log_call(command, timeout=15*60)
+            logger.log_call(command)
 
         # Add unix account (TYPE is specified in the subclass)
         command = ['/usr/sbin/useradd', '-g', 'slices', '-s', Sliver_LXC.SHELL, name, '-p', '*']
-        logger.log_call(command, timeout=15*60)
+        logger.log_call(command)
 
         command = ['mkdir', '/home/%s/.ssh'%name]
-        logger.log_call(command, timeout=15*60)
+        logger.log_call(command)
 
         # Create a PK key pair to connect from the host to the guest without
         # password... maybe remove the need for authentication inside the
         # guest?
         command = ['su', '-s', '/bin/bash', '-c', 'ssh-keygen -t rsa -N "" -f /home/%s/.ssh/id_rsa'%(name)]
-        logger.log_call(command, timeout=60)
+        logger.log_call(command)
 
         command = ['chown', '-R', '%s.slices'%name, '/home/%s/.ssh'%name]
-        logger.log_call(command, timeout=30)
+        logger.log_call(command)
 
         command = ['mkdir', '%s/root/.ssh'%containerDir]
-        logger.log_call(command, timeout=10)
+        logger.log_call(command)
 
         command = ['cp', '/home/%s/.ssh/id_rsa.pub'%name, '%s/root/.ssh/authorized_keys'%containerDir]
-        logger.log_call(command, timeout=30)
+        logger.log_call(command)
 
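Taken together, the four commands above generate an ssh key pair owned by the slice user on the host and install the public half as the container root's authorized_keys. A hedged sketch of the same installation done with the standard library instead of shelling out to mkdir/cp (a hypothetical refactoring, not what this diff does):

    import os
    import shutil

    def install_root_authorized_keys(name, containerDir):
        # mirror the mkdir + cp above: put the slice user's public key in
        # place so the host can ssh into the guest as root without a password
        ssh_dir = os.path.join(containerDir, 'root/.ssh')
        if not os.path.isdir(ssh_dir):
            os.makedirs(ssh_dir)
        shutil.copy('/home/%s/.ssh/id_rsa.pub' % name,
                    os.path.join(ssh_dir, 'authorized_keys'))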
         logger.log("creating /etc/slicename file in %s" % os.path.join(containerDir,'etc/slicename'))
         try:
@@ -205,16 +208,24 @@ unset pathmunge
         if uid is not None:
             logger.log("uid is %d" % uid)
             command = ['mkdir', '%s/home/%s' % (containerDir, name)]
-            logger.log_call(command, timeout=10)
+            logger.log_call(command)
             command = ['chown', name, '%s/home/%s' % (containerDir, name)]
-            logger.log_call(command, timeout=10)
+            logger.log_call(command)
             etcpasswd = os.path.join(containerDir, 'etc/passwd')
+            etcgroup = os.path.join(containerDir, 'etc/group')
             if os.path.exists(etcpasswd):
-                logger.log("adding user %s id %d to %s" % (name, uid, etcpasswd))
+                # create all accounts with gid=1001, i.e. 'slices', like it is in the root context
+                slices_gid=1001
+                logger.log("adding user %(name)s id %(uid)d gid %(slices_gid)d to %(etcpasswd)s" % (locals()))
                 try:
-                    file(etcpasswd,'a').write("%s:x:%d:%d::/home/%s:/bin/bash\n" % (name, uid, uid, name))
+                    file(etcpasswd,'a').write("%(name)s:x:%(uid)d:%(slices_gid)d::/home/%(name)s:/bin/bash\n" % locals())
                 except:
-                    logger.log_exc("exception while updating etc/passwd")
+                    logger.log_exc("exception while updating %s"%etcpasswd)
+                logger.log("adding group slices with gid %(slices_gid)d to %(etcgroup)s"%locals())
+                try:
+                    file(etcgroup,'a').write("slices:x:%(slices_gid)d:\n"%locals())
+                except:
+                    logger.log_exc("exception while updating %s"%etcgroup)
             sudoers = os.path.join(containerDir, 'etc/sudoers')
             if os.path.exists(sudoers):
                 try:
@@ -222,6 +233,56 @@ unset pathmunge
                 except:
                     logger.log_exc("exception while updating /etc/sudoers")
 
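slices_gid=1001 hard-wires the gid of the 'slices' group inside the guest to match the root context. Since this file already imports grp, a slightly more defensive variant could look the gid up on the host and only fall back to 1001 (a sketch only, not what the code above does):

    import grp

    def resolve_slices_gid(default_gid=1001):
        # prefer the actual gid of 'slices' in the root context,
        # falling back to the conventional value if the group is missing
        try:
            return grp.getgrnam('slices').gr_gid
        except KeyError:
            return default_gid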
+        # customizations for the user environment - root or slice uid
+        # we save the whole business in /etc/planetlab.profile
+        # and source this file from both root's and the slice uid's .profile
+        # a prompt for the slice owner, plus LD_PRELOAD to transparently wrap bind
+        pl_profile=os.path.join(containerDir,"etc/planetlab.profile")
+        ld_preload_text="""# by default, we define this setting so that calls to bind(2),
+# when invoked on 0.0.0.0, get transparently redirected to the public interface of this node
+# see https://svn.planet-lab.org/wiki/LxcPortForwarding"""
+        usrmove_path_text="""# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
+        usrmove_path_code="""
+pathmunge () {
+    if ! echo $PATH | /bin/egrep -q "(^|:)$1($|:)" ; then
+        if [ "$2" = "after" ] ; then
+            PATH=$PATH:$1
+        else
+            PATH=$1:$PATH
+        fi
+    fi
+}
+pathmunge /bin after
+pathmunge /sbin after
+unset pathmunge
+"""
+        with open(pl_profile,'w') as f:
+            f.write("export PS1='%s@\H \$ '\n"%(name))
+            f.write("%s\n"%ld_preload_text)
+            f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
+            f.write("%s\n"%usrmove_path_text)
+            f.write("%s\n"%usrmove_path_code)
+
+        # make sure this file is sourced from both root's and the slice's .profile
+        enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
+        for path in [ 'root/.profile', 'home/%s/.profile'%name ]:
+            from_root=os.path.join(containerDir,path)
+            # if the dir does not exist yet, let's forget about it for now
+            if not os.path.isdir(os.path.dirname(from_root)): continue
+            found=False
+            try:
+                contents=file(from_root).readlines()
+                for content in contents:
+                    if content==enforced_line: found=True
+            except IOError: pass
+            if not found:
+                with open(from_root,"a") as user_profile:
+                    user_profile.write(enforced_line)
+                # in case we have just created the slice's .profile
+                if from_root.find("/home")>=0:
+                    command=['chown','%s:slices'%name,from_root]
+                    logger.log_call(command)
+
         # Lookup for xid and create template after the user is created so we
         # can get the correct xid based on the name of the slice
         xid = bwlimit.get_xid(name)
@@ -251,7 +312,7 @@ unset pathmunge
             dom = conn.lookupByName(name)
         except:
             dom = conn.defineXML(xml)
-        logger.verbose('lxc_create: %s -> %s'%(name, Sliver_Libvirt.debuginfo(dom)))
+        logger.verbose('lxc_create: %s -> %s'%(name, Sliver_Libvirt.dom_details(dom)))
 
     @staticmethod
@@ -267,37 +328,75 @@ unset pathmunge
             # Destroy libvirt domain
             dom = conn.lookupByName(name)
         except:
-            logger.verbose('sliver_lxc: Domain %s does not exist!' % name)
+            logger.verbose('sliver_lxc.destroy: Domain %s does not exist!' % name)
+            return
+
+        # Slivers with vsys running will fail the subvolume delete
+        # removeSliverFromVsys returns True if it stops vsys, telling us to start it again later
+        vsys_stopped = removeSliverFromVsys (name)
 
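Stopping vsys here before the subvolume delete, and restarting it at the very end of destroy, is a concrete case of the ordering problem flagged in the comment at the top of this diff: teardown has to run the creation steps in reverse. A sketch of that LIFO discipline (purely illustrative; nothing in nodemanager implements this):

    class SliverLifecycle:
        """Pair every creation step with its teardown and undo in reverse."""

        def __init__(self):
            self.undo_stack = []

        def step(self, do, undo):
            # run a creation step (a, b, c, ...) and remember how to undo it
            do()
            self.undo_stack.append(undo)

        def teardown(self):
            # undo in reverse order: c, b, a
            while self.undo_stack:
                self.undo_stack.pop()()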
         try:
+            logger.log("sliver_lxc.destroy: destroying domain %s"%name)
             dom.destroy()
         except:
-            logger.verbose('sliver_lxc: Domain %s not running... continuing.' % name)
+            logger.verbose('sliver_lxc.destroy: Domain %s not running... continuing.' % name)
 
         try:
+            logger.log("sliver_lxc.destroy: undefining domain %s"%name)
             dom.undefine()
         except:
-            logger.verbose('sliver_lxc: Domain %s is not defined... continuing.' % name)
+            logger.verbose('sliver_lxc.destroy: Domain %s is not defined... continuing.' % name)
 
         # Remove the user after destroying the domain, to force a logout
         command = ['/usr/sbin/userdel', '-f', '-r', name]
-        logger.log_call(command, timeout=15*60)
+        logger.log_call(command)
 
-        if os.path.exists(os.path.join(containerDir,"vsys")):
-            # Slivers with vsys running will fail the subvolume delete.
-            # A more permanent solution may be to ensure that the vsys module
-            # is called before the sliver is destroyed.
-            logger.log("destroying vsys directory and restarting vsys")
-            logger.log_call(["rm", "-fR", os.path.join(containerDir, "vsys")])
-            logger.log_call(["/etc/init.d/vsys", "restart", ])
+        # Empty the rootfs of the destroyed domain
+        command = ['/usr/bin/rm', '-rf', containerDir]
+        logger.log_call(command, timeout=BTRFS_TIMEOUT)
+
+        # temporary debugging aids - see the long comment below
+        logger.log("-TMP-ls-l %s"%name)
+        command = ['ls', '-lR', containerDir]
+        logger.log_call(command)
+        logger.log("-TMP-vsys-status")
+        command = ['/usr/bin/systemctl', 'status', 'vsys']
+        logger.log_call(command)
+        # end of temporary debugging aids
 
         # Remove rootfs of destroyed domain
         command = ['btrfs', 'subvolume', 'delete', containerDir]
-        logger.log_call(command, timeout=60)
-
-        if os.path.exists(containerDir):
-            # oh no, it's still here...
-            logger.log("WARNING: failed to destroy container %s" % containerDir)
-
-        logger.verbose('sliver_libvirt: %s destroyed.'%name)
-
+        logger.log_call(command, timeout=BTRFS_TIMEOUT)
+
+        # For some reason I am seeing this:
+        #  log_call: running command btrfs subvolume delete /vservers/inri_sl1
+        #  log_call: ERROR: cannot delete '/vservers/inri_sl1' - Device or resource busy
+        #  log_call: Delete subvolume '/vservers/inri_sl1'
+        #  log_call:end command (btrfs subvolume delete /vservers/inri_sl1) returned with code 1
+        #
+        # something must have an open handle on a file in there, but I can't find out what it is
+        # the following code aims at gathering data on what is going on in the system at this point in time
+        # note that some time later (typically when the sliver gets re-created) the same
+        # attempt at deleting the subvolume does work
+        # also, lsof never shows anything relevant; this is painful..
+
+        if not os.path.exists(containerDir):
+            logger.log('sliver_lxc.destroy: %s cleanly destroyed.'%name)
+        else:
+            # we're in /
+            #logger.log("-TMP-cwd %s : %s"%(name,os.getcwd()))
+            #logger.log("-TMP-lsof %s"%name)
+            #command=['lsof']
+            #logger.log_call(command)
+            logger.log("-TMP-ls-l %s"%name)
+            command = ['ls', '-lR', containerDir]
+            logger.log_call(command)
+            logger.log("-TMP-lsof")
+            command = ['lsof']
+            logger.log_call(command)
+            if os.path.exists(containerDir):
+                logger.log('sliver_lxc.destroy: ERROR could not cleanly destroy %s - giving up'%name)
+
+        if vsys_stopped:
+            vsysStartService()
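The busy-delete failure quoted in the comment above reportedly clears on its own: a later attempt at the same delete, typically when the sliver gets re-created, does succeed. A hedged sketch of a retry loop that exploits this observation (hypothetical, with made-up retry counts; it reuses the logger module and BTRFS_TIMEOUT constant from this file):

    import os
    import time
    import logger

    BTRFS_TIMEOUT = 15*60

    def delete_subvolume_with_retries(containerDir, attempts=5, delay=10):
        # whatever holds the open handle tends to go away on its own,
        # so retry the delete a few times before giving up
        for _ in range(attempts):
            logger.log_call(['btrfs', 'subvolume', 'delete', containerDir],
                            timeout=BTRFS_TIMEOUT)
            if not os.path.exists(containerDir):
                return True
            time.sleep(delay)
        return False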