X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sliver_lxc.py;h=5892eed9fe6c5d9245ca550aa6fc38e1c0bb8a54;hb=ccc7b9c4b76a89faad66867b00d16ac45333b6de;hp=94de3db189589d67edc41cba251186712c5177b1;hpb=5d884de3c1a7764a681d048beac8ef5234213d19;p=nodemanager.git
diff --git a/sliver_lxc.py b/sliver_lxc.py
index 94de3db..5892eed 100644
--- a/sliver_lxc.py
+++ b/sliver_lxc.py
@@ -13,7 +13,7 @@ from string import Template
 # vsys probably should not be a plugin
 # the thing is, the right way to handle stuff would be that
 # if slivers get created by doing a,b,c
-# then they sohuld be delted by doing c,b,a
+# then they should be deleted by doing c,b,a
 # the current ordering model for vsys plugins completely fails to capture that

 from plugins.vsys import removeSliverFromVsys, startService as vsysStartService
@@ -25,7 +25,7 @@ from initscript import Initscript
 from account import Account
 from sliver_libvirt import Sliver_Libvirt

-BTRFS_TIMEOUT=15*60
+BTRFS_TIMEOUT = 15*60

 class Sliver_LXC(Sliver_Libvirt, Initscript):
     """This class wraps LXC commands"""
@@ -38,20 +38,25 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
     REF_IMG_BASE_DIR = '/vservers/.lvref'
     CON_BASE_DIR = '/vservers'

-    def __init__ (self, rec):
+    def __init__(self, rec):
         name=rec['name']
-        Sliver_Libvirt.__init__ (self,rec)
-        Initscript.__init__ (self,name)
+        Sliver_Libvirt.__init__(self,rec)
+        Initscript.__init__(self,name)

-    def configure (self, rec):
-        Sliver_Libvirt.configure (self,rec)
+    def configure(self, rec):
+        logger.log('========== sliver_lxc.configure {}'.format(self.name))
+        Sliver_Libvirt.configure(self, rec)

         # in case we update nodemanager..
         self.install_and_enable_vinit()
         # do the configure part from Initscript
-        Initscript.configure(self,rec)
+        Initscript.configure(self, rec)
+        # remember configure() always gets called *before* start()
+        # in particular the slice initscript
+        # is expected to be in place already at this point

     def start(self, delay=0):
+        logger.log('==================== sliver_lxc.start {}'.format(self.name))
         if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
             logger.log('sliver_lxc: not starting %s, is not enabled'%self.name)
             return
@@ -60,24 +65,44 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
         # expose .ssh for omf_friendly slivers
         if 'tags' in self.rspec and 'omf_control' in self.rspec['tags']:
             Account.mount_ssh_dir(self.name)
-        Sliver_Libvirt.start (self, delay)
+        Sliver_Libvirt.start(self, delay)
         # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
         self.refresh_slice_vinit()

-    def rerun_slice_vinit (self):
-        """This is called whenever the initscript code changes"""
-        # xxx - todo - not sure exactly how to:
-        # (.) invoke something in the guest
-        # (.) which options of systemctl should be used to trigger a restart
-        # should not prevent the first run from going fine hopefully
-        logger.log("WARNING: sliver_lxc.rerun_slice_vinit not implemented yet")
-
+    def rerun_slice_vinit(self):
+        """This is called at startup, and whenever the initscript code changes"""
+        logger.log("sliver_lxc.rerun_slice_vinit {}".format(self.name))
+        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl --system daemon-reload"\
+            .format(self.name)
+        command = plain.split()
+        logger.log_call(command, timeout=3)
+        plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl restart vinit.service"\
+            .format(self.name)
+        command = plain.split()
+        logger.log_call(command, timeout=3)
+
+
     @staticmethod
     def create(name, rec=None):
-        ''' Create dirs, copy fs image, lxc_create '''
-        logger.verbose ('sliver_lxc: %s create'%(name))
+        '''
+        Create dirs, copy fs image, lxc_create
+        '''
+        logger.verbose('sliver_lxc: %s create' % name)
         conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)

+        vref = rec['vref']
+        if vref is None:
+            vref = "lxc-f18-x86_64"
+            logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+        # compute guest arch from vref
+        # essentially we want x86_64 (default) or i686 here for libvirt
+        try:
+            (x, y, arch) = vref.split('-')
+            arch = "x86_64" if arch.find("64")>=0 else "i686"
+        except:
+            arch = 'x86_64'
+
         # Get the type of image from vref myplc tags specified as:
         # pldistro = lxc
         # fcdistro = squeeze
@@ -90,10 +115,8 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
         if arch == 'i386':
             arch = 'i686'

-        vref = rec['vref']
-        if vref is None:
-            vref = "lxc-f18-x86_64"
-            logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+

         refImgDir = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, vref)
         containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)
@@ -104,24 +127,25 @@ class Sliver_LXC(Sliver_Libvirt, Initscript):
             logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name,refImgDir))
             return

-        # in fedora20 we have some difficulty in properly cleaning up /vservers/
-        # also note that running e.g. btrfs subvolume create /vservers/.lvref/image /vservers/foo
-        # behaves differently, whether /vservers/foo exists or not:
-        # if /vservers/foo does not exist, it creates /vservers/foo
-        # but if it does exist, then it creates /vservers/foo/image !!
-        # so we need to check the expected container rootfs does not exist yet
-        # this hopefully could be removed in a future release
-        if os.path.exists (containerDir):
-            logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name,containerDir))
-            command = ['btrfs', 'subvolume', 'delete', containerDir]
-            logger.log_call(command, BTRFS_TIMEOUT)
-            # re-check
-            if os.path.exists (containerDir):
-                logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name,containerDir))
-                return
-
-        # Snapshot the reference image fs (assume the reference image is in its own
-        # subvolume)
+# this hopefully should be fixed now
+# # in fedora20 we have some difficulty in properly cleaning up /vservers/
+# # also note that running e.g. btrfs subvolume create /vservers/.lvref/image /vservers/foo
+# # behaves differently, whether /vservers/foo exists or not:
+# # if /vservers/foo does not exist, it creates /vservers/foo
+# # but if it does exist, then it creates /vservers/foo/image !!
+# # so we need to check the expected container rootfs does not exist yet +# # this hopefully could be removed in a future release +# if os.path.exists (containerDir): +# logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name,containerDir)) +# command = ['btrfs', 'subvolume', 'delete', containerDir] +# logger.log_call(command, BTRFS_TIMEOUT) +# # re-check +# if os.path.exists (containerDir): +# logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name,containerDir)) +# return + + # Snapshot the reference image fs + # this assumes the reference image is in its own subvolume command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir] if not logger.log_call(command, timeout=BTRFS_TIMEOUT): logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at', containerDir) @@ -210,7 +234,7 @@ class Sliver_LXC(Sliver_Libvirt, Initscript): logger.log_exc("exception while updating /etc/sudoers") # customizations for the user environment - root or slice uid - # we save the whole business in /etc/planetlab.profile + # we save the whole business in /etc/planetlab.profile # and source this file for both root and the slice uid's .profile # prompt for slice owner, + LD_PRELOAD for transparently wrap bind pl_profile=os.path.join(containerDir,"etc/planetlab.profile") @@ -246,7 +270,7 @@ unset pathmunge # if dir is not yet existing let's forget it for now if not os.path.isdir(os.path.dirname(from_root)): continue found=False - try: + try: contents=file(from_root).readlines() for content in contents: if content==enforced_line: found=True @@ -327,10 +351,23 @@ unset pathmunge command = ['/usr/sbin/userdel', '-f', '-r', name] logger.log_call(command) + # Remove rootfs of destroyed domain + command = ['/usr/bin/rm', '-rf', containerDir] + logger.log_call(command, timeout=BTRFS_TIMEOUT) + + # ??? + logger.log("-TMP-ls-l %s"%name) + command = ['ls', '-lR', containerDir] + logger.log_call(command) + logger.log("-TMP-vsys-status") + command = ['/usr/bin/systemctl', 'status', 'vsys'] + logger.log_call(command) + # ??? + # Remove rootfs of destroyed domain command = ['btrfs', 'subvolume', 'delete', containerDir] logger.log_call(command, timeout=BTRFS_TIMEOUT) - + # For some reason I am seeing this : #log_call: running command btrfs subvolume delete /vservers/inri_sl1 #log_call: ERROR: cannot delete '/vservers/inri_sl1' - Device or resource busy @@ -346,14 +383,20 @@ unset pathmunge if not os.path.exists(containerDir): logger.log('sliver_lxc.destroy: %s cleanly destroyed.'%name) else: - logger.log("-TMP-cwd %s : %s"%(name,os.getcwd())) - logger.log("-TMP-lsof %s"%name) - command=['lsof'] - logger.log_call(command) + # we're in / + #logger.log("-TMP-cwd %s : %s"%(name,os.getcwd())) + # also lsof never shows anything relevant; this is painful.. + #logger.log("-TMP-lsof %s"%name) + #command=['lsof'] + #logger.log_call(command) logger.log("-TMP-ls-l %s"%name) - command = ['ls', '-l', containerDir] + command = ['ls', '-lR', containerDir] + logger.log_call(command) + logger.log("-TMP-lsof") + command = ['lsof'] logger.log_call(command) if os.path.exists(containerDir): logger.log('sliver_lxc.destroy: ERROR could not cleanly destroy %s - giving up'%name) - if vsys_stopped: vsysStartService() + if vsys_stopped: + vsysStartService()
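A note on the reworked rerun_slice_vinit() above: instead of the old "not implemented yet" stub, the new code enters the container's namespace through libvirt and drives systemd directly, first a daemon-reload and then a restart of vinit.service. The snippet below is a minimal standalone sketch of that pattern using plain subprocess; run_in_sliver() and "some_slice" are hypothetical names used only for illustration, while the actual nodemanager code goes through logger.log_call() with a 3-second timeout.

import subprocess

def run_in_sliver(name, guest_cmd):
    # Hypothetical helper mirroring rerun_slice_vinit() above: enter the
    # libvirt-lxc namespace of container <name> and run a command inside it.
    command = ("virsh -c lxc:/// lxc-enter-namespace --noseclabel -- "
               "{} {}".format(name, guest_cmd)).split()
    return subprocess.call(command)

# Same order as in the diff: reload unit files, then restart the slice initscript wrapper.
# run_in_sliver("some_slice", "/usr/bin/systemctl --system daemon-reload")
# run_in_sliver("some_slice", "/usr/bin/systemctl restart vinit.service")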
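Similarly, the vref handling added to create() derives the libvirt guest architecture from the slice's vref tag (for example lxc-f18-x86_64) and falls back to x86_64 when the tag is missing or malformed. Below is a minimal sketch of that mapping, assuming the tag is laid out as pldistro-fcdistro-arch as the myplc tags listed in the diff suggest; arch_from_vref() is a hypothetical name, not a nodemanager function.

def arch_from_vref(vref, default="x86_64"):
    # Map a 'pldistro-fcdistro-arch' tag such as 'lxc-f18-x86_64'
    # to the architecture string handed to libvirt (x86_64 or i686).
    try:
        _, _, arch = vref.split('-')
        return "x86_64" if "64" in arch else "i686"
    except ValueError:
        # malformed tag: keep the hard-wired default, as the diff does
        return default

print(arch_from_vref("lxc-f18-x86_64"))   # -> x86_64
print(arch_from_vref("lxc-f18-i386"))     # -> i686
print(arch_from_vref("not-a-valid-tag"))  # -> x86_64 (fallback)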