import grp
from pwd import getpwnam
from string import Template
-from plugins.vsys import removeSliverFromVsys
+
+# vsys probably should not be a plugin
+# the right way to handle this would be:
+# if slivers get created by doing a, b, c
+# then they should be deleted by doing c, b, a
+# the current ordering model for vsys plugins completely fails to capture that
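+# purely hypothetical sketch of a symmetric model (names below are made up):
+#   setup_steps = [add_sliver_to_vsys, create_rootfs, define_domain]
+#   def create_sliver(name):
+#       for step in setup_steps: step(name)
+#   def destroy_sliver(name):
+#       for step in reversed(setup_steps): step(name)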
+from plugins.vsys import removeSliverFromVsys, startService as vsysStartService
import libvirt
from account import Account
from sliver_libvirt import Sliver_Libvirt
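+# generous timeout used for the btrfs commands below (snapshot / subvolume delete)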
+BTRFS_TIMEOUT = 15*60
+
class Sliver_LXC(Sliver_Libvirt, Initscript):
"""This class wraps LXC commands"""
REF_IMG_BASE_DIR = '/vservers/.lvref'
CON_BASE_DIR = '/vservers'
- def __init__ (self, rec):
+ def __init__(self, rec):
name=rec['name']
- Sliver_Libvirt.__init__ (self,rec)
- Initscript.__init__ (self,name)
+ Sliver_Libvirt.__init__(self,rec)
+ Initscript.__init__(self,name)
- def configure (self, rec):
- Sliver_Libvirt.configure (self,rec)
+ def configure(self, rec):
+ Sliver_Libvirt.configure(self, rec)
# in case we update nodemanager..
self.install_and_enable_vinit()
# do the configure part from Initscript
- Initscript.configure(self,rec)
+ Initscript.configure(self, rec)
def start(self, delay=0):
if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
# expose .ssh for omf_friendly slivers
if 'tags' in self.rspec and 'omf_control' in self.rspec['tags']:
Account.mount_ssh_dir(self.name)
- Sliver_Libvirt.start (self, delay)
+ Sliver_Libvirt.start(self, delay)
        # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
self.refresh_slice_vinit()
- def rerun_slice_vinit (self):
- """This is called whenever the initscript code changes"""
- # xxx - todo - not sure exactly how to:
- # (.) invoke something in the guest
- # (.) which options of systemctl should be used to trigger a restart
- # should not prevent the first run from going fine hopefully
- logger.log("WARNING: sliver_lxc.rerun_slice_vinit not implemented yet")
-
+ def rerun_slice_vinit(self):
+ """This is called at startup, and whenever the initscript code changes"""
+ logger.log("sliver_lxc.rerun_slice_vinit {}".format(self.name))
+ plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl --system daemon-reload"\
+ .format(self.name)
+ command = plain.split()
+ logger.log_call(command, timeout=3)
+ plain = "virsh -c lxc:/// lxc-enter-namespace --noseclabel -- {} /usr/bin/systemctl restart vinit.service"\
+ .format(self.name)
+ command = plain.split()
+ logger.log_call(command, timeout=3)
+
+
@staticmethod
def create(name, rec=None):
- ''' Create dirs, copy fs image, lxc_create '''
- logger.verbose ('sliver_lxc: %s create'%(name))
+ '''
+ Create dirs, copy fs image, lxc_create
+ '''
+ logger.verbose('sliver_lxc: %s create' % name)
conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)
+ vref = rec['vref']
+ if vref is None:
+ vref = "lxc-f18-x86_64"
+ logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+ # compute guest arch from vref
+ # essentially we want x86_64 (default) or i686 here for libvirt
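+    # e.g. the default vref "lxc-f18-x86_64" yields arch "x86_64"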
+ try:
+ (x, y, arch) = vref.split('-')
+ arch = "x86_64" if arch.find("64")>=0 else "i686"
+ except:
+ arch = 'x86_64'
+
# Get the type of image from vref myplc tags specified as:
# pldistro = lxc
# fcdistro = squeeze
if arch == 'i386':
arch = 'i686'
- vref = rec['vref']
- if vref is None:
- vref = "lxc-f18-x86_64"
- logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+
+
refImgDir = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, vref)
containerDir = os.path.join(Sliver_LXC.CON_BASE_DIR, name)
logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name,refImgDir))
return
- # Snapshot the reference image fs (assume the reference image is in its own
- # subvolume)
+# this hopefully should be fixed now
+# # in fedora20 we have some difficulty in properly cleaning up /vservers/<slicename>
+# # also note that running e.g. btrfs subvolume snapshot /vservers/.lvref/image /vservers/foo
+# # behaves differently depending on whether /vservers/foo exists or not:
+# # if /vservers/foo does not exist, it creates /vservers/foo
+# # but if it does exist, then it creates /vservers/foo/image !!
+# # so we need to check the expected container rootfs does not exist yet
+# # this hopefully could be removed in a future release
+# if os.path.exists (containerDir):
+# logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name,containerDir))
+# command = ['btrfs', 'subvolume', 'delete', containerDir]
+# logger.log_call(command, BTRFS_TIMEOUT)
+# # re-check
+# if os.path.exists (containerDir):
+# logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name,containerDir))
+# return
+
+ # Snapshot the reference image fs
+ # this assumes the reference image is in its own subvolume
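+    # (btrfs can only snapshot a subvolume, not a plain directory)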
command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir]
- if not logger.log_call(command, timeout=15*60):
+ if not logger.log_call(command, timeout=BTRFS_TIMEOUT):
        logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at %s' % containerDir)
return
command = ['chmod', '755', containerDir]
- logger.log_call(command, timeout=15*60)
+ logger.log_call(command)
# TODO: set quotas...
group = grp.getgrnam('slices')
except:
command = ['/usr/sbin/groupadd', 'slices']
- logger.log_call(command, timeout=15*60)
+ logger.log_call(command)
# Add unix account (TYPE is specified in the subclass)
command = ['/usr/sbin/useradd', '-g', 'slices', '-s', Sliver_LXC.SHELL, name, '-p', '*']
- logger.log_call(command, timeout=15*60)
+ logger.log_call(command)
command = ['mkdir', '/home/%s/.ssh'%name]
- logger.log_call(command, timeout=15*60)
+ logger.log_call(command)
# Create PK pair keys to connect from the host to the guest without
# password... maybe remove the need for authentication inside the
# guest?
command = ['su', '-s', '/bin/bash', '-c', 'ssh-keygen -t rsa -N "" -f /home/%s/.ssh/id_rsa'%(name)]
- logger.log_call(command, timeout=60)
+ logger.log_call(command)
command = ['chown', '-R', '%s.slices'%name, '/home/%s/.ssh'%name]
- logger.log_call(command, timeout=30)
+ logger.log_call(command)
command = ['mkdir', '%s/root/.ssh'%containerDir]
- logger.log_call(command, timeout=10)
+ logger.log_call(command)
command = ['cp', '/home/%s/.ssh/id_rsa.pub'%name, '%s/root/.ssh/authorized_keys'%containerDir]
- logger.log_call(command, timeout=30)
+ logger.log_call(command)
logger.log("creating /etc/slicename file in %s" % os.path.join(containerDir,'etc/slicename'))
try:
if uid is not None:
logger.log("uid is %d" % uid)
command = ['mkdir', '%s/home/%s' % (containerDir, name)]
- logger.log_call(command, timeout=10)
+ logger.log_call(command)
command = ['chown', name, '%s/home/%s' % (containerDir, name)]
- logger.log_call(command, timeout=10)
+ logger.log_call(command)
etcpasswd = os.path.join(containerDir, 'etc/passwd')
etcgroup = os.path.join(containerDir, 'etc/group')
if os.path.exists(etcpasswd):
logger.log_exc("exception while updating /etc/sudoers")
# customizations for the user environment - root or slice uid
- # we save the whole business in /etc/planetlab.profile
+ # we save the whole business in /etc/planetlab.profile
# and source this file for both root and the slice uid's .profile
        # prompt for slice owner, + LD_PRELOAD to transparently wrap bind
pl_profile=os.path.join(containerDir,"etc/planetlab.profile")
# if dir is not yet existing let's forget it for now
if not os.path.isdir(os.path.dirname(from_root)): continue
found=False
- try:
+ try:
contents=file(from_root).readlines()
for content in contents:
if content==enforced_line: found=True
# in case we create the slice's .profile when writing
if from_root.find("/home")>=0:
command=['chown','%s:slices'%name,from_root]
- logger.log_call(command,timeout=5)
+ logger.log_call(command)
# Lookup for xid and create template after the user is created so we
# can get the correct xid based on the name of the slice
# Destroy libvirt domain
dom = conn.lookupByName(name)
except:
- logger.verbose('sliver_lxc: Domain %s does not exist!' % name)
+ logger.verbose('sliver_lxc.destroy: Domain %s does not exist!' % name)
+ return
+
+ # Slivers with vsys running will fail the subvolume delete
+    # removeSliverFromVsys returns True if it stops vsys, telling us to start it again later
+    vsys_stopped = removeSliverFromVsys(name)
try:
+ logger.log("sliver_lxc.destroy: destroying domain %s"%name)
dom.destroy()
except:
- logger.verbose('sliver_lxc: Domain %s not running... continuing.' % name)
+ logger.verbose('sliver_lxc.destroy: Domain %s not running... continuing.' % name)
try:
+ logger.log("sliver_lxc.destroy: undefining domain %s"%name)
dom.undefine()
except:
- logger.verbose('sliver_lxc: Domain %s is not defined... continuing.' % name)
+ logger.verbose('sliver_lxc.destroy: Domain %s is not defined... continuing.' % name)
# Remove user after destroy domain to force logout
command = ['/usr/sbin/userdel', '-f', '-r', name]
- logger.log_call(command, timeout=15*60)
+ logger.log_call(command)
- # Slivers with vsys running will fail the subvolume delete.
- # A more permanent solution may be to ensure that the vsys module
- # is called before the sliver is destroyed.
- removeSliverFromVsys (name)
+ # Remove rootfs of destroyed domain
+ command = ['/usr/bin/rm', '-rf', containerDir]
+ logger.log_call(command, timeout=BTRFS_TIMEOUT)
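+    # rm -rf presumably cannot remove the btrfs subvolume itself, only its contents,
+    # hence the 'btrfs subvolume delete' attempt below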
+
+    # TMP debugging: the btrfs subvolume delete below sometimes fails (see the comment after it);
+    # log the leftover contents of containerDir and the vsys status before attempting it
+ logger.log("-TMP-ls-l %s"%name)
+ command = ['ls', '-lR', containerDir]
+ logger.log_call(command)
+ logger.log("-TMP-vsys-status")
+ command = ['/usr/bin/systemctl', 'status', 'vsys']
+ logger.log_call(command)
+    # end of TMP debugging
# Remove rootfs of destroyed domain
command = ['btrfs', 'subvolume', 'delete', containerDir]
- logger.log_call(command, timeout=60)
-
- if os.path.exists(containerDir):
- # oh no, it's still here...
- logger.log("WARNING: failed to destroy container %s" % containerDir)
-
- logger.verbose('sliver_libvirt: %s destroyed.'%name)
-
+ logger.log_call(command, timeout=BTRFS_TIMEOUT)
+
+    # For some reason I am seeing this:
+ #log_call: running command btrfs subvolume delete /vservers/inri_sl1
+ #log_call: ERROR: cannot delete '/vservers/inri_sl1' - Device or resource busy
+ #log_call: Delete subvolume '/vservers/inri_sl1'
+ #log_call:end command (btrfs subvolume delete /vservers/inri_sl1) returned with code 1
+ #
+ # something must have an open handle to a file in there, but I can't find out what it is
+ # the following code aims at gathering data on what is going on in the system at this point in time
+ # note that some time later (typically when the sliver gets re-created) the same
+ # attempt at deleting the subvolume does work
+ # also lsof never shows anything relevant; this is painful..
+
+ if not os.path.exists(containerDir):
+ logger.log('sliver_lxc.destroy: %s cleanly destroyed.'%name)
+ else:
+ # we're in /
+ #logger.log("-TMP-cwd %s : %s"%(name,os.getcwd()))
+ # also lsof never shows anything relevant; this is painful..
+ #logger.log("-TMP-lsof %s"%name)
+ #command=['lsof']
+ #logger.log_call(command)
+ logger.log("-TMP-ls-l %s"%name)
+ command = ['ls', '-lR', containerDir]
+ logger.log_call(command)
+ logger.log("-TMP-lsof")
+ command = ['lsof']
+ logger.log_call(command)
+ if os.path.exists(containerDir):
+ logger.log('sliver_lxc.destroy: ERROR could not cleanly destroy %s - giving up'%name)
+
+ if vsys_stopped:
+ vsysStartService()