# the util-vserver-pl module
import vserver
-import accounts
import logger
import tools
+from account import Account
+from initscript import Initscript
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP
# map "<rlimit>_min" / "<rlimit>_soft" / "<rlimit>_hard" to KEEP_LIMIT for
# every vserver RLIMIT, i.e. by default leave all guest limits untouched
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    rlim = rlimit.lower()
-    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
-    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
-    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
+    DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
+    DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
+    DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
-class Sliver_VS(accounts.Account, vserver.VServer):
+class Sliver_VS(vserver.VServer, Account, Initscript):
    """This class wraps vserver.VServer to make its interface closer to what we need."""
    # login shell exposed to slice users inside the guest
    SHELL = '/bin/vsh'
    # NOTE(review): presumably serializes the first disk-usage scan across
    # instances (cf. disk_usage_initialized) -- its use is not visible here
    _init_disk_info_sem = BoundedSemaphore()
    def __init__(self, rec):
        """Attach to the vserver named rec['name'].

        First chance: plain init.  On any error other than NoSuchVServer the
        guest is assumed corrupt and destroyed; either way it is (re)created
        and the init is attempted a second time.  rec must also carry
        'slice_id' and whatever configure() consumes.
        """
-        name=rec['name']
+        name = rec['name']
        logger.verbose ('sliver_vs: %s init'%name)
        try:
            logger.log("sliver_vs: %s: first chance..."%name)
-            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
+            vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
+            Account.__init__ (self, name)
+            Initscript.__init__ (self, name)
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
-                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
+                logger.log_exc("sliver_vs:__init__ (first chance) %s", name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
                self.destroy(name)
            self.create(name, rec)
-            logger.log("sliver_vs: %s: second chance..."%name)
-            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
+            vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
+            Account.__init__ (self, name)
+            Initscript.__init__ (self, name)
-        self.keys = ''
        self.rspec = {}
        self.slice_id = rec['slice_id']
        # flag consumed by set_resources() before applying the disk limit
        self.disk_usage_initialized = False
-        self.initscript = ''
        self.enabled = True
+        # xxx this almost certainly is wrong...
        self.configure(rec)
    @staticmethod
    # NOTE(review): the 'def create(name, rec):' line itself is elided by this
    # hunk -- the body below (using 'name' and 'rec') clearly belongs to the
    # static sliver-creation method.
        logger.verbose('sliver_vs: %s: create'%name)
        vref = rec['vref']
        if vref is None:
-            logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
            # added by caglar
            # band-aid for short period as old API doesn't have GetSliceFamily function
-            #return
            vref = "planetlab-f8-i386"
+            logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name, vref))
        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
-            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
+            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name, vref))
            return
-        # guess arch
+        # compute guest personality
        # vref is expected to look like 'planetlab-f8-i386'; the third
        # dash-separated field is taken as the architecture
        try:
-            (x,y,arch)=vref.split('-')
+            (x, y, arch) = vref.split('-')
            # mh, this of course applies when 'vref' is e.g. 'netflow'
            # and that's not quite right
        except:
-            arch='i386'
+            arch = 'i386'
        def personality (arch):
            # 32- vs 64-bit linux personality string for util-vserver
-            personality="linux32"
-            if arch.find("64")>=0:
-                personality="linux64"
-            return personality
+            return "linux64" if arch.find("64") >= 0 else "linux32"
-        command=[]
+        command = []
        # be verbose
-        command += ['/bin/bash','-x',]
+        command += ['/bin/bash', '-x', ]
        command += ['/usr/sbin/vuseradd', ]
        if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
-            command += [ "-i",]
+            command += [ "-i", ]
        # the vsliver imge to use
        command += [ '-t', vref, ]
        # slice name
        command += [ name, ]
-#        logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(command, timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
-            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
+            logger.log('sliver_vs: %s: set personality to %s'%(name, personality(arch)))
    @staticmethod
    def destroy(name):
        # tear down the guest for slice 'name' via vuserdel
-#        logger.log_call(['/usr/sbin/vuserdel', name, ])
-        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
+        # need to umount before we trash, otherwise we end up with sequels in
+        # /vservers/slicename/ (namely in home/ )
+        # also because this is a static method we cannot check for 'omf_control'
+        # but it is no big deal as umount_ssh_dir checks before it umounts..
+        Account.umount_ssh_dir(name)
+        logger.log("sliver_vs: destroying %s"%name)
+        logger.log_call(['/bin/bash', '-x', '/usr/sbin/vuserdel', name, ])
+
    def configure(self, rec):
        """Apply an updated sliver record: resources, initscript, ssh keys.

        NOTE(review): context lines are elided in this hunk -- 'new_rspec'
        below must be computed from 'rec' in a line not shown here.
        """
        # in case we update nodemanager..
        self.rspec = new_rspec
        self.set_resources()
-        new_initscript = rec['initscript']
-        if new_initscript != self.initscript:
-            self.initscript = new_initscript
-            # not used anymore, we always check against the installed script
-            #self.initscriptchanged = True
-            self.refresh_slice_vinit()
-
-        accounts.Account.configure(self, rec)  # install ssh keys
-
-        # unconditionnally install and enable the generic vinit script
-        # mimicking chkconfig for enabling the generic vinit script
-        # this is hardwired for runlevel 3
-    def install_and_enable_vinit (self):
-        vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
-        vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
-        rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
-        rc3_target="../init.d/vinit"
-        # install in sliver
-        code=file(vinit_source).read()
-        if tools.replace_file_with_string(vinit_script,code,chmod=0755):
-            logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
-        # create symlink for runlevel 3
-        if not os.path.islink(rc3_link):
-            try:
-                logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
-                os.symlink(rc3_target,rc3_link)
-            except:
-                logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
-
-    def rerun_slice_vinit(self):
-        command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
-        logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
-        subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
-
-    # this one checks for the existence of the slice initscript
-    # install or remove the slice inistscript, as instructed by the initscript tag
-    def refresh_slice_vinit(self):
-        code=self.initscript
-        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
-        if tools.replace_file_with_string(sliver_initscript,code,remove_if_empty=True,chmod=0755):
-            if code:
-                logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
-                if self.is_running():
-                    # Only need to rerun the initscript if the vserver is
-                    # already running. If the vserver isn't running, then the
-                    # initscript will automatically be started by
-                    # /etc/rc.d/vinit when the vserver is started.
-                    self.rerun_slice_vinit()
-            else:
-                logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
+        # do the configure part from Initscript
+        # i.e. install slice initscript if defined
+        Initscript.configure(self, rec)
+        # install ssh keys
+        Account.configure(self, rec)
+        # remember configure() always gets called *before* start()
+        # in particular the slice initscript
+        # is expected to be in place already at this point
    def start(self, delay=0):
        """Start the guest after 'delay' seconds; no-op when rspec disables it."""
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
+            return
+        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
+        time.sleep(delay)
+        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
+        self.install_and_enable_vinit()
+        # expose .ssh for omf_friendly slivers
+        if 'omf_control' in self.rspec['tags']:
+            Account.mount_ssh_dir(self.name)
+        child_pid = os.fork()
+        if child_pid == 0:
+            # VServer.start calls fork() internally,
+            # so just close the nonstandard fds and fork once to avoid creating zombies
+            tools.close_nonstandard_fds()
+            vserver.VServer.start(self)
+            os._exit(0)
        else:
-            logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
-            time.sleep(delay)
-            # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
-            self.install_and_enable_vinit()
-            # expose .ssh for omf_friendly slivers
-            if 'omf_control' in self.rspec['tags']:
-                self.expose_ssh_dir()
-            # if a change has occured in the slice initscript, reflect this in /etc/init.d/vinit.slice
-            self.refresh_slice_vinit()
-            child_pid = os.fork()
-            if child_pid == 0:
-                # VServer.start calls fork() internally,
-                # so just close the nonstandard fds and fork once to avoid creating zombies
-                tools.close_nonstandard_fds()
-                vserver.VServer.start(self)
-                os._exit(0)
-            else:
-                os.waitpid(child_pid, 0)
+            # parent: reap the intermediate child so it does not linger as a zombie
+            os.waitpid(child_pid, 0)
    def stop(self):
        # NOTE(review): only the log line is visible in this hunk -- the call
        # that actually stops the guest is presumably in elided context lines.
        logger.log('sliver_vs: %s: stopping' % self.name)
def is_running(self):
return vserver.VServer.is_running(self)
+    # this one seems to belong in Initscript at first sight,
+    # but actually depends on the underlying vm techno
+    # so let's keep it here
+    def rerun_slice_vinit(self):
+        # restart the slice initscript inside the guest context;
+        # fire-and-forget: '&' backgrounds it and all output is discarded
+        command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
+        logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
+        subprocess.call(command + "&", stdin=open('/dev/null', 'r'),
+                        stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
+
    def set_resources(self):
        """Push the rspec resource settings (disk, rlimits, capabilities,
        sysctls, slice id) down to the vserver guest.

        NOTE(review): this hunk elides many context lines -- the 'try:'
        headers matching the 'except' clauses below, and the definitions of
        'value' and 'vserver_config_path', are not visible here.
        """
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
            self.disk_usage_initialized = True
            # never shrink below what the guest currently occupies
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        except:
-            logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
+            logger.log_exc('sliver_vs: failed to set max disk usage', name=self.name)
        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
-            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
+            logger.log('sliver_vs: %s: setting capabilities to %s'
+                       % (self.name, self.rspec['capabilities']))
        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']
        # write each 'sysctl.*' rspec entry under a numbered config dir
        count = 1
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
-                sysctl=key.split('.')
+                sysctl = key.split('.')
                try:
                    # /etc/vservers/<guest>/sysctl/<id>/
                    dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
                    value.close()
                    count += 1
-                    logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
+                    logger.log("sliver_vs: %s: writing %s=%s"%(self.name, key, self.rspec[key]))
                except IOError, e:
-                    logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
-                    logger.log("sliver_vs: %s: error = %s"%(self.name,e))
+                    logger.log("sliver_vs: %s: could not set %s=%s"%(self.name, key, self.rspec[key]))
+                    logger.log("sliver_vs: %s: error = %s"%(self.name, e))
        if self.rspec['enabled'] > 0:
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            # record the slice id so other tools can map guest -> slice
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
-            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
-        except IOError,e:
-            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
-        except Exception,e:
-            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
+            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id, self.name))
+        except IOError as e:
+            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name, str(e)))
+        except Exception as e:
+            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e), name=self.name)
        if self.enabled == False: