log subprocess calls.
[nodemanager.git] / sliver_vs.py
index ab5c391..5573984 100644
@@ -17,13 +17,18 @@ don't have to guess if there is a running process or not.
 """
 
 import errno
-import os
+import traceback
+import os, os.path
 import time
+
 import vserver
 
 import accounts
 import logger
 import tools
+from threading import BoundedSemaphore
+
+globalsem = BoundedSemaphore()
 
 # special constant that tells vserver to keep its existing settings
 KEEP_LIMIT = vserver.VC_LIM_KEEP
@@ -42,7 +47,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
 
     SHELL = '/bin/vsh'
     TYPE = 'sliver.VServer'
-    _init_disk_info_sem = tools.NMLock("/var/run/nm-disk-info.lock")
+    _init_disk_info_sem = globalsem
 
     def __init__(self, rec):
         logger.verbose ('initing Sliver_VS with name=%s'%rec['name'])
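
The file-based tools.NMLock is replaced here by an in-process threading.BoundedSemaphore shared through globalsem. A minimal sketch of how such a guard serializes the disk scan, following the acquire/try/finally pattern used later in set_resources(); the scan_disk_usage() wrapper is illustrative, not code from this commit:

    from threading import BoundedSemaphore

    _init_disk_info_sem = BoundedSemaphore()

    def scan_disk_usage(sliver):
        # Only one thread at a time may run the (slow) disk scan;
        # try/finally guarantees the semaphore is released even if
        # init_disk_info() raises.
        _init_disk_info_sem.acquire()
        try:
            sliver.init_disk_info()
        finally:
            _init_disk_info_sem.release()
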
@@ -60,6 +65,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         self.keys = ''
         self.rspec = {}
         self.initscript = ''
+        self.slice_id = rec['slice_id']
         self.disk_usage_initialized = False
         self.initscriptchanged = False
         self.configure(rec)
@@ -67,11 +73,75 @@ class Sliver_VS(accounts.Account, vserver.VServer):
     @staticmethod
     def create(name, vref = None):
         logger.verbose('Sliver_VS:create - name=%s'%name)
-        if vref is not None:
-            logger.log_call('/usr/sbin/vuseradd', '-t', vref, name)
-        else:
-            logger.log_call('/usr/sbin/vuseradd', name)
-        open('/vservers/%s/etc/slicename' % name, 'w').write(name)
+        if vref is None:
+            vref='default'
+        try:
+            ### locating the right slicefamily
+            # this is a first draft, more of a proof of concept
+            # the idea is to parse vref for dash-separated wishes,
+            # and to project these against the defaults
+            # so e.g. if the default slice family (as found in /etc/planetlab/slicefamily)
+            # is planetlab-f8-i386, then here is what we get
+            # vref=x86_64             -> vuseradd -t planetlab-f8-x86_64 
+            # vref=centos5            -> vuseradd -t planetlab-centos5-i386 
+            # vref=centos5-onelab     -> vuseradd -t onelab-centos5-i386 
+            # vref=planetflow         -> vuseradd -t planetflow-f8-i386
+            # vref=x86_64-planetflow  -> vuseradd -t planetflow-f8-x86_64
+
+            # default
+            default=file("/etc/planetlab/slicefamily").read().strip()
+            (pldistro,fcdistro,arch) = default.split("-")
+
+            known_archs = [ 'i386', 'x86_64' ]
+            known_fcdistros = [ 'f8', 'f9', 'centos5' ]
+            # from the slice attribute: cut dashes and try to figure the meaning
+            slice_wishes = vref.split("-")
+            for wish in slice_wishes:
+                if wish in known_archs:
+                    arch=wish
+                elif wish in known_fcdistros:
+                    fcdistro=wish
+                else:
+                    pldistro=wish
+
+            # rejoin the parts
+            refname="-".join( (pldistro,fcdistro,arch) )
+
+            # check the template exists -- there's probably a better way..
+            if not os.path.isdir ("/vservers/.vref/%s"%refname):
+                logger.log("%s (%s) : vref %s not found, using default %s"%(
+                        name,vref,refname,default))
+                refname=default
+                # reset so arch is right
+                (pldistro,fcdistro,arch) = default.split("-")
+                # could check again, but since we have /etc/planetlab/slicefamily
+                # there's probably no /vservers/.vref/default
+
+        except IOError:
+            # have not found slicefamily
+            logger.log("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
+            # for legacy nodes
+            refname="default"
+            arch="i386"
+        except:
+            logger.log("%s (%s) : unexpected error follows - using 'default'"%(name,vref))
+            logger.log(traceback.format_exc())
+            refname="default"
+            arch="i386"
+            
+        def personality (arch):
+            personality="linux32"
+            if arch.find("64")>=0:
+                personality="linux64"
+            return personality
+
+        logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
+        # export slicename to the slice in /etc/slicename
+        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
+        # set personality: only if needed (i.e. if the archs differ)
+        if tools.root_context_arch() != arch:
+            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch))
+            logger.log('%s: set personality to %s'%(name,personality(arch)))
 
     @staticmethod
     def destroy(name): logger.log_call('/usr/sbin/vuserdel', name)
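
The comment block in the create() hunk above projects the slice's dash-separated vref "wishes" onto the default slicefamily. A standalone sketch of that projection; the function name is illustrative, while the known arch/fcdistro lists and the worked example come from the diff:

    KNOWN_ARCHS = ['i386', 'x86_64']
    KNOWN_FCDISTROS = ['f8', 'f9', 'centos5']

    def project_slicefamily(default, vref):
        # default is e.g. "planetlab-f8-i386" (the contents of
        # /etc/planetlab/slicefamily); each dash-separated wish in vref
        # overrides the component it is recognized as.
        (pldistro, fcdistro, arch) = default.split("-")
        for wish in vref.split("-"):
            if wish in KNOWN_ARCHS:
                arch = wish
            elif wish in KNOWN_FCDISTROS:
                fcdistro = wish
            else:
                pldistro = wish
        return "-".join((pldistro, fcdistro, arch))

    # project_slicefamily("planetlab-f8-i386", "x86_64-planetflow")
    #   -> "planetflow-f8-x86_64"
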
@@ -85,16 +155,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         new_initscript = rec['initscript']
         if new_initscript != self.initscript:
             self.initscript = new_initscript
-            logger.log('%s: installing initscript' % self.name)
-            def install_initscript():
-                flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
-                fd = os.open('/etc/rc.vinit', flags, 0755)
-                os.write(fd, new_initscript)
-                os.close(fd)
-            try:
-                self.chroot_call(install_initscript)
-                self.initscriptchanged = True
-            except: logger.log_exc(self.name)
+            self.initscriptchanged = True
 
         accounts.Account.configure(self, rec)  # install ssh keys
 
@@ -102,28 +163,43 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         if self.rspec['enabled'] > 0:
             logger.log('%s: starting in %d seconds' % (self.name, delay))
             time.sleep(delay)
+            # VServer.start calls fork() internally, 
+            # so just close the nonstandard fds and fork once to avoid creating zombies
             child_pid = os.fork()
             if child_pid == 0:
-                # VServer.start calls fork() internally, so just close the nonstandard fds and fork once to avoid creating zombies
+                if self.initscriptchanged:
+                    logger.log('%s: installing initscript' % self.name)
+                    def install_initscript():
+                        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
+                        fd = os.open('/etc/rc.vinit', flags, 0755)
+                        os.write(fd, self.initscript)
+                        os.close(fd)
+                    try:
+                        self.chroot_call(install_initscript)
+                    except: logger.log_exc(self.name)
                 tools.close_nonstandard_fds()
-                vserver.VServer.start(self, True)
+                vserver.VServer.start(self)
                 os._exit(0)
-            else: os.waitpid(child_pid, 0)
+            else: 
+                os.waitpid(child_pid, 0)
+                self.initscriptchanged = False
         else: logger.log('%s: not starting, is not enabled' % self.name)
-        self.initscriptchanged = False
 
     def stop(self):
         logger.log('%s: stopping' % self.name)
         vserver.VServer.stop(self)
 
-    def set_resources(self):
+    def is_running(self): 
+        return vserver.VServer.is_running(self)
+
+    def set_resources(self,setup=False):
         disk_max = self.rspec['disk_max']
         logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
         try:  # if the sliver is over quota, .set_disk_limit will throw an exception
             if not self.disk_usage_initialized:
                 self.vm_running = False
-                logger.log('%s: computing disk usage: beginning' % self.name)
                 Sliver_VS._init_disk_info_sem.acquire()
+                logger.log('%s: computing disk usage: beginning' % self.name)
                 try: self.init_disk_info()
                 finally: Sliver_VS._init_disk_info_sem.release()
                 logger.log('%s: computing disk usage: ended' % self.name)
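
start() now installs the initscript inside the forked child and keeps the single fork()/waitpid() pattern explained in the comment (VServer.start forks again internally). A minimal standalone sketch of that pattern; run_in_child() and its worker argument are illustrative, not code from this commit:

    import os

    def run_in_child(worker):
        # Fork once: the child does its work and leaves via os._exit()
        # so it never returns into the parent's code paths, and the
        # parent reaps it immediately to avoid leaving a zombie.
        child_pid = os.fork()
        if child_pid == 0:
            try:
                worker()
            finally:
                os._exit(0)
        else:
            os.waitpid(child_pid, 0)
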
@@ -141,44 +217,57 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             minimum  = self.rspec['%s_min'%type]
             soft = self.rspec['%s_soft'%type]
             hard = self.rspec['%s_hard'%type]
-            self.set_rlimit_config(limit, hard, soft, minimum)
+            update = self.set_rlimit(limit, hard, soft, minimum)
+            if update:
+                logger.log('%s: setting rlimit %s to (%d, %d, %d)'
+                           % (self.name, type, hard, soft, minimum))
 
         self.set_capabilities_config(self.rspec['capabilities'])
         if self.rspec['capabilities']:
             logger.log('%s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
 
-        if False: # this code was commented out before
-            # N.B. net_*_rate are in kbps because of XML-RPC maxint
-            # limitations, convert to bps which is what bwlimit.py expects.
-            net_limits = (self.rspec['net_min_rate'] * 1000,
-                          self.rspec['net_max_rate'] * 1000,
-                          self.rspec['net_i2_min_rate'] * 1000,
-                          self.rspec['net_i2_max_rate'] * 1000,
-                          self.rspec['net_share'])
-            logger.log('%s: setting net limits to %s bps' % (self.name, net_limits[:-1]))
-            logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
-            self.set_bwlimit(*net_limits)
-
-        cpu_min = self.rspec['cpu_min']
+        cpu_pct = self.rspec['cpu_pct']
         cpu_share = self.rspec['cpu_share']
 
+        if setup:
+            for key in self.rspec.keys():
+                if key.find('sysctl.') == 0:
+                    sysctl=key.split('.')
+                    try:
+                        path="/proc/sys/%s" % ("/".join(sysctl[1:]))
+                        logger.log("%s: opening %s"%(self.name,path))
+                        flags = os.O_WRONLY
+                        fd = os.open(path, flags)
+                        logger.log("%s: writing %s=%s"%(self.name,key,self.rspec[key]))
+                        os.write(fd,self.rspec[key])
+                        os.close(fd)
+                    except IOError, e:
+                        logger.log("%s: could not set %s=%s"%(self.name,key,self.rspec[key]))
+                        logger.log("%s: error = %s"%(self.name,e))
+
+
         if self.rspec['enabled'] > 0:
-            if cpu_min > 0:
-                logger.log('%s: setting cpu to %d%% guaranteed' % (self.name, cpu_min))
+            if cpu_pct > 0:
+                logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
             else:
-                cpu_min = 0
+                cpu_pct = 0
 
             if cpu_share > 0:
                 logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
             else:
                 cpu_share = 0
 
-            self.set_sched_config(cpu_min, cpu_share)
+            self.set_sched_config(cpu_pct, cpu_share)
             # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
             if self.rspec['ip_addresses'] != '0.0.0.0':
-                logger.log('%s: setting IP address(es) to %s' % (self.name, self.rspec['ip_addresses']))
+                logger.log('%s: setting IP address(es) to %s' % \
+                (self.name, self.rspec['ip_addresses']))
             self.set_ipaddresses_config(self.rspec['ip_addresses'])
 
+            if self.is_running():
+                logger.log("%s: Setting name to %s" % (self.name, self.slice_id),2)
+                self.setname(self.slice_id)
             if False: # Does not work properly yet.
                 if self.have_limits_changed():
                     logger.log('%s: limits have changed --- restarting' % self.name)