File locking isn't exclusive across threads within the same process. Switched to regular...
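
The rationale, spelled out: POSIX file locks are owned by the process, not by the
thread that took them, so a second thread of the same nodemanager process acquires
an already-held fcntl lock immediately instead of blocking. A minimal sketch of the
difference, assuming a throwaway lock path (names are illustrative, not from this
commit):

    import fcntl, threading, time

    lockfile = open('/tmp/demo.lock', 'w')

    def grab(label):
        # POSIX record locks are per-process: the kernel sees a process that
        # already owns the lock, so this never blocks a sibling thread.
        fcntl.lockf(lockfile, fcntl.LOCK_EX)
        print '%s acquired the file lock' % label
        time.sleep(1)

    helper = threading.Thread(target=grab, args=('helper thread',))
    helper.start()
    grab('main thread')        # succeeds at once -- no mutual exclusion
    helper.join()

    # A threading primitive, by contrast, does serialize threads of one
    # process -- this is what globalsem below provides:
    sem = threading.BoundedSemaphore()
    sem.acquire()
    try:
        pass                   # critical section (e.g. disk-info setup)
    finally:
        sem.release()
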
[nodemanager.git] / sliver_vs.py
index dbe568f..3060b9f 100644
@@ -17,13 +17,16 @@ don't have to guess if there is a running process or not.
 """
 
 import errno
-import os
+import os, os.path
 import time
 import vserver
 
 import accounts
 import logger
 import tools
+from threading import BoundedSemaphore
+
+globalsem = BoundedSemaphore()
 
 # special constant that tells vserver to keep its existing settings
 KEEP_LIMIT = vserver.VC_LIM_KEEP
@@ -42,12 +45,12 @@ class Sliver_VS(accounts.Account, vserver.VServer):
 
     SHELL = '/bin/vsh'
     TYPE = 'sliver.VServer'
-    _init_disk_info_sem = tools.NMLock("/var/run/nm-disk-info.lock")
+    _init_disk_info_sem = globalsem
 
     def __init__(self, rec):
         logger.verbose ('initing Sliver_VS with name=%s'%rec['name'])
         try:
-            vserver.VServer.__init__(self, rec['name'])
+            vserver.VServer.__init__(self, rec['name'], logfile='/var/log/nm')
         except Exception, err:
             if not isinstance(err, vserver.NoSuchVServer):
                 # Probably a bad vserver or vserver configuration file
@@ -55,7 +58,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
                 logger.log('%s: recreating bad vserver' % rec['name'])
                 self.destroy(rec['name'])
             self.create(rec['name'], rec['vref'])
-            vserver.VServer.__init__(self, rec['name'])
+            vserver.VServer.__init__(self, rec['name'], logfile='/var/log/nm')
 
         self.keys = ''
         self.rspec = {}
@@ -67,10 +70,58 @@ class Sliver_VS(accounts.Account, vserver.VServer):
     @staticmethod
     def create(name, vref = None):
         logger.verbose('Sliver_VS:create - name=%s'%name)
-        if vref is not None:
-            logger.log_call('/usr/sbin/vuseradd', '-t', vref, name)
-        else:
-            logger.log_call('/usr/sbin/vuseradd', name)
+        if vref is None:
+            vref='default'
+        try:
+            ### locating the right slicefamily
+            # this is a first draft, more a proof of concept than a final design
+            # the idea is to parse vref for dash-separated wishes,
+            # and to project these against the default
+            # so e.g. if the default slice family (as found in /etc/planetlab/slicefamily)
+            # is planetlab-f8-i386, then here is what we get
+            # vref=x86_64             -> vuseradd -t planetlab-f8-x86_64 
+            # vref=centos5            -> vuseradd -t planetlab-centos5-i386 
+            # vref=centos5-onelab     -> vuseradd -t onelab-centos5-i386 
+            # vref=planetflow         -> vuseradd -t planetflow-f8-i386
+            # vref=x86_64-planetflow  -> vuseradd -t planetflow-f8-x86_64
+
+            # default
+            default=file("/etc/planetlab/slicefamily").read().strip()
+            (pldistro,fcdistro,arch) = default.split("-")
+            # from the slice attribute: cut dashes and try to figure the meaning
+            slice_wishes = vref.split("-")
+            for wish in slice_wishes:
+                if wish == "i386" or wish == "x86_64":
+                    arch=wish
+                elif wish == "f8" or wish == "centos5" :
+                    fcdistro=wish
+                else:
+                    pldistro=wish
+
+            # rejoin the parts
+            refname="-".join( (pldistro,fcdistro,arch) )
+
+            # check the template exists -- there's probably a better way...
+            if not os.path.isdir ("/vservers/.vref/%s"%refname):
+                logger.verbose("%s (%s) : vref %s not found, using default %s"%(
+                        name,vref,refname,default))
+                refname=default
+                # could check again, but since /etc/planetlab/slicefamily exists
+                # there's probably no /vservers/.vref/default
+
+        except IOError:
+            # have not found slicefamily
+            # fall back for legacy nodes
+            logger.verbose("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
+            refname="default"
+        except:
+            import traceback
+            logger.log("%s (%s) : unexpected error follows - using 'default'"%(
+                    name,vref))
+            logger.log(traceback.format_exc())
+            refname="default"
+
+        logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
         open('/vservers/%s/etc/slicename' % name, 'w').write(name)
 
     @staticmethod
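
The projection described in the comment block above is easy to isolate as a pure
function. A hedged, standalone rendering (the function name is made up for
illustration; the mappings come straight from the comments, against the default
planetlab-f8-i386):

    def project_slicefamily(vref, default):
        # split the default triple, then let each dash-separated wish
        # in vref override the component it matches
        (pldistro, fcdistro, arch) = default.split("-")
        for wish in vref.split("-"):
            if wish in ("i386", "x86_64"):
                arch = wish
            elif wish in ("f8", "centos5"):
                fcdistro = wish
            else:
                pldistro = wish
        return "-".join((pldistro, fcdistro, arch))

    # the examples from the comment block, used as checks
    default = "planetlab-f8-i386"
    assert project_slicefamily("x86_64", default) == "planetlab-f8-x86_64"
    assert project_slicefamily("centos5", default) == "planetlab-centos5-i386"
    assert project_slicefamily("centos5-onelab", default) == "onelab-centos5-i386"
    assert project_slicefamily("planetflow", default) == "planetflow-f8-i386"
    assert project_slicefamily("x86_64-planetflow", default) == "planetflow-f8-x86_64"
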
@@ -141,7 +192,10 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             minimum  = self.rspec['%s_min'%type]
             soft = self.rspec['%s_soft'%type]
             hard = self.rspec['%s_hard'%type]
-            self.set_rlimit_config(limit, hard, soft, minimum)
+            update = self.set_rlimit(limit, hard, soft, minimum)
+            if update:
+                logger.log('%s: setting rlimit %s to (%d, %d, %d)'
+                           % (self.name, type, hard, soft, minimum))
 
         self.set_capabilities_config(self.rspec['capabilities'])
         if self.rspec['capabilities']:
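
The rlimit hunk above switches from set_rlimit_config to a set_rlimit that reports
whether anything was written, so the caller logs only real updates. The actual
method lives in the vserver bindings; a hypothetical sketch of the contract the
hunk relies on (the _rlimits cache is invented for illustration):

    def set_rlimit(self, limit, hard, soft, minimum):
        # compare against the last-applied values and skip no-op writes
        if not hasattr(self, '_rlimits'):
            self._rlimits = {}                    # invented cache
        new = (hard, soft, minimum)
        if self._rlimits.get(limit) == new:
            return False                          # unchanged: caller stays quiet
        self.set_rlimit_config(limit, hard, soft, minimum)
        self._rlimits[limit] = new
        return True
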
@@ -159,17 +213,22 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
             self.set_bwlimit(*net_limits)
 
-        cpu_min = self.rspec['cpu_min']
+        cpu_pct = self.rspec['cpu_pct']
         cpu_share = self.rspec['cpu_share']
 
         if self.rspec['enabled'] > 0:
-            if cpu_min >= 50:  # at least 5%: keep people from shooting themselves in the foot
-                logger.log('%s: setting cpu share to %d%% guaranteed' % (self.name, cpu_min/10.0))
-                self.set_sched_config(cpu_min, vserver.SCHED_CPU_GUARANTEED)
+            if cpu_pct > 0:
+                logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
             else:
+                cpu_pct = 0
+
+            if cpu_share > 0:
                 logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
-                self.set_sched_config(cpu_share, 0)
+            else:
+                cpu_share = 0
 
+            self.set_sched_config(cpu_pct, cpu_share)
+            # if the IP address isn't set (even to 0.0.0.0), the sliver won't be able to use the network
             if self.rspec['ip_addresses'] != '0.0.0.0':
                 logger.log('%s: setting IP address(es) to %s' % (self.name, self.rspec['ip_addresses']))
             self.set_ipaddresses_config(self.rspec['ip_addresses'])
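
Taken together, the scheduler hunk replaces the old mode flag
(SCHED_CPU_GUARANTEED) with two independent knobs passed to a single call. A
hedged sketch of the caller-side contract (the function name and rspec values are
invented; set_sched_config comes from the diff):

    def apply_cpu_rspec(sliver, rspec):
        # negative or unset values clamp to zero, as in the hunk above
        cpu_pct = max(rspec['cpu_pct'], 0)        # hard reservation, in percent
        cpu_share = max(rspec['cpu_share'], 0)    # best-effort weight
        if rspec['enabled'] > 0:
            sliver.set_sched_config(cpu_pct, cpu_share)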