Added re-creation of broken vservers. Also added a try/except around the API's eval of the RPC method.
[nodemanager.git] / sliver_vs.py
index f93a4f0..bed3e62 100644 (file)
@@ -25,24 +25,42 @@ import accounts
 import logger
 import tools
 
+# special constant that tells vserver to keep its existing settings
+KEEP_LIMIT = vserver.VC_LIM_KEEP
+
+# populate the sliver/vserver specific default allocations table,
+# which is used to look for slice attributes
+DEFAULT_ALLOCATION = {}
+for rlimit in vserver.RLIMITS.keys():
+    rlim = rlimit.lower()
+    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
+    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
+    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
 
 class Sliver_VS(accounts.Account, vserver.VServer):
     """This class wraps vserver.VServer to make its interface closer to what we need."""
 
     SHELL = '/bin/vsh'
     TYPE = 'sliver.VServer'
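+    # one lock shared by all slivers; used below to serialize init_disk_info() calls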
+    _init_disk_info_sem = tools.NMLock("/var/run/nm-disk-info.lock")
 
     def __init__(self, rec):
         try:
             vserver.VServer.__init__(self, rec['name'])
-        except vserver.NoSuchVServer:
-            self.create(rec['name'], rec['type'])
+        except Exception, err:
+            if not isinstance(err, vserver.NoSuchVServer):
+                # Probably a bad vserver or vserver configuration file
+                logger.log_exc(rec['name'])
+                logger.log('%s: recreating bad vserver' % rec['name'])
+                self.destroy(rec['name'])
+            self.create(rec['name'], rec['vref'])
             vserver.VServer.__init__(self, rec['name'])
 
         self.keys = ''
         self.rspec = {}
         self.initscript = ''
         self.disk_usage_initialized = False
+        self.initscriptchanged = False
         self.configure(rec)
 
     @staticmethod
@@ -51,6 +69,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             logger.log_call('/usr/sbin/vuseradd', '-t', vref, name)
         else:
             logger.log_call('/usr/sbin/vuseradd', name)
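+        # record the slice's own name inside the new vserver image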
+        open('/vservers/%s/etc/slicename' % name, 'w').write(name)
 
     @staticmethod
     def destroy(name): logger.log_call('/usr/sbin/vuserdel', name)
@@ -70,23 +89,26 @@ class Sliver_VS(accounts.Account, vserver.VServer):
                 fd = os.open('/etc/rc.vinit', flags, 0755)
                 os.write(fd, new_initscript)
                 os.close(fd)
-            try: self.chroot_call(install_initscript)
-            except: logger.log_exc()
+            try:
+                self.chroot_call(install_initscript)
+                self.initscriptchanged = True
+            except: logger.log_exc(self.name)
 
         accounts.Account.configure(self, rec)  # install ssh keys
 
     def start(self, delay=0):
-        if self.rspec['enabled']:
+        if self.rspec['enabled'] > 0:
             logger.log('%s: starting in %d seconds' % (self.name, delay))
+            time.sleep(delay)
             child_pid = os.fork()
             if child_pid == 0:
                 # VServer.start calls fork() internally, so just close the nonstandard fds and fork once to avoid creating zombies
                 tools.close_nonstandard_fds()
-                time.sleep(delay)
                 vserver.VServer.start(self, True)
                 os._exit(0)
             else: os.waitpid(child_pid, 0)
         else: logger.log('%s: not starting, is not enabled' % self.name)
+        self.initscriptchanged = False
 
     def stop(self):
         logger.log('%s: stopping' % self.name)
@@ -98,28 +120,61 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         try:  # if the sliver is over quota, .set_disk_limit will throw an exception
             if not self.disk_usage_initialized:
                 self.vm_running = False
-                logger.log('%s: computing disk usage' % self.name)
-                self.init_disk_info()
+                logger.log('%s: computing disk usage: beginning' % self.name)
+                Sliver_VS._init_disk_info_sem.acquire()
+                try: self.init_disk_info()
+                finally: Sliver_VS._init_disk_info_sem.release()
+                logger.log('%s: computing disk usage: ended' % self.name)
                 self.disk_usage_initialized = True
-            vserver.VServer.set_disklimit(self, disk_max)
-        except OSError:
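+            # max() keeps the limit from dropping below self.disk_blocks
+            # (presumably the current usage computed by init_disk_info)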
+            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
+        except:
             logger.log('%s: failed to set max disk usage' % self.name)
-            logger.log_exc()
-
-        net_limits = (self.rspec['net_min'], self.rspec['net_max'], self.rspec['net2_min'], self.rspec['net2_max'], self.rspec['net_share'])
-        logger.log('%s: setting net limits to %s bps' % (self.name, net_limits[:-1]))
-        logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
-        self.set_bwlimit(*net_limits)
+            logger.log_exc(self.name)
+
+        # get/set the min/soft/hard values for all of the vserver
+        # related RLIMITS.  Note that vserver currently only
+        # implements support for hard limits.
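+        # e.g. for an 'RSS' entry in RLIMITS this reads rspec['rss_min'],
+        # rspec['rss_soft'] and rspec['rss_hard'], the same keys defaulted
+        # in DEFAULT_ALLOCATION above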
+        for limit in vserver.RLIMITS.keys():
+            type = limit.lower()
+            minimum = self.rspec['%s_min' % type]
+            soft = self.rspec['%s_soft' % type]
+            hard = self.rspec['%s_hard' % type]
+            self.set_rlimit_config(limit, hard, soft, minimum)
+
+        if False: # this code was commented out before
+            # N.B. net_*_rate are in kbps because of XML-RPC maxint
+            # limitations, convert to bps which is what bwlimit.py expects.
+            net_limits = (self.rspec['net_min_rate'] * 1000,
+                          self.rspec['net_max_rate'] * 1000,
+                          self.rspec['net_i2_min_rate'] * 1000,
+                          self.rspec['net_i2_max_rate'] * 1000,
+                          self.rspec['net_share'])
+            logger.log('%s: setting net limits to %s bps' % (self.name, net_limits[:-1]))
+            logger.log('%s: setting net share to %d' % (self.name, net_limits[-1]))
+            self.set_bwlimit(*net_limits)
 
         cpu_min = self.rspec['cpu_min']
         cpu_share = self.rspec['cpu_share']
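+        # the checks and logging below treat cpu_min as tenths of a percent:
+        # 50 corresponds to a 5% guarantee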
-        if self.rspec['enabled']:
+
+        if self.rspec['enabled'] > 0:
             if cpu_min >= 50:  # at least 5%: keep people from shooting themselves in the foot
                 logger.log('%s: setting cpu share to %d%% guaranteed' % (self.name, cpu_min/10.0))
                 self.set_sched_config(cpu_min, vserver.SCHED_CPU_GUARANTEED)
             else:
                 logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
                 self.set_sched_config(cpu_share, 0)
+
+            if False: # Does not work properly yet.
+                if self.have_limits_changed():
+                    logger.log('%s: limits have changed --- restarting' % self.name)
+                    stopcount = 10
+                    while self.is_running() and stopcount > 0:
+                        self.stop()
+                        time.sleep(1)
+                        stopcount -= 1
+                    self.start()
+
         else:  # tell vsh to disable remote login by setting CPULIMIT to 0
             logger.log('%s: disabling remote login' % self.name)
             self.set_sched_config(0, 0)