Setting tag nodemanager-1.8-39
[nodemanager.git] / sliver_vs.py
index 5167ba0..57de4f9 100644 (file)
@@ -17,14 +17,18 @@ don't have to guess if there is a running process or not.
 """
 
 import errno
+import traceback
 import os, os.path
+import sys
 import time
+
 import vserver
 
 import accounts
 import logger
 import tools
 from threading import BoundedSemaphore
+import subprocess
 
 globalsem = BoundedSemaphore()
 
@@ -66,6 +70,7 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         self.slice_id = rec['slice_id']
         self.disk_usage_initialized = False
         self.initscriptchanged = False
+        self.enabled = True
         self.configure(rec)
 
     @staticmethod
@@ -89,12 +94,15 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             # default
             default=file("/etc/planetlab/slicefamily").read().strip()
             (pldistro,fcdistro,arch) = default.split("-")
+
+            known_archs = [ 'i386', 'x86_64' ]
+            known_fcdistros = [ 'centos5', 'f8', 'f9', 'f10', 'f11', 'f12' ]
             # from the slice attribute: cut dashes and try to figure the meaning
             slice_wishes = vref.split("-")
             for wish in slice_wishes:
-                if wish == "i386" or wish == "x86_64":
+                if wish in known_archs:
                     arch=wish
-                elif wish == "f8" or wish == "centos5" :
+                elif wish in known_fcdistros:
                     fcdistro=wish
                 else:
                     pldistro=wish
@@ -102,33 +110,49 @@ class Sliver_VS(accounts.Account, vserver.VServer):
             # rejoin the parts
             refname="-".join( (pldistro,fcdistro,arch) )
 
-            # check the templates exists -- there's probably a better way..
+            # check the template exists -- there's probably a better way..
             if not os.path.isdir ("/vservers/.vref/%s"%refname):
-                logger.verbose("%s (%s) : vref %s not found, using default %s"%(
+                logger.log("%s (%s) : vref %s not found, using default %s"%(
                         name,vref,refname,default))
                 refname=default
+                # reset so arch is right
+                (pldistro,fcdistro,arch) = default.split("-")
                 # could check again, but as we have /etc/slicefamily 
                 # there's probably no /vservers/.vref/default
 
         except IOError:
             # have not found slicefamily
-            logger.verbose("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
-                # for legacy nodes
+            logger.log("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
+            # for legacy nodes
             refname="default"
+            arch="i386"
         except:
-            import traceback
-            logger.log("%s (%s) : unexpected error follows - using 'default'"%(
-                    name,vref))
+            logger.log("%s (%s) : unexpected error follows - using 'default'"%(name,vref))
             logger.log(traceback.format_exc())
             refname="default"
+            arch="i386"
             
+        def personality (arch):
+            personality="linux32"
+            if arch.find("64")>=0:
+                personality="linux64"
+            return personality
+
         logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
-        open('/vservers/%s/etc/slicename' % name, 'w').write(name)
+        # export slicename to the slice in /etc/slicename
+        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
+        # set personality: only if needed (if arches differ)
+        if tools.root_context_arch() != arch:
+            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch))
+            logger.log('%s: set personality to %s'%(name,personality(arch)))
 
     @staticmethod
     def destroy(name): logger.log_call('/usr/sbin/vuserdel', name)
 
     def configure(self, rec):
+        # in case we update nodemanager..
+        self.install_and_enable_vinit()
+
         new_rspec = rec['_rspec']
         if new_rspec != self.rspec:
             self.rspec = new_rspec
@@ -137,54 +161,89 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         new_initscript = rec['initscript']
         if new_initscript != self.initscript:
             self.initscript = new_initscript
-            logger.log('%s: installing initscript' % self.name)
-            def install_initscript():
-                flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
-                fd = os.open('/etc/rc.vinit', flags, 0755)
-                os.write(fd, new_initscript)
-                os.close(fd)
-            try:
-                self.chroot_call(install_initscript)
-                self.initscriptchanged = True
-            except: logger.log_exc(self.name)
+            # not used anymore, we always check against the installed script
+            #self.initscriptchanged = True
+            self.refresh_slice_vinit()
 
         accounts.Account.configure(self, rec)  # install ssh keys
 
+    # unconditionally install and enable the generic vinit script
+    # mimicking chkconfig for enabling the generic vinit script
+    # this is hardwired for runlevel 3
+    def install_and_enable_vinit (self):
+        vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
+        vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
+        rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
+        rc3_target="../init.d/vinit"
+        # install in sliver
+        body=file(vinit_source).read()
+        if tools.replace_file_with_string(vinit_script,body,chmod=0755):
+            logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
+        # create symlink for runlevel 3
+        if not os.path.islink(rc3_link):
+            try:
+                logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
+                os.symlink(rc3_target,rc3_link)
+            except:
+                logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
+
+    def rerun_slice_vinit(self):
+        command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
+        logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
+        subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
+
+    # this one checks for the existence of the slice initscript
+    # install or remove the slice initscript, as instructed by the initscript tag
+    def refresh_slice_vinit(self):
+        body=self.initscript
+        sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
+        if tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755):
+            if body:
+                logger.log("vsliver_vs: %s: Installed new initscript in %s"%(self.name,sliver_initscript))
+                if self.is_running():
+                    # Only need to rerun the initscript if the vserver is
+                    # already running. If the vserver isn't running, then the
+                    # initscript will automatically be started by
+                    # /etc/rc.d/vinit when the vserver is started.
+                    self.rerun_slice_vinit()
+            else:
+                logger.log("vsliver_vs: %s: Removed obsolete initscript %s"%(self.name,sliver_initscript))
+
     def start(self, delay=0):
-        if self.rspec['enabled'] > 0:
-            logger.log('%s: starting in %d seconds' % (self.name, delay))
+        if self.rspec['enabled'] <= 0:
+            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
+        else:
+            logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
             time.sleep(delay)
-            # VServer.start calls fork() internally
-            vserver.VServer.start(self)
-            # Watch for 5 mins to see if slice is running before setting the name
-            # It would make sense to do this as part of start in VServer, but the name
-            # comes from NM.  Also, the name would only change in NM.  Name can only be
-            # set from root context, so overloading chcontext wont work;  chcontext, setname
-            # will fail, and in the converse the context isn't setup in the kernel.
-            for i in range(0,60):
-                time.sleep(5)
-                if vserver.VServer.is_running(self):
-                    # Set the vciVHI_CONTEXT to slice_id for 
-                    # fprobe-ulog to mark packets with.
-                    logger.log("%s: Setting name to %s" % (self.name, self.slice_id),2)
-                    self.setname(self.slice_id)
-                    break
-
-        else: logger.log('%s: not starting, is not enabled' % self.name)
-        self.initscriptchanged = False
+            # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
+            self.install_and_enable_vinit()
+            # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
+            self.refresh_slice_vinit()
+            child_pid = os.fork()
+            if child_pid == 0:
+                # VServer.start calls fork() internally,
+                # so just close the nonstandard fds and fork once to avoid creating zombies
+                tools.close_nonstandard_fds()
+                vserver.VServer.start(self)
+                os._exit(0)
+            else:
+                os.waitpid(child_pid, 0)
 
     def stop(self):
         logger.log('%s: stopping' % self.name)
         vserver.VServer.stop(self)
 
+    def is_running(self): 
+        return vserver.VServer.is_running(self)
+
     def set_resources(self):
         disk_max = self.rspec['disk_max']
         logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
         try:  # if the sliver is over quota, .set_disk_limit will throw an exception
             if not self.disk_usage_initialized:
                 self.vm_running = False
-                logger.log('%s: computing disk usage: beginning' % self.name)
                 Sliver_VS._init_disk_info_sem.acquire()
+                logger.log('%s: computing disk usage: beginning' % self.name)
                 try: self.init_disk_info()
                 finally: Sliver_VS._init_disk_info_sem.release()
                 logger.log('%s: computing disk usage: ended' % self.name)
@@ -214,6 +273,31 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         cpu_pct = self.rspec['cpu_pct']
         cpu_share = self.rspec['cpu_share']
 
+        count = 1
+        for key in self.rspec.keys():
+            if key.find('sysctl.') == 0:
+                sysctl=key.split('.')
+                try:
+                    # /etc/vservers/<guest>/sysctl/<id>/
+                    dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
+                    try:
+                        os.makedirs(dirname, 0755)
+                    except:
+                        pass
+                    setting = open("%s/setting" % dirname, "w")
+                    setting.write("%s\n" % key.lstrip("sysctl."))
+                    setting.close()
+                    value = open("%s/value" % dirname, "w")
+                    value.write("%s\n" % self.rspec[key])
+                    value.close()
+                    count += 1
+
+                    logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
+                except IOError, e:
+                    logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
+                    logger.log("sliver_vs: %s: error = %s"%(self.name,e))
+
+
         if self.rspec['enabled'] > 0:
             if cpu_pct > 0:
                 logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
@@ -232,6 +316,21 @@ class Sliver_VS(accounts.Account, vserver.VServer):
                 (self.name, self.rspec['ip_addresses']))
             self.set_ipaddresses_config(self.rspec['ip_addresses'])
 
+            try:
+                vserver_config_path = '/etc/vservers/%s'%self.name
+                if not os.path.exists (vserver_config_path):
+                    os.makedirs (vserver_config_path)
+                file('%s/slice_id'%vserver_config_path, 'w').write("%d"%self.slice_id)
+                logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
+            except IOError,e:
+                logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
+            except Exception,e:
+                logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
+
+            if self.enabled == False:
+                self.enabled = True
+                self.start()
             if False: # Does not work properly yet.
                 if self.have_limits_changed():
                     logger.log('%s: limits have changed --- restarting' % self.name)
@@ -246,4 +345,5 @@ class Sliver_VS(accounts.Account, vserver.VServer):
         else:  # tell vsh to disable remote login by setting CPULIMIT to 0
             logger.log('%s: disabling remote login' % self.name)
             self.set_sched_config(0, 0)
+            self.enabled = False
             self.stop()