triggers all scripts in qaapi/qa/tests/node in one node's root context
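
This commit adds a check_sanity step to the default test sequence; at the
TestPlc level it only locates the first node and the first sliver and calls
their check_sanity() methods (see the check_sanity_node / check_sanity_sliver
hunk below). The node-side part, running every script shipped under
qaapi/qa/tests/node as root on that node, lives in TestNode and is not part
of this diff. The snippet below is only a minimal, hypothetical sketch of
that idea; the hostname, key path and on-node tests directory are assumptions,
not code from this repository.

    # Hypothetical sketch: run every qaapi node test on one node, as root.
    # tests_dir, hostname and key_path are assumptions; the real logic
    # belongs to TestNode.check_sanity(), which this diff does not touch.
    import subprocess

    def check_sanity_on_node(hostname, key_path,
                             tests_dir="/usr/share/qaapi/qa/tests/node"):
        """Return True only if every script in tests_dir exits 0 on the node."""
        remote = "for t in %s/* ; do echo RUNNING $t ; $t || exit 1 ; done" % tests_dir
        command = ["ssh", "-i", key_path, "root@%s" % hostname, remote]
        return subprocess.call(command) == 0
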
diff --git a/system/TestPlc.py b/system/TestPlc.py
index 0134687..cd9f222 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -5,6 +5,7 @@ import time
 import sys
 import traceback
 from types import StringTypes
+import socket
 
 import utils
 from TestSite import TestSite
@@ -62,16 +63,18 @@ SEP='<sep>'
 class TestPlc:
 
     default_steps = ['uninstall','install','install_rpm', 
-                     'configure', 'start', SEP,
+                     'configure', 'start', 'fetch_keys', SEP,
                      'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                      'sites', 'nodes', 'slices', 'nodegroups', SEP,
                      'init_node','bootcd', 'configure_qemu', 'export_qemu',
                      'kill_all_qemus', 'reinstall_node','start_node', SEP,
-                     'nodes_booted', 'nodes_ssh', 'check_slice',
-                     'check_initscripts', 'check_tcp',SEP,
+                     'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
+                     'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
                      'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
-    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', SEP,
-                    'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
+    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
+                    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
+                    'clean_sites', 'clean_nodes', 
+                    'clean_slices', 'clean_keys', SEP,
                     'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                     'db_dump' , 'db_restore', ' cleanup_tracker',
                     'standby_1 through 20'
@@ -79,7 +82,7 @@ class TestPlc:
 
     @staticmethod
     def printable_steps (list):
-        return " ".join(list).replace(" "+SEP+" ","\n")
+        return " ".join(list).replace(" "+SEP+" "," \\\n")
     @staticmethod
     def valid_step (step):
         return step != SEP
@@ -94,17 +97,12 @@ class TestPlc:
             self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
             self.vserver=True
         except:
-            self.vserver=False
-            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
-#        utils.header('Using API url %s'%self.url)
+            raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
         
     def name(self):
         name=self.plc_spec['name']
-        if self.vserver:
-            return "%s.%s"%(name,self.vservername)
-        else:
-            return "%s.chroot"%name
+        return "%s.%s"%(name,self.vservername)
 
     def hostname(self):
         return self.plc_spec['hostname']
@@ -120,37 +118,23 @@ class TestPlc:
     def actual_command_in_guest (self,command):
         return self.test_ssh.actual_command(self.host_to_guest(command))
     
+    def start_guest (self):
+      return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
+    
     def run_in_guest (self,command):
         return utils.system(self.actual_command_in_guest(command))
     
     def run_in_host (self,command):
         return self.test_ssh.run_in_buildname(command)
 
-    #command gets run in the chroot/vserver
+    #command gets run in the vserver
     def host_to_guest(self,command):
-        if self.vserver:
-            return "vserver %s exec %s"%(self.vservername,command)
-        else:
-            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)
+        return "vserver %s exec %s"%(self.vservername,command)
+    
+    #command gets run in the vserver
+    def start_guest_in_host(self):
+        return "vserver %s start"%(self.vservername)
     
-    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
-    def copy_in_guest (self, localfile, remotefile, in_data=False):
-        if in_data:
-            chroot_dest="/plc/data"
-        else:
-            chroot_dest="/plc/root"
-        if self.is_local():
-            if not self.vserver:
-                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
-            else:
-                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
-        else:
-            if not self.vserver:
-                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
-            else:
-                utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))
-
-
     # xxx quick n dirty
     def run_in_guest_piped (self,local,remote):
         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
@@ -195,6 +179,37 @@ class TestPlc:
                 return slice
         raise Exception,"Cannot locate slice %s"%slicename
 
+    def all_sliver_objs (self):
+        result=[]
+        for slice_spec in self.plc_spec['slices']:
+            slicename = slice_spec['slice_fields']['name']
+            for nodename in slice_spec['nodenames']:
+                result.append(self.locate_sliver_obj (nodename,slicename))
+        return result
+
+    def locate_sliver_obj (self,nodename,slicename):
+        (site,node) = self.locate_node(nodename)
+        slice = self.locate_slice (slicename)
+        # build objects
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        # xxx the slice site is assumed to be the node site - mhh - probably harmless
+        test_slice = TestSlice (self, test_site, slice)
+        return TestSliver (self, test_node, test_slice)
+
+    def locate_first_node(self):
+        nodename=self.plc_spec['slices'][0]['nodenames'][0]
+        (site,node) = self.locate_node(nodename)
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        return test_node
+
+    def locate_first_sliver (self):
+        slice_spec=self.plc_spec['slices'][0]
+        slicename=slice_spec['slice_fields']['name']
+        nodename=slice_spec['nodenames'][0]
+        return self.locate_sliver_obj(nodename,slicename)
+
     # all different hostboxes used in this plc
     def gather_hostBoxes(self):
         # maps on sites and nodes, return [ (host_box,test_node) ]
@@ -265,9 +280,6 @@ class TestPlc:
     TRACKER_FILE="~/running-test-plcs"
 
     def record_tracker (self):
-        if not self.vserver:
-            print 'record_tracker active on vserver plcs only - ignored'
-            return True
         command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
         (code,output) = utils.output_of (self.test_ssh.actual_command(command))
         if code != 0:
@@ -277,9 +289,6 @@ class TestPlc:
         return True
 
     def free_tracker (self):
-        if not self.vserver:
-            print 'free_tracker active on vserver plcs only - ignored'
-            return True
         command="head -1 %s"%TestPlc.TRACKER_FILE
         (code,line) = utils.output_of(self.test_ssh.actual_command(command))
         if code != 0:
@@ -297,50 +306,25 @@ class TestPlc:
         utils.system(self.test_ssh.actual_command(flush_command))
         return True
 
+    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
     def cleanup_tracker (self):
         stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
         utils.system(self.test_ssh.actual_command(stop_all))
         clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
         utils.system(self.test_ssh.actual_command(clean_tracker))
 
-    #################### step methods
-
-    ### uninstall
-    def uninstall_chroot(self):
-        self.run_in_host('service plc safestop')
-        #####detecting the last myplc version installed and remove it
-        self.run_in_host('rpm -e myplc')
-        ##### Clean up the /plc directory
-        self.run_in_host('rm -rf /plc/data')
-        ##### stop any running vservers
-        self.run_in_host('for vserver in $(ls -d /vservers/* | sed -e s,/vservers/,,) ; do case $vserver in vtest*) echo Shutting down vserver $vserver ; vserver $vserver stop ;; esac ; done')
-        return True
-
-    def uninstall_vserver(self):
-        self.run_in_host("vserver --silent %s delete"%self.vservername)
-        return True
-
     def uninstall(self):
-        # if there's a chroot-based myplc running, and then a native-based myplc is being deployed
-        # it sounds safer to have the former uninstalled too
-        # now the vserver method cannot be invoked for chroot instances as vservername is required
-        if self.vserver:
-            self.uninstall_vserver()
-            self.uninstall_chroot()
-        else:
-            self.uninstall_chroot()
+        self.run_in_host("vserver --silent %s delete"%self.vservername)
         return True
 
     ### install
-    def install_chroot(self):
-        # nothing to do
-        return True
-
-    def install_vserver(self):
-        # we need build dir for vtest-init-vserver
+    def install(self):
         if self.is_local():
             # a full path for the local calls
-            build_dir=os.path.dirname(sys.argv[0])+"/build"
+            build_dir=os.path.dirname(sys.argv[0])
+            # sometimes this is empty - set to "." in such a case
+            if not build_dir: build_dir="."
+            build_dir += "/build"
         else:
             # use a standard name - will be relative to remote buildname
             build_dir="build"
@@ -348,48 +332,29 @@ class TestPlc:
         build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
         if self.run_in_host(build_checkout) != 0:
             return False
-        # the repo url is taken from myplc-url 
-        # with the last two steps (i386/myplc...) removed
-        repo_url = self.options.myplc_url
-        for level in [ 'rpmname','arch' ]:
+        # the repo url is taken from arch-rpms-url 
+        # with the last step (i386.) removed
+        repo_url = self.options.arch_rpms_url
+        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
         if self.options.arch == "i386":
-            personality="-p linux32"
+            personality_option="-p linux32"
         else:
-            personality="-p linux64"
-        create_vserver="%s/vtest-init-vserver.sh %s %s %s -- --interface eth0:%s"%\
-            (build_dir,personality,self.vservername,repo_url,self.vserverip)
+            personality_option="-p linux64"
+        script="vtest-init-vserver.sh"
+        vserver_name = self.vservername
+        vserver_options="--netdev eth0 --interface %s"%self.vserverip
+        try:
+            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
+            vserver_options += " --hostname %s"%vserver_hostname
+        except:
+            pass
+        create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
         return self.run_in_host(create_vserver) == 0
 
-    def install(self):
-        if self.vserver:
-            return self.install_vserver()
-        else:
-            return self.install_chroot()
-    
-    ### install_rpm - make this an optional step
-    def cache_rpm(self):
-        url = self.options.myplc_url
-        rpm = os.path.basename(url)
-        cache_fetch="pwd;if [ -f %(rpm)s ] ; then echo Using cached rpm %(rpm)s ; else echo Fetching %(url)s ; curl -O %(url)s; fi"%locals()
-       return self.run_in_host(cache_fetch)==0
-
-    def install_rpm_chroot(self):
-        url = self.options.myplc_url
-        rpm = os.path.basename(url)
-       if not self.cache_rpm():
-            return False
-       utils.header('Installing the :  %s'%rpm)
-        return self.run_in_host('rpm -Uvh '+rpm)==0 and self.run_in_host('service plc mount')==0
-
-    def install_rpm_vserver(self):
-        return self.run_in_guest("yum -y install myplc-native")==0
-
+    ### install_rpm 
     def install_rpm(self):
-        if self.vserver:
-            return self.install_rpm_vserver()
-        else:
-            return self.install_rpm_chroot()
+        return self.run_in_guest("yum -y install myplc-native")==0
 
     ### 
     def configure(self):
@@ -415,22 +380,19 @@ class TestPlc:
         utils.system('rm %s'%tmpname)
         return True
 
-    # the chroot install is slightly different to this respect
     def start(self):
-        if self.vserver:
-            self.run_in_guest('service plc start')
-        else:
-            self.run_in_host('service plc start')
+        self.run_in_guest('service plc start')
         return True
-        
+
     def stop(self):
-        if self.vserver:
-            self.run_in_guest('service plc stop')
-        else:
-            self.run_in_host('service plc stop')
+        self.run_in_guest('service plc stop')
         return True
         
-    # could use a TestKey class
+    def vs_start (self):
+        self.start_guest()
+        return True
+
+    # stores the keys from the config for further use
     def store_keys(self):
         for key_spec in self.plc_spec['keys']:
                TestKey(self,key_spec).store_key()
@@ -439,6 +401,21 @@ class TestPlc:
     def clean_keys(self):
         utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
 
+    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
+    # for later direct access to the nodes
+    def fetch_keys(self):
+        dir="./keys"
+        if not os.path.isdir(dir):
+            os.mkdir(dir)
+        prefix = 'root_ssh_key'
+        vservername=self.vservername
+        overall=True
+        for ext in [ 'pub', 'rsa' ] :
+            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+            dst="keys/%(vservername)s.%(ext)s"%locals()
+            if self.test_ssh.fetch(src,dst) != 0: overall=False
+        return overall
+
     def sites (self):
         return self.do_sites()
     
@@ -460,6 +437,13 @@ class TestPlc:
                 test_site.create_users()
         return True
 
+    def clean_all_sites (self):
+        print 'auth_root',self.auth_root()
+        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
+        for site_id in site_ids:
+            print 'Deleting site_id',site_id
+            self.apiserver.DeleteSite(self.auth_root(),site_id)
+
     def nodes (self):
         return self.do_nodes()
     def clean_nodes (self):
@@ -482,9 +466,13 @@ class TestPlc:
                     test_node.create_node ()
         return True
 
-    # create nodegroups if needed, and populate
-    # no need for a clean_nodegroups if we are careful enough
     def nodegroups (self):
+        return self.do_nodegroups("add")
+    def clean_nodegroups (self):
+        return self.do_nodegroups("delete")
+
+    # create nodegroups if needed, and populate
+    def do_nodegroups (self, action="add"):
         # 1st pass to scan contents
         groups_dict = {}
         for site_spec in self.plc_spec['sites']:
@@ -500,14 +488,54 @@ class TestPlc:
                             groups_dict[nodegroupname]=[]
                         groups_dict[nodegroupname].append(test_node.name())
         auth=self.auth_root()
+        overall = True
         for (nodegroupname,group_nodes) in groups_dict.iteritems():
-            try:
-                self.apiserver.GetNodeGroups(auth,{'name':nodegroupname})[0]
-            except:
-                self.apiserver.AddNodeGroup(auth,{'name':nodegroupname})
-            for node in group_nodes:
-                self.apiserver.AddNodeToNodeGroup(auth,node,nodegroupname)
-        return True
+            if action == "add":
+                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
+                # first, check if the nodetagtype is here
+                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
+                if tag_types:
+                    tag_type_id = tag_types[0]['tag_type_id']
+                else:
+                    tag_type_id = self.apiserver.AddTagType(auth,
+                                                            {'tagname':nodegroupname,
+                                                             'description': 'for nodegroup %s'%nodegroupname,
+                                                             'category':'test',
+                                                             'min_role_id':10})
+                print 'located tag (type)',nodegroupname,'as',tag_type_id
+                # create nodegroup
+                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
+                if not nodegroups:
+                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
+                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
+                # set node tag on all nodes, value='yes'
+                for nodename in group_nodes:
+                    try:
+                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
+                    except:
+                        traceback.print_exc()
+                        print 'node',nodename,'seems to already have tag',nodegroupname
+                    # check anyway
+                    try:
+                        expect_yes = self.apiserver.GetNodeTags(auth,
+                                                                {'hostname':nodename,
+                                                                 'tagname':nodegroupname},
+                                                                ['tagvalue'])[0]['tagvalue']
+                        if expect_yes != "yes":
+                            print 'Mismatch node tag on node',nodename,'got',expect_yes
+                            overall=False
+                    except:
+                        if not self.options.dry_run:
+                            print 'Cannot find tag',nodegroupname,'on node',nodename
+                            overall = False
+            else:
+                try:
+                    print 'cleaning nodegroup',nodegroupname
+                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
+                except:
+                    traceback.print_exc()
+                    overall=False
+        return overall
 
     def all_hostnames (self) :
         hostnames = []
@@ -517,7 +545,7 @@ class TestPlc:
         return hostnames
 
     # gracetime : during the first <gracetime> minutes nothing gets printed
-    def do_nodes_booted (self, minutes, gracetime,period=30):
+    def do_nodes_booted (self, minutes, gracetime,period=15):
         if self.options.dry_run:
             print 'dry_run'
             return True
@@ -565,7 +593,7 @@ class TestPlc:
     def nodes_booted(self):
         return self.do_nodes_booted(minutes=20,gracetime=15)
 
-    def do_nodes_ssh(self,minutes,gracetime,period=30):
+    def do_nodes_ssh(self,minutes,gracetime,period=15):
         # compute timeout
         timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
         graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
@@ -601,7 +629,7 @@ class TestPlc:
         return True
         
     def nodes_ssh(self):
-        return self.do_nodes_ssh(minutes=6,gracetime=4)
+        return self.do_nodes_ssh(minutes=10,gracetime=5)
     
     @node_mapper
     def init_node (self): pass
@@ -614,6 +642,16 @@ class TestPlc:
     @node_mapper
     def export_qemu (self): pass
         
+    ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
+    def check_sanity_node (self): 
+        return self.locate_first_node().check_sanity()
+    def check_sanity_sliver (self) : 
+        return self.locate_first_sliver().check_sanity()
+    
+    def check_sanity (self):
+        return self.check_sanity_node() and self.check_sanity_sliver()
+
+    ### initscripts
     def do_check_initscripts(self):
         overall = True
         for slice_spec in self.plc_spec['slices']:
@@ -640,6 +678,18 @@ class TestPlc:
             self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
         return True
 
+    def clean_initscripts (self):
+        for initscript in self.plc_spec['initscripts']:
+            initscript_name = initscript['initscript_fields']['name']
+            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
+            try:
+                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
+                print initscript_name,'deleted'
+            except:
+                print 'deletion went wrong - probably did not exist'
+        return True
+
+    ### manage slices
     def slices (self):
         return self.do_slices()
 
@@ -669,24 +719,6 @@ class TestPlc:
     @node_mapper
     def start_node (self) : pass
 
-    def all_sliver_objs (self):
-        result=[]
-        for slice_spec in self.plc_spec['slices']:
-            slicename = slice_spec['slice_fields']['name']
-            for nodename in slice_spec['nodenames']:
-                result.append(self.locate_sliver_obj (nodename,slicename))
-        return result
-
-    def locate_sliver_obj (self,nodename,slicename):
-        (site,node) = self.locate_node(nodename)
-        slice = self.locate_slice (slicename)
-        # build objects
-        test_site = TestSite (self, site)
-        test_node = TestNode (self, test_site,node)
-        # xxx the slice site is assumed to be the node site - mhh - probably harmless
-        test_slice = TestSlice (self, test_site, slice)
-        return TestSliver (self, test_node, test_slice)
-
     def check_tcp (self):
         specs = self.plc_spec['tcp_test']
         overall=True
@@ -703,7 +735,17 @@ class TestPlc:
             if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                 overall=False
         return overall
-    
+
+    def plcsh_stress_test (self):
+        # install the stress-test in the plc image
+        location = "/usr/share/plc_api/plcsh-stress-test.py"
+        remote="/vservers/%s/%s"%(self.vservername,location)
+        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
+        command = location
+        command += " -- --check"
+        if self.options.small_test:
+            command +=  " --tiny"
+        return ( self.run_in_guest(command) == 0)
 
     def gather_logs (self):
         # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
@@ -737,9 +779,11 @@ class TestPlc:
         return True
 
     def gather_var_logs (self):
+        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")        
         command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
-        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
+        utils.system(command)
+        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
         utils.system(command)
 
     def gather_nodes_var_logs (self):