patch to force the kill_qemus step
[tests.git] / system / TestPlc.py
index 97e9215..801c771 100644 (file)
@@ -14,25 +14,29 @@ from TestNode import TestNode
 from TestUser import TestUser
 from TestKey import TestKey
 from TestSlice import TestSlice
-
-# inserts a backslash before each occurence of the following chars
-# \ " ' < > & | ; ( ) $ * ~ @
-def backslash_shell_specials (command):
-    result=''
-    for char in command:
-        if char in "\\\"'<>&|;()$*~@":
-            result +='\\'+char
-        else:
-            result +=char
-    return result
+from TestSliver import TestSliver
+from TestBox import TestBox
+from TestSsh import TestSsh
 
 # step methods must take (self, options) and return a boolean
 
+def standby(minutes):
+        utils.header('Entering StandBy for %d mn'%minutes)
+        time.sleep(60*minutes)
+        return True
+
+def standby_generic (func):
+    def actual(self,options):
+        minutes=int(func.__name__.split("_")[1])
+        return standby(minutes)
+    return actual
+
 class TestPlc:
 
     def __init__ (self,plc_spec):
        self.plc_spec=plc_spec
        self.path=os.path.dirname(sys.argv[0])
+       self.test_ssh=TestSsh(self)
         try:
             self.vserverip=plc_spec['vserverip']
             self.vservername=plc_spec['vservername']
@@ -51,40 +55,24 @@ class TestPlc:
         else:
             return name+"[chroot]"
 
+    def hostname(self):
+        return self.plc_spec['hostname']
+
     def is_local (self):
-        return self.plc_spec['hostname'] == 'localhost'
+        return utils.is_local(self.hostname())
 
     # define the API methods on this object through xmlrpc
     # would help, but not strictly necessary
     def connect (self):
        pass
     
-    # command gets run in the chroot/vserver
+    #command gets run in the chroot/vserver
     def host_to_guest(self,command):
         if self.vserver:
             return "vserver %s exec %s"%(self.vservername,command)
         else:
-            return "chroot /plc/root %s"%backslash_shell_specials(command)
-
-    # command gets run on the right box
-    def to_host(self,command):
-        if self.is_local():
-            return command
-        else:
-            return "ssh %s %s"%(self.plc_spec['hostname'],backslash_shell_specials(command))
-
-    def full_command(self,command):
-        return self.to_host(self.host_to_guest(command))
-
-    def run_in_guest (self,command):
-        return utils.system(self.full_command(command))
-    def run_in_host (self,command):
-        return utils.system(self.to_host(command))
-
-    # xxx quick n dirty
-    def run_in_guest_piped (self,local,remote):
-        return utils.system(local+" | "+self.full_command(remote))
-
+            return "chroot /plc/root %s"%utils.backslash_shell_specials(command)
+    
     # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
     def copy_in_guest (self, localfile, remotefile, in_data=False):
         if in_data:
@@ -98,9 +86,9 @@ class TestPlc:
                 utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
         else:
             if not self.vserver:
-                utils.system("scp %s %s:%s/%s"%(localfile,self.plc_spec['hostname'],chroot_dest,remotefile))
+                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
             else:
-                utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.plc_spec['hostname'],self.vservername,remotefile))
+                utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))
 
     def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
@@ -129,32 +117,63 @@ class TestPlc:
                 return key
         raise Exception,"Cannot locate key %s"%keyname
 
-    #this to catch up all different hostboxes used in this plc
-    def locate_hostBoxes(self,site_spec):
-        #Get The first host box to avoid returning a long list with the same host box
-        #in case  only one is used for all the nodes
-        HostBoxes=[site_spec['nodes'][0]['host_box']]
-        for node_spec in site_spec['nodes']:
-            if node_spec['host_box']!= HostBoxes[0]:
-                HostBoxes.append( node_spec['host_box'])
-
-        return HostBoxes
-            
-    def kill_all_qemus(self):
+    # all different hostboxes used in this plc
+    def gather_hostBoxes(self):
+        # iterate over sites and nodes; return a list of (host_box, test_node) tuples
+        tuples=[]
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite (self,site_spec)
-            hostboxes_list=self.locate_hostBoxes(site_spec)
-            if (hostboxes_list):
-                for node_spec in site_spec['nodes']:
-                    TestNode(self,test_site,node_spec).stop_qemu(node_spec)
+            for node_spec in site_spec['nodes']:
+                test_node = TestNode (self, test_site, node_spec)
+                if not test_node.is_real():
+                    tuples.append( (test_node.host_box(),test_node) )
+        # transform into a dict { 'host_box' -> [ hostnames .. ] }
+        result = {}
+        for (box,node) in tuples:
+            if not result.has_key(box):
+                result[box]=[node]
             else:
-                utils.header("No emulated node running on this PLC config ignore the kill() step")
-            
+                result[box].append(node)
+        return result
+                    
+    # a step for displaying the host boxes and the nodes they carry
+    def showboxes (self,options):
+        print 'showboxes'
+        for (box,nodes) in self.gather_hostBoxes().iteritems():
+            print box,":"," + ".join( [ node.name() for node in nodes ] )
+        return True
+
+    # make this a valid step
+    def kill_all_qemus(self,options):
+        for (box,nodes) in self.gather_hostBoxes().iteritems():
+            # this is the brute force version, kill all qemus on that host box
+            TestBox(box,options.buildname).kill_all_qemus()
+        return True
+
+    # make this a valid step
+    def list_all_qemus(self,options):
+        for (box,nodes) in self.gather_hostBoxes().iteritems():
+           # push the script
+           TestBox(box,options.buildname).copy("qemu_kill.sh") 
+            # list (-l) all qemus running on that host box
+            TestBox(box,options.buildname).run_in_buildname("qemu_kill.sh -l")
+        return True
+
+    # kill only the right qemus
+    def force_kill_qemus(self,options):
+        for (box,nodes) in self.gather_hostBoxes().iteritems():
+           # push the script
+           TestBox(box,options.buildname).copy("qemu_kill.sh") 
+            # the fine-grain version
+            for node in nodes:
+                node.kill_qemu()
+        return True
+
     def clear_ssh_config (self,options):
         # install local ssh_config file as root's .ssh/config - ssh should be quiet
         # dir might need creation first
-        self.run_in_guest("mkdir /root/.ssh")
-        self.run_in_guest("chmod 700 /root/.ssh")
+        self.test_ssh.run_in_guest("mkdir /root/.ssh")
+        self.test_ssh.run_in_guest("chmod 700 /root/.ssh")
         # this does not work - > redirection somehow makes it until an argument to cat
         #self.run_in_guest_piped("cat ssh_config","cat > /root/.ssh/config")
         self.copy_in_guest("ssh_config","/root/.ssh/config",True)
@@ -164,17 +183,17 @@ class TestPlc:
 
     ### uninstall
     def uninstall_chroot(self,options):
-        self.run_in_host('service plc safestop')
+        self.test_ssh.run_in_host('service plc safestop')
         #####detecting the last myplc version installed and remove it
-        self.run_in_host('rpm -e myplc')
+        self.test_ssh.run_in_host('rpm -e myplc')
         ##### Clean up the /plc directory
-        self.run_in_host('rm -rf  /plc/data')
+        self.test_ssh.run_in_host('rm -rf  /plc/data')
         ##### stop any running vservers
-        self.run_in_host('for vserver in $(ls /vservers/* | sed -e s,/vservers/,,) ; do vserver $vserver stop ; done')
+        self.test_ssh.run_in_host('for vserver in $(ls /vservers/* | sed -e s,/vservers/,,) ; do vserver $vserver stop ; done')
         return True
 
     def uninstall_vserver(self,options):
-        self.run_in_host("vserver --silent %s delete"%self.vservername)
+        self.test_ssh.run_in_host("vserver --silent %s delete"%self.vservername)
         return True
 
     def uninstall(self,options):
@@ -203,16 +222,15 @@ class TestPlc:
             # use a standard name - will be relative to HOME 
             build_dir="tests-system-build"
         build_checkout = "svn checkout %s %s"%(options.build_url,build_dir)
-        if self.run_in_host(build_checkout) != 0:
+        if self.test_ssh.run_in_host(build_checkout) != 0:
             raise Exception,"Cannot checkout build dir"
         # the repo url is taken from myplc-url 
         # with the last two steps (i386/myplc...) removed
         repo_url = options.myplc_url
         repo_url = os.path.dirname(repo_url)
-        repo_url = os.path.dirname(repo_url)
         create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
             (build_dir,self.vservername,repo_url,self.vserverip)
-        if self.run_in_host(create_vserver) != 0:
+        if self.test_ssh.run_in_host(create_vserver) != 0:
             raise Exception,"Could not create vserver for %s"%self.vservername
         return True
 
@@ -221,17 +239,28 @@ class TestPlc:
             return self.install_vserver(options)
         else:
             return self.install_chroot(options)
-
+    
     ### install_rpm
+    def cache_rpm(self,url):
+        self.test_ssh.run_in_host('rm -rf *.rpm')
+       utils.header('Curling rpm from %s'%url)
+       id= self.test_ssh.run_in_host('curl -O '+url)
+       if (id != 0):
+               raise Exception,"Could not get rpm from  %s"%url
+               return False
+       return True
+
     def install_rpm_chroot(self,options):
-        utils.header('Installing from %s'%options.myplc_url)
-        url=options.myplc_url
-        self.run_in_host('rpm -Uvh '+url)
-        self.run_in_host('service plc mount')
+        rpm = os.path.basename(options.myplc_url)
+       if (not os.path.isfile(rpm)):
+               self.cache_rpm(options.myplc_url)
+       utils.header('Installing the :  %s'%rpm)
+        self.test_ssh.run_in_host('rpm -Uvh '+rpm)
+        self.test_ssh.run_in_host('service plc mount')
         return True
 
     def install_rpm_vserver(self,options):
-        self.run_in_guest("yum -y install myplc-native")
+        self.test_ssh.run_in_guest("yum -y install myplc-native")
         return True
 
     def install_rpm(self,options):
@@ -242,7 +271,7 @@ class TestPlc:
 
     ### 
     def configure(self,options):
-        tmpname='%s/%s.plc-config-tty'%(options.path,self.name())
+        tmpname='%s.plc-config-tty'%(self.name())
         fileconf=open(tmpname,'w')
         for var in [ 'PLC_NAME',
                      'PLC_ROOT_PASSWORD',
@@ -260,23 +289,23 @@ class TestPlc:
         fileconf.write('q\n')
         fileconf.close()
         utils.system('cat %s'%tmpname)
-        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
+        self.test_ssh.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
         utils.system('rm %s'%tmpname)
         return True
 
     # the chroot install is slightly different to this respect
     def start(self, options):
         if self.vserver:
-            self.run_in_guest('service plc start')
+            self.test_ssh.run_in_guest('service plc start')
         else:
-            self.run_in_host('service plc start')
+            self.test_ssh.run_in_host('service plc start')
         return True
         
     def stop(self, options):
         if self.vserver:
-            self.run_in_guest('service plc stop')
+            self.test_ssh.run_in_guest('service plc stop')
         else:
-            self.run_in_host('service plc stop')
+            self.test_ssh.run_in_host('service plc stop')
         return True
         
     # could use a TestKey class
@@ -326,7 +355,7 @@ class TestPlc:
             else:
                 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                 for node_spec in site_spec['nodes']:
-                    utils.show_spec('Creating node %s'%node_spec,node_spec)
+                    utils.pprint('Creating node %s'%node_spec,node_spec)
                     test_node = TestNode (self,test_site,node_spec)
                     test_node.create_node ()
         return True
@@ -366,7 +395,7 @@ class TestPlc:
         return hostnames
 
     # gracetime : during the first <gracetime> minutes nothing gets printed
-    def do_check_nodesStatus (self, minutes, gracetime=2):
+    def do_nodes_booted (self, minutes, gracetime=2):
         # compute timeout
         timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
         graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
@@ -408,36 +437,35 @@ class TestPlc:
         # only useful in empty plcs
         return True
 
-    def check_nodesStatus(self,options):
-        return self.do_check_nodesStatus(minutes=5)
+    def nodes_booted(self,options):
+        return self.do_nodes_booted(minutes=5)
     
     #to scan and store the nodes's public keys and avoid to ask for confirmation when  ssh 
     def scan_publicKeys(self,hostnames):
         try:
             temp_knownhosts="/root/known_hosts"
             remote_knownhosts="/root/.ssh/known_hosts"
-            self.run_in_host("touch %s"%temp_knownhosts )
+            self.test_ssh.run_in_host("touch %s"%temp_knownhosts )
             for hostname in hostnames:
                 utils.header("Scan Public %s key and store it in the known_host file(under the root image) "%hostname)
-                scan=self.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
+                scan=self.test_ssh.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
             #Store the public keys in the right root image
             self.copy_in_guest(temp_knownhosts,remote_knownhosts,True)
             #clean the temp keys file used
-            self.run_in_host('rm -f  %s '%temp_knownhosts )
+            self.test_ssh.run_in_host('rm -f  %s '%temp_knownhosts )
         except Exception, err:
             print err
             
     def do_check_nodesSsh(self,minutes):
         # compute timeout
         timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
-        #graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
         tocheck = self.all_hostnames()
         self.scan_publicKeys(tocheck)
         utils.header("checking Connectivity on nodes %r"%tocheck)
         while tocheck:
             for hostname in tocheck:
                 # try to ssh in nodes
-                access=self.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s date'%hostname )
+                access=self.test_ssh.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s date'%hostname )
                 if (not access):
                     utils.header('The node %s is sshable -->'%hostname)
                     # refresh tocheck
@@ -446,8 +474,8 @@ class TestPlc:
                     (site_spec,node_spec)=self.locate_node(hostname)
                     if TestNode.is_real_model(node_spec['node_fields']['model']):
                         utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
-                    tocheck.remove(hostname)
-            if not tocheck:
+                       tocheck.remove(hostname)
+            if  not tocheck:
                 return True
             if datetime.datetime.now() > timeout:
                 for hostname in tocheck:
@@ -458,27 +486,37 @@ class TestPlc:
         # only useful in empty plcs
         return True
         
-    def check_nodesConnectivity(self, options):
+    def nodes_ssh(self, options):
         return  self.do_check_nodesSsh(minutes=2)
-            
-    def standby(self,options):
-        #Method for waiting a while when nodes are booting and being sshable,giving time to NM to be up
-        utils.header('Entering in StanbdBy mode for 10min at %s'%datetime.datetime.now())
-        time.sleep(600)
-        utils.header('Exist StandBy mode at %s'%datetime.datetime.now())
-        return True
     
     def bootcd (self, options):
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite (self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node=TestNode (self,test_site,node_spec)
-                test_node.create_boot_cd(options.path)
+                test_node.prepare_area()
+                test_node.create_boot_cd()
+               test_node.configure_qemu()
         return True
-                
+
+    def do_check_intiscripts(self):
+       for site_spec in self.plc_spec['sites']:
+               test_site = TestSite (self,site_spec)
+               test_node = TestNode (self,test_site,site_spec['nodes'])
+               for slice_spec in self.plc_spec['slices']:
+                       test_slice=TestSlice (self,test_site,slice_spec)
+                       test_sliver=TestSliver(self,test_node,test_slice)
+                       init_status=test_sliver.get_initscript(slice_spec)
+                       if (not init_status):
+                               return False
+               return init_status
+           
+    def check_initscripts(self, options):
+           return self.do_check_intiscripts()
+                   
     def initscripts (self, options):
         for initscript in self.plc_spec['initscripts']:
-            utils.show_spec('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
+            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
             self.server.AddInitScript(self.auth_root(),initscript['initscript_fields'])
         return True
 
@@ -497,7 +535,7 @@ class TestPlc:
                 utils.header("Deleting slices in site %s"%test_site.name())
                 test_slice.delete_slice()
             else:    
-                utils.show_spec("Creating slice",slice)
+                utils.pprint("Creating slice",slice)
                 test_slice.create_slice()
                 utils.header('Created Slice %s'%slice['slice_fields']['name'])
         return True
@@ -508,19 +546,29 @@ class TestPlc:
             test_site = TestSite(self,site_spec)
             test_slice=TestSlice(self,test_site,slice_spec)
             status=test_slice.do_check_slice(options)
-            return status
+            if (not status):
+                return False
+        return status
     
     def start_nodes (self, options):
-        self.kill_all_qemus()
         utils.header("Starting  nodes")
         for site_spec in self.plc_spec['sites']:
             TestSite(self,site_spec).start_nodes (options)
         return True
 
     def stop_nodes (self, options):
-        self.kill_all_qemus()
+        self.kill_all_qemus(options)
         return True
 
+    def check_tcp (self, options):
+           #we just need to create a sliver object nothing else
+           test_sliver=TestSliver(self,
+                                  TestNode(self, TestSite(self,self.plc_spec['sites'][0]),
+                                           self.plc_spec['sites'][0]['nodes'][0]),
+                                  TestSlice(self,TestSite(self,self.plc_spec['sites'][0]),
+                                            self.plc_spec['slices']))
+           return test_sliver.do_check_tcp(self.plc_spec['tcp_param'],options)
+
     # returns the filename to use for sql dump/restore, using options.dbname if set
     def dbfile (self, database, options):
         # uses options.dbname if it is found
@@ -537,19 +585,61 @@ class TestPlc:
     def db_dump(self, options):
         
         dump=self.dbfile("planetab4",options)
-        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
+        self.test_ssh.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
         utils.header('Dumped planetlab4 database in %s'%dump)
         return True
 
     def db_restore(self, options):
         dump=self.dbfile("planetab4",options)
         ##stop httpd service
-        self.run_in_guest('service httpd stop')
+        self.test_ssh.run_in_guest('service httpd stop')
         # xxx - need another wrapper
-        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
-        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
-        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
+        self.test_ssh.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
+        self.test_ssh.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
+        self.test_ssh.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
         ##starting httpd service
-        self.run_in_guest('service httpd start')
+        self.test_ssh.run_in_guest('service httpd start')
 
         utils.header('Database restored from ' + dump)
+
+    @standby_generic 
+    def standby_1(): pass
+    @standby_generic 
+    def standby_2(): pass
+    @standby_generic 
+    def standby_3(): pass
+    @standby_generic 
+    def standby_4(): pass
+    @standby_generic 
+    def standby_5(): pass
+    @standby_generic 
+    def standby_6(): pass
+    @standby_generic 
+    def standby_7(): pass
+    @standby_generic 
+    def standby_8(): pass
+    @standby_generic 
+    def standby_9(): pass
+    @standby_generic 
+    def standby_10(): pass
+    @standby_generic 
+    def standby_11(): pass
+    @standby_generic 
+    def standby_12(): pass
+    @standby_generic 
+    def standby_13(): pass
+    @standby_generic 
+    def standby_14(): pass
+    @standby_generic 
+    def standby_15(): pass
+    @standby_generic 
+    def standby_16(): pass
+    @standby_generic 
+    def standby_17(): pass
+    @standby_generic 
+    def standby_18(): pass
+    @standby_generic 
+    def standby_19(): pass
+    @standby_generic 
+    def standby_20(): pass
+