moving to multi-plcs daily tests
[tests.git] / system / TestPlc.py
index 112b993..88a4a41 100644
@@ -3,9 +3,9 @@ import os, os.path
 import datetime
 import time
 import sys
-import datetime
 import traceback
 from types import StringTypes
+import socket
 
 import utils
 from TestSite import TestSite
@@ -58,8 +58,41 @@ def slice_mapper_options (method):
         return overall
     return actual
 
+SEP='<sep>'
+
 class TestPlc:
 
+    default_steps = [
+        'display','uninstall','install','install_rpm', 
+        'configure', 'start', 'fetch_keys', SEP,
+        'store_keys', 'clear_known_hosts', 'initscripts', SEP,
+        'sites', 'nodes', 'slices', 'nodegroups', SEP,
+        'init_node','bootcd', 'configure_qemu', 'export_qemu',
+        'kill_all_qemus', 'reinstall_node','start_node', SEP,
+        # better use of time: do this now that the nodes are taking off
+        'plcsh_stress_test', SEP,
+        'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
+        'check_tcp',  SEP,
+        'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker',
+        ]
+    other_steps = [ 
+        'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
+        'check_sanity',  SEP,
+        'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
+        'clean_sites', 'clean_nodes', 
+        'clean_slices', 'clean_keys', SEP,
+        'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
+        'db_dump' , 'db_restore', 'cleanup_trackers', 'cleanup_all_trackers',
+        'standby_1 through 20',
+        ]
+
+    @staticmethod
+    def printable_steps (steps):
+        return " ".join(steps).replace(" "+SEP+" "," \\\n")
+    @staticmethod
+    def valid_step (step):
+        return step != SEP
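+
+    # a quick illustration of printable_steps, on a hypothetical list:
+    #   printable_steps(['display','install',SEP,'configure'])
+    # joins to 'display install <sep> configure', then each ' <sep> ' becomes
+    # ' \'+newline, so the steps print as shell-style continuation lines:
+    #   display install \
+    #   configure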
+
     def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
         self.options=options
@@ -70,17 +103,12 @@ class TestPlc:
             self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
             self.vserver=True
         except:
-            self.vserver=False
-            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
-#        utils.header('Using API url %s'%self.url)
+            raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
         
     def name(self):
         name=self.plc_spec['name']
-        if self.vserver:
-            return name+".vserver.%s"%self.vservername
-        else:
-            return name+".chroot"
+        return "%s.%s"%(name,self.vservername)
 
     def hostname(self):
         return self.plc_spec['hostname']
@@ -96,40 +124,26 @@ class TestPlc:
     def actual_command_in_guest (self,command):
         return self.test_ssh.actual_command(self.host_to_guest(command))
     
+    def start_guest (self):
+        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
+    
     def run_in_guest (self,command):
         return utils.system(self.actual_command_in_guest(command))
     
     def run_in_host (self,command):
         return self.test_ssh.run_in_buildname(command)
 
-    #command gets run in the chroot/vserver
+    #command gets run in the vserver
     def host_to_guest(self,command):
-        if self.vserver:
-            return "vserver %s exec %s"%(self.vservername,command)
-        else:
-            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)
+        return "vserver %s exec %s"%(self.vservername,command)
+    
+    # this command gets run in the host - it starts the guest vserver
+    def start_guest_in_host(self):
+        return "vserver %s start"%(self.vservername)
     
-    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
-    def copy_in_guest (self, localfile, remotefile, in_data=False):
-        if in_data:
-            chroot_dest="/plc/data"
-        else:
-            chroot_dest="/plc/root"
-        if self.is_local():
-            if not self.vserver:
-                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
-            else:
-                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
-        else:
-            if not self.vserver:
-                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
-            else:
-                utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))
-
-
     # xxx quick n dirty
     def run_in_guest_piped (self,local,remote):
-        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote)))
+        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
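+
+    # sketch of intended use, with hypothetical commands: pipe a local stream
+    # into a command running inside the guest, e.g.
+    #   self.run_in_guest_piped("cat dump.sql","psql -U pgsqluser planetlab")
+    # keep_stdin=True is what lets the local pipe reach the remote command's stdin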
 
     def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
@@ -171,6 +185,37 @@ class TestPlc:
                 return slice
         raise Exception,"Cannot locate slice %s"%slicename
 
+    def all_sliver_objs (self):
+        result=[]
+        for slice_spec in self.plc_spec['slices']:
+            slicename = slice_spec['slice_fields']['name']
+            for nodename in slice_spec['nodenames']:
+                result.append(self.locate_sliver_obj (nodename,slicename))
+        return result
+
+    def locate_sliver_obj (self,nodename,slicename):
+        (site,node) = self.locate_node(nodename)
+        slice = self.locate_slice (slicename)
+        # build objects
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        # xxx the slice site is assumed to be the node site - mhh - probably harmless
+        test_slice = TestSlice (self, test_site, slice)
+        return TestSliver (self, test_node, test_slice)
+
+    def locate_first_node(self):
+        nodename=self.plc_spec['slices'][0]['nodenames'][0]
+        (site,node) = self.locate_node(nodename)
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        return test_node
+
+    def locate_first_sliver (self):
+        slice_spec=self.plc_spec['slices'][0]
+        slicename=slice_spec['slice_fields']['name']
+        nodename=slice_spec['nodenames'][0]
+        return self.locate_sliver_obj(nodename,slicename)
+
     # all different hostboxes used in this plc
     def gather_hostBoxes(self):
         # maps on sites and nodes, return [ (host_box,test_node) ]
@@ -181,7 +226,7 @@ class TestPlc:
                 test_node = TestNode (self, test_site, node_spec)
                 if not test_node.is_real():
                     tuples.append( (test_node.host_box(),test_node) )
-        # transform into a dict { 'host_box' -> [ hostnames .. ] }
+        # transform into a dict { 'host_box' -> [ test_node .. ] }
         result = {}
         for (box,node) in tuples:
             if not result.has_key(box):
@@ -198,9 +243,11 @@ class TestPlc:
 
     # make this a valid step
     def kill_all_qemus(self):
+        # this is the brute force version, kill all qemus on that host box
         for (box,nodes) in self.gather_hostBoxes().iteritems():
-            # this is the brute force version, kill all qemus on that host box
-            TestBox(box,self.options.buildname).kill_all_qemus()
+            # pass the first node's nodedir, as we don't push template-qemu on testboxes
+            nodedir=nodes[0].nodedir()
+            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
         return True
 
     # make this a valid step
@@ -226,89 +273,242 @@ class TestPlc:
                 node.kill_qemu()
         return True
 
-    #################### step methods
-
-    ### uninstall
-    def uninstall_chroot(self):
-        self.run_in_host('service plc safestop')
-        #####detecting the last myplc version installed and remove it
-        self.run_in_host('rpm -e myplc')
-        ##### Clean up the /plc directory
-        self.run_in_host('rm -rf /plc/data')
-        ##### stop any running vservers
-        self.run_in_host('for vserver in $(ls -d /vservers/* | sed -e s,/vservers/,,) ; do case $vserver in vtest*) echo Shutting down vserver $vserver ; vserver $vserver stop ;; esac ; done')
+    #################### display config
+    def display (self):
+        self.display_pass (1)
+        self.display_pass (2)
         return True
 
-    def uninstall_vserver(self):
-        self.run_in_host("vserver --silent %s delete"%self.vservername)
+    # entry point - pass 1 shows the scalar settings, pass 2 details sites, initscripts, slices and keys
+    def display_pass (self,passno):
+        for (key,val) in self.plc_spec.iteritems():
+            if passno == 2:
+                if key == 'sites':
+                    for site in val:
+                        self.display_site_spec(site)
+                        for node in site['nodes']:
+                            self.display_node_spec(node)
+                elif key=='initscripts':
+                    for initscript in val:
+                        self.display_initscript_spec (initscript)
+                elif key=='slices':
+                    for slice in val:
+                        self.display_slice_spec (slice)
+                elif key=='keys':
+                    for key_spec in val:
+                        self.display_key_spec (key_spec)
+            elif passno == 1:
+                if key not in ['sites','initscripts','slices','keys']:
+                    print '*   ',key,':',val
+
+    def display_site_spec (self,site):
+        print '* ======== site',site['site_fields']['name']
+        for (k,v) in site.iteritems():
+            if k=='nodes':
+                if v: 
+                    print '*       ','nodes : ',
+                    for node in v:  
+                        print node['node_fields']['hostname'],'',
+                    print ''
+            elif k=='users':
+                if v: 
+                    print '*       users : ',
+                    for user in v:  
+                        print user['name'],'',
+                    print ''
+            elif k == 'site_fields':
+                print '*       login_base',':',v['login_base']
+            elif k == 'address_fields':
+                pass
+            else:
+                print '*       ',k,
+                PrettyPrinter(indent=8,depth=2).pprint(v)
+        
+    def display_initscript_spec (self,initscript):
+        print '* ======== initscript',initscript['initscript_fields']['name']
+
+    def display_key_spec (self,key):
+        print '* ======== key',key['name']
+
+    def display_slice_spec (self,slice):
+        print '* ======== slice',slice['slice_fields']['name']
+        for (k,v) in slice.iteritems():
+            if k=='nodenames':
+                if v: 
+                    print '*       nodes : ',
+                    for nodename in v:  
+                        print nodename,'',
+                    print ''
+            elif k=='usernames':
+                if v: 
+                    print '*       users : ',
+                    for username in v:  
+                        print username,'',
+                    print ''
+            elif k=='slice_fields':
+                print '*       fields',':',
+                print 'max_nodes=',v['max_nodes'],
+                print ''
+            else:
+                print '*       ',k,v
+
+    def display_node_spec (self,node):
+        print "*           node",node['name'],"host_box=",node['host_box'],
+        print "hostname=",node['node_fields']['hostname'],
+        print "ip=",node['interface_fields']['ip']
+    
+
+    # another entry point for just showing the boxes involved
+    def display_mapping (self):
+        TestPlc.display_mapping_plc(self.plc_spec)
         return True
 
-    def uninstall(self):
-        # if there's a chroot-based myplc running, and then a native-based myplc is being deployed
-        # it sounds safer to have the former uninstalled too
-        # now the vserver method cannot be invoked for chroot instances as vservername is required
-        if self.vserver:
-            self.uninstall_vserver()
-            self.uninstall_chroot()
-        else:
-            self.uninstall_chroot()
+    @staticmethod
+    def display_mapping_plc (plc_spec):
+        print '* MyPLC',plc_spec['name']
+        print '*\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
+        print '*\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
+        for site_spec in plc_spec['sites']:
+            for node_spec in site_spec['nodes']:
+                TestPlc.display_mapping_node(node_spec)
+
+    @staticmethod
+    def display_mapping_node (node_spec):
+        print '*   NODE %s'%(node_spec['name'])
+        print '*\tqemu box %s'%node_spec['host_box']
+        print '*\thostname=%s'%node_spec['node_fields']['hostname']
+
+    ### utility methods for handling the pool of IP addresses allocated to plcs
+    # Logic
+    # (*) running plcs are recorded in the file named ~/running-test-plcs
+    # (*) this file contains a line for each running plc, older first
+    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
+    # (*) the free_tracker method stops the oldest entries, keeping at most TRACKER_KEEP_VSERVERS running
+    # (*) the record_tracker method adds an entry at the bottom of the file
+    # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
+
+    TRACKER_FILE=os.environ['HOME']+"/running-test-plcs"
+    # how many concurrent plcs are we keeping alive - adjust with the IP pool size
+    TRACKER_KEEP_VSERVERS = 12
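+    # sample tracker file contents (hostnames purely illustrative):
+    #   vplc01 testbox1.example.org
+    #   vplc02 testbox2.example.org
+    # i.e. one '<vservername> <hostname>' line per running plc, oldest first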
+
+    def record_tracker (self):
+        try:
+            lines=file(TestPlc.TRACKER_FILE).readlines()
+        except:
+            lines=[]
+
+        this_line="%s %s\n"%(self.vservername,self.test_ssh.hostname)
+        for line in lines:
+            if line==this_line:
+                print 'this vserver is already included in %s'%TestPlc.TRACKER_FILE
+                return True
+        if self.options.dry_run:
+            print 'dry_run: record_tracker - skipping tracker update'
+            return True
+        tracker=file(TestPlc.TRACKER_FILE,"w")
+        for line in lines+[this_line]:
+            tracker.write(line)
+        tracker.close()
+        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
         return True
 
-    ### install
-    def install_chroot(self):
-        # nothing to do
+    def free_tracker (self, keep_vservers=None):
+        if not keep_vservers: keep_vservers=TestPlc.TRACKER_KEEP_VSERVERS
+        try:
+            lines=file(TestPlc.TRACKER_FILE).readlines()
+        except:
+            print 'free_tracker: no tracker file %s (dry_run, or nothing recorded) - nothing to do'%TestPlc.TRACKER_FILE
+            return True
+        how_many = len(lines) - keep_vservers
+        # nothing to do until we have more than keep_vservers in the tracker
+        if how_many <= 0:
+            print 'free_tracker : limit %d not reached'%keep_vservers
+            return True
+        to_stop = lines[:how_many]
+        to_keep = lines[how_many:]
+        # check dry_run before anything actually gets stopped
+        if self.options.dry_run:
+            print 'dry_run: free_tracker would stop %d vservers'%len(to_stop)
+            for line in to_stop: print line,
+            print 'dry_run: free_tracker would keep %d vservers'%len(to_keep)
+            for line in to_keep: print line,
+            return True
+        for line in to_stop:
+            print '>%s<'%line
+            [vname,hostname]=line.split()
+            command=TestSsh(hostname).actual_command("vserver --silent %s stop"%vname)
+            utils.system(command)
+        print "Storing %d remaining vservers in %s"%(len(to_keep),TestPlc.TRACKER_FILE)
+        tracker=open(TestPlc.TRACKER_FILE,"w")
+        for line in to_keep:
+            tracker.write(line)
+        tracker.close()
+        return True
+
+    # stops the vservers recorded in TRACKER_FILE, then removes the file
+    def cleanup_trackers (self):
+        try:
+            for line in file(TestPlc.TRACKER_FILE).readlines():
+                [vname,hostname]=line.split()
+                stop="vserver --silent %s stop"%vname
+                command=TestSsh(hostname).actual_command(stop)
+                utils.system(command)
+            clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
+            utils.system(self.test_ssh.actual_command(clean_tracker))
+        except:
+            pass
+        return True
+
+    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
+    def cleanup_all_trackers (self):
+        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
+        utils.system(self.test_ssh.actual_command(stop_all))
+        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
+        utils.system(self.test_ssh.actual_command(clean_tracker))
         return True
 
-    def install_vserver(self):
-        # we need build dir for vtest-init-vserver
+    def uninstall(self):
+        self.run_in_host("vserver --silent %s delete"%self.vservername)
+        return True
+
+    ### install
+    def install(self):
         if self.is_local():
             # a full path for the local calls
-            build_dir=os.path(sys.argv[0])+"/build"
+            build_dir=os.path.dirname(sys.argv[0])
+            # sometimes this is empty - set to "." in such a case
+            if not build_dir: build_dir="."
+            build_dir += "/build"
         else:
-            # use a standard name - will be relative to HOME 
-            build_dir="options.buildname"
+            # use a standard name - will be relative to remote buildname
+            build_dir="build"
        # run checkout in any case - would do an update if already exists
         build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
         if self.run_in_host(build_checkout) != 0:
             return False
-        # the repo url is taken from myplc-url 
-        # with the last two steps (i386/myplc...) removed
-        repo_url = self.options.myplc_url
-        for level in [ 'rpmname','arch' ]:
+        # the repo url is taken from arch-rpms-url 
+        # with the last step (e.g. i386) removed
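+        # e.g. a hypothetical http://build.example.org/f8/arch-rpms/i386
+        # would become http://build.example.org/f8/arch-rpms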
+        repo_url = self.options.arch_rpms_url
+        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
-        create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
-            (build_dir,self.vservername,repo_url,self.vserverip)
+        # pass the vbuild-nightly options to vtest-init-vserver
+        test_env_options=""
+        test_env_options += " -p %s"%self.options.personality
+        test_env_options += " -d %s"%self.options.pldistro
+        test_env_options += " -f %s"%self.options.fcdistro
+        script="vtest-init-vserver.sh"
+        vserver_name = self.vservername
+        vserver_options="--netdev eth0 --interface %s"%self.vserverip
+        try:
+            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
+            vserver_options += " --hostname %s"%vserver_hostname
+        except:
+            pass
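+        # the assembled command then looks like this sketch (option values
+        # are hypothetical):
+        #   build/vtest-init-vserver.sh -p linux32 -d planetlab -f f8 \
+        #     vplc01 http://build.example.org/f8/arch-rpms -- \
+        #     --netdev eth0 --interface 192.168.0.10 --hostname vplc01.example.org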
+        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
         return self.run_in_host(create_vserver) == 0
 
-    def install(self):
-        if self.vserver:
-            return self.install_vserver()
-        else:
-            return self.install_chroot()
-    
-    ### install_rpm - make this an optional step
-    def cache_rpm(self):
-        url = self.options.myplc_url
-        rpm = os.path.basename(url)
-        cache_fetch="pwd;if [ -f %(rpm)s ] ; then echo Using cached rpm %(rpm)s ; else echo Fetching %(url)s ; curl -O %(url)s; fi"%locals()
-       return self.run_in_host(cache_fetch)==0
-
-    def install_rpm_chroot(self):
-        url = self.options.myplc_url
-        rpm = os.path.basename(url)
-       if not self.cache_rpm():
-            return False
-       utils.header('Installing the :  %s'%rpm)
-        return self.run_in_host('rpm -Uvh '+rpm)==0 and self.run_in_host('service plc mount')==0
-
-    def install_rpm_vserver(self):
-        return self.run_in_guest("yum -y install myplc-native")==0
-
+    ### install_rpm 
     def install_rpm(self):
-        if self.vserver:
-            return self.install_rpm_vserver()
-        else:
-            return self.install_rpm_chroot()
+        return self.run_in_guest("yum -y install myplc-native")==0 \
+            and self.run_in_guest("yum -y install noderepo-%s-%s"%(self.options.pldistro,self.options.arch))==0
 
     ### 
     def configure(self):
@@ -334,22 +534,19 @@ class TestPlc:
         utils.system('rm %s'%tmpname)
         return True
 
-    # the chroot install is slightly different to this respect
     def start(self):
-        if self.vserver:
-            self.run_in_guest('service plc start')
-        else:
-            self.run_in_host('service plc start')
+        self.run_in_guest('service plc start')
         return True
-        
+
     def stop(self):
-        if self.vserver:
-            self.run_in_guest('service plc stop')
-        else:
-            self.run_in_host('service plc stop')
+        self.run_in_guest('service plc stop')
         return True
         
-    # could use a TestKey class
+    def vs_start (self):
+        self.start_guest()
+        return True
+
+    # stores the keys from the config for further use
     def store_keys(self):
         for key_spec in self.plc_spec['keys']:
                TestKey(self,key_spec).store_key()
@@ -358,6 +555,26 @@ class TestPlc:
     def clean_keys(self):
         utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
 
+    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
+    # for later direct access to the nodes
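+    # resulting layout, for a hypothetical vservername 'vplc01':
+    #   keys/vplc01.pub keys/vplc01.rsa keys/vplc01-debug.pub keys/vplc01-debug.rsa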
+    def fetch_keys(self):
+        dir="./keys"
+        if not os.path.isdir(dir):
+            os.mkdir(dir)
+        vservername=self.vservername
+        overall=True
+        prefix = 'root_ssh_key'
+        for ext in [ 'pub', 'rsa' ] :
+            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+            dst="keys/%(vservername)s.%(ext)s"%locals()
+            if self.test_ssh.fetch(src,dst) != 0: overall=False
+        prefix = 'debug_ssh_key'
+        for ext in [ 'pub', 'rsa' ] :
+            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
+            if self.test_ssh.fetch(src,dst) != 0: overall=False
+        return overall
+
     def sites (self):
         return self.do_sites()
     
@@ -379,6 +596,13 @@ class TestPlc:
                 test_site.create_users()
         return True
 
+    def clean_all_sites (self):
+        print 'auth_root',self.auth_root()
+        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
+        for site_id in site_ids:
+            print 'Deleting site_id',site_id
+            self.apiserver.DeleteSite(self.auth_root(),site_id)
+        return True
+
     def nodes (self):
         return self.do_nodes()
     def clean_nodes (self):
@@ -401,9 +625,13 @@ class TestPlc:
                     test_node.create_node ()
         return True
 
-    # create nodegroups if needed, and populate
-    # no need for a clean_nodegroups if we are careful enough
     def nodegroups (self):
+        return self.do_nodegroups("add")
+    def clean_nodegroups (self):
+        return self.do_nodegroups("delete")
+
+    # create nodegroups if needed, and populate
+    def do_nodegroups (self, action="add"):
         # 1st pass to scan contents
         groups_dict = {}
         for site_spec in self.plc_spec['sites']:
@@ -419,14 +647,54 @@ class TestPlc:
                             groups_dict[nodegroupname]=[]
                         groups_dict[nodegroupname].append(test_node.name())
         auth=self.auth_root()
+        overall = True
         for (nodegroupname,group_nodes) in groups_dict.iteritems():
-            try:
-                self.apiserver.GetNodeGroups(auth,{'name':nodegroupname})[0]
-            except:
-                self.apiserver.AddNodeGroup(auth,{'name':nodegroupname})
-            for node in group_nodes:
-                self.apiserver.AddNodeToNodeGroup(auth,node,nodegroupname)
-        return True
+            if action == "add":
+                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
+                # first, check if the nodetagtype is here
+                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
+                if tag_types:
+                    tag_type_id = tag_types[0]['tag_type_id']
+                else:
+                    tag_type_id = self.apiserver.AddTagType(auth,
+                                                            {'tagname':nodegroupname,
+                                                             'description': 'for nodegroup %s'%nodegroupname,
+                                                             'category':'test',
+                                                             'min_role_id':10})
+                print 'located tag (type)',nodegroupname,'as',tag_type_id
+                # create nodegroup
+                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
+                if not nodegroups:
+                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
+                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
+                # set node tag on all nodes, value='yes'
+                for nodename in group_nodes:
+                    try:
+                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
+                    except:
+                        traceback.print_exc()
+                        print 'node',nodename,'seems to already have tag',nodegroupname
+                    # check anyway
+                    try:
+                        expect_yes = self.apiserver.GetNodeTags(auth,
+                                                                {'hostname':nodename,
+                                                                 'tagname':nodegroupname},
+                                                                ['tagvalue'])[0]['tagvalue']
+                        if expect_yes != "yes":
+                            print 'Mismatch node tag on node',nodename,'got',expect_yes
+                            overall=False
+                    except:
+                        if not self.options.dry_run:
+                            print 'Cannot find tag',nodegroupname,'on node',nodename
+                            overall = False
+            else:
+                try:
+                    print 'cleaning nodegroup',nodegroupname
+                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
+                except:
+                    traceback.print_exc()
+                    overall=False
+        return overall
 
     def all_hostnames (self) :
         hostnames = []
@@ -435,14 +703,14 @@ class TestPlc:
                            for node_spec in site_spec['nodes'] ]
         return hostnames
 
-    # gracetime : during the first <gracetime> minutes nothing gets printed
-    def do_nodes_booted (self, minutes, gracetime=2):
+    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
+    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
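+        # e.g. nodes_booted below calls this as
+        #   nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
+        # i.e. poll every 15 seconds, stay silent for 15 minutes, give up after 20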
         if self.options.dry_run:
             print 'dry_run'
             return True
         # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
-        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
+        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
+        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
         # the nodes that haven't checked yet - start with a full list and shrink over time
         tocheck = self.all_hostnames()
         utils.header("checking nodes %r"%tocheck)
@@ -455,21 +723,21 @@ class TestPlc:
             for array in tocheck_status:
                 hostname=array['hostname']
                 boot_state=array['boot_state']
-                if boot_state == 'boot':
-                    utils.header ("%s has reached the 'boot' state"%hostname)
+                if boot_state == target_boot_state:
+                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                 else:
                     # if it's a real node, never mind
                     (site_spec,node_spec)=self.locate_hostname(hostname)
                     if TestNode.is_real_model(node_spec['node_fields']['model']):
                         utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                         # let's cheat
-                        boot_state = 'boot'
-                    if datetime.datetime.now() > graceout:
+                        boot_state = target_boot_state
+                    elif datetime.datetime.now() > graceout:
                         utils.header ("%s still in '%s' state"%(hostname,boot_state))
                         graceout=datetime.datetime.now()+datetime.timedelta(1)
                 status[hostname] = boot_state
             # refresh tocheck
-            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
+            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
             if not tocheck:
                 return True
             if datetime.datetime.now() > timeout:
@@ -477,27 +745,47 @@ class TestPlc:
                     utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                 return False
             # otherwise, sleep for a while
-            time.sleep(15)
+            time.sleep(period)
         # only useful in empty plcs
         return True
 
     def nodes_booted(self):
-        return self.do_nodes_booted(minutes=0)
-    
+        return self.nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
 
-    def do_nodes_ssh(self,minutes):
+    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=20):
         # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
+        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
+        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+        vservername=self.vservername
+        if debug: 
+            message="debug"
+            local_key = "keys/%(vservername)s-debug.rsa"%locals()
+        else: 
+            message="boot"
+            local_key = "keys/%(vservername)s.rsa"%locals()
         tocheck = self.all_hostnames()
-#        self.scan_publicKeys(tocheck)
-        utils.header("checking Connectivity on nodes %r"%tocheck)
+        utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
+        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
+                         (timeout_minutes,silent_minutes,period))
         while tocheck:
             for hostname in tocheck:
-                # try to ssh in nodes
-                node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
-                access=self.run_in_guest(node_test_ssh.actual_command("date"))
-                if not access:
-                    utils.header('The node %s is sshable -->'%hostname)
+                # try to run 'hostname' in the node
+                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
+                # don't spam logs - show the command only after the grace period 
+                if datetime.datetime.now() > graceout:
+                    success=utils.system(command)
+                else:
+                    # truly silent, just print out a dot to show we're alive
+                    print '.',
+                    sys.stdout.flush()
+                    command += " 2>/dev/null"
+                    if self.options.dry_run:
+                        print 'dry_run',command
+                        success=0
+                    else:
+                        success=os.system(command)
+                if success==0:
+                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                     # refresh tocheck
                     tocheck.remove(hostname)
                 else:
@@ -513,12 +801,15 @@ class TestPlc:
                     utils.header("FAILURE to ssh into %s"%hostname)
                 return False
             # otherwise, sleep for a while
-            time.sleep(15)
+            time.sleep(period)
         # only useful in empty plcs
         return True
         
-    def nodes_ssh(self):
-        return self.do_nodes_ssh(minutes=2)
+    def nodes_ssh_debug(self):
+        return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)
+    
+    def nodes_ssh_boot(self):
+        return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
     
     @node_mapper
     def init_node (self): pass
@@ -526,7 +817,21 @@ class TestPlc:
     def bootcd (self): pass
     @node_mapper
     def configure_qemu (self): pass
+    @node_mapper
+    def reinstall_node (self): pass
+    @node_mapper
+    def export_qemu (self): pass
         
+    ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
+    def check_sanity_node (self): 
+        return self.locate_first_node().check_sanity()
+    def check_sanity_sliver (self) : 
+        return self.locate_first_sliver().check_sanity()
+    
+    def check_sanity (self):
+        return self.check_sanity_node() and self.check_sanity_sliver()
+
+    ### initscripts
     def do_check_initscripts(self):
         overall = True
         for slice_spec in self.plc_spec['slices']:
@@ -553,6 +858,18 @@ class TestPlc:
             self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
         return True
 
+    def clean_initscripts (self):
+        for initscript in self.plc_spec['initscripts']:
+            initscript_name = initscript['initscript_fields']['name']
+            print 'Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name'])
+            try:
+                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
+                print initscript_name,'deleted'
+            except:
+                print 'deletion went wrong - probably did not exist'
+        return True
+
+    ### manage slices
     def slices (self):
         return self.do_slices()
 
@@ -579,27 +896,8 @@ class TestPlc:
     @node_mapper
     def clear_known_hosts (self): pass
     
-    def start_nodes (self):
-        utils.header("Starting  nodes")
-        for site_spec in self.plc_spec['sites']:
-            TestSite(self,site_spec).start_nodes (self.options)
-        return True
-
-    def locate_first_sliver (self):
-        slice_spec = self.plc_spec['slices'][0]
-        slicename = slice_spec['slice_fields']['name']
-        nodename = slice_spec['nodenames'][0]
-        return self.locate_sliver_obj(nodename,slicename)
-
-    def locate_sliver_obj (self,nodename,slicename):
-        (site,node) = self.locate_node(nodename)
-        slice = self.locate_slice (slicename)
-        # build objects
-        test_site = TestSite (self, site)
-        test_node = TestNode (self, test_site,node)
-        # xxx the slice site is assumed to be the node site - mhh - probably harmless
-        test_slice = TestSlice (self, test_site, slice)
-        return TestSliver (self, test_node, test_slice)
+    @node_mapper
+    def start_node (self): pass
 
     def check_tcp (self):
         specs = self.plc_spec['tcp_test']
@@ -617,16 +915,30 @@ class TestPlc:
             if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                 overall=False
         return overall
-    
+
+    def plcsh_stress_test (self):
+        # install the stress-test in the plc image
+        location = "/usr/share/plc_api/plcsh-stress-test.py"
+        remote="/vservers/%s/%s"%(self.vservername,location)
+        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
+        command = location
+        command += " -- --check"
+        if self.options.size == 1:
+            command +=  " --tiny"
+        return ( self.run_in_guest(command) == 0)
 
     def gather_logs (self):
-        # (1) get the plc's /var/log and store it locally in logs/<plcname>-var-log/*
-        # (2) get all the nodes qemu log and store it as logs/<node>-qemu.log
-        # (3) get the nodes /var/log and store is as logs/<node>-var-log/*
-        # (4) as far as possible get the slice's /var/log as logs/<slice>-<node>-var-log/*
-        # (1)
+        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
+        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
+        # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
+        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
+        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
+        # (1.a)
         print "-------------------- TestPlc.gather_logs : PLC's /var/log"
         self.gather_var_logs ()
+        # (1.b)
+        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+        self.gather_pgsql_logs ()
         # (2) 
         print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
         for site_spec in self.plc_spec['sites']:
@@ -639,24 +951,29 @@ class TestPlc:
         self.gather_nodes_var_logs()
         # (4)
         print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
-        self.gather_first_sliver_logs()
+        self.gather_slivers_var_logs()
         return True
 
-    def gather_first_sliver_logs(self):
-        try:
-            test_sliver = self.locate_first_sliver()
+    def gather_slivers_var_logs(self):
+        for test_sliver in self.all_sliver_objs():
             remote = test_sliver.tar_var_logs()
-            utils.system("mkdir -p logs/%s-var-log"%test_sliver.name())
-            command = remote + " | tar -C logs/%s-var-log -xf -"%test_sliver.name()
+            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
+            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
             utils.system(command)
-        except Exception,e:
-            print 'Cannot locate first sliver - giving up',e
         return True
 
     def gather_var_logs (self):
+        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")        
-        command = to_plc + "| tar -C logs/%s-var-log -xf -"%self.name()
-        utils.system("mkdir -p logs/%s-var-log"%self.name())
+        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
+        utils.system(command)
+        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
+        utils.system(command)
+
+    def gather_pgsql_logs (self):
+        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
+        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")        
+        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
         utils.system(command)
 
     def gather_nodes_var_logs (self):
@@ -666,8 +983,8 @@ class TestPlc:
                 test_node=TestNode(self,test_site,node_spec)
                 test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                 to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
-                command = to_plc + "| tar -C logs/%s-var-log -xf -"%test_node.name()
-                utils.system("mkdir -p logs/%s-var-log"%test_node.name())
+                command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
+                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                 utils.system(command)