bugfix
[tests.git] / system / TestPlc.py
index eb7a15e..f061eb6 100644 (file)
@@ -1,13 +1,13 @@
 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
 # Copyright (C) 2010 INRIA 
 #
-import os, os.path
-import datetime
-import time
 import sys
+import time
+import os, os.path
 import traceback
-from types import StringTypes
 import socket
+from datetime import datetime, timedelta
+from types import StringTypes
 
 import utils
 from TestSite import TestSite
@@ -19,7 +19,9 @@ from TestSliver import TestSliver
 from TestBoxQemu import TestBoxQemu
 from TestSsh import TestSsh
 from TestApiserver import TestApiserver
-from TestSliceSfa import TestSliceSfa
+from TestAuthSfa import TestAuthSfa
+from PlcapiUrlScanner import PlcapiUrlScanner
+from Completer import Completer, CompleterTask
 
 # step methods must take (self) and return a boolean (options is a member of the class)
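# for illustration only (not part of this change), a minimal step following
# that convention would look like:
#
#     def my_step (self):
#         "one-line description, picked up as the step's doc by the driver"
#         return self.run_in_host("some-command") == 0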
 
@@ -45,7 +47,7 @@ def node_mapper (method):
             if not node_method(test_node, *args, **kwds): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
     return actual
 
 def slice_mapper (method):
@@ -59,23 +61,65 @@ def slice_mapper (method):
             if not slice_method(test_slice,self.options): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
     return actual
 
-def slice_sfa_mapper (method):
+# run a step, but wrap its result in an Ignored object so that the driver can go on regardless
+def ignore_result (method):
+    def wrappee (self):
+        # ssh_slice_ignore->ssh_slice
+        ref_name=method.__name__.replace('_ignore','').replace('force_','')
+        ref_method=TestPlc.__dict__[ref_name]
+        result=ref_method(self)
+        print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
+        return Ignored (result)
+    wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
+    return wrappee
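# for illustration only: ignore_result is meant to wrap an existing step, as
# create_ignore_steps does further down, e.g.
#
#     TestPlc.ssh_slice_again_ignore = ignore_result (TestPlc.ssh_slice_again)
#
# the wrapped step still runs ssh_slice_again, but its outcome comes back as
# an Ignored object, so the overall run does not fail because of it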
+
+# a variant that expects the TestSlice method to return a list of CompleterTasks that
+# are then merged into a single Completer run, to avoid waiting on each slice in turn -
+# especially useful when a test fails, of course
+# because we need to pass arguments, we use a class rather than a plain decorator
+class slice_mapper__tasks (object):
+    # could not get this to work with named arguments
+    def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
+        self.timeout=timedelta(minutes=timeout_minutes)
+        self.silent=timedelta(minutes=silent_minutes)
+        self.period=timedelta(seconds=period_seconds)
+    def __call__ (self, method):
+        decorator_self=self
+        # compute augmented method name
+        method_name = method.__name__ + "__tasks"
+        # locate in TestSlice
+        slice_method = TestSlice.__dict__[ method_name ]
+        def wrappee(self):
+            tasks=[]
+            for slice_spec in self.plc_spec['slices']:
+                site_spec = self.locate_site (slice_spec['sitename'])
+                test_site = TestSite(self,site_spec)
+                test_slice=TestSlice(self,test_site,slice_spec)
+                tasks += slice_method (test_slice, self.options)
+            return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
+        # restore the doc text from the TestSlice method even if a bit odd
+        wrappee.__doc__ = slice_method.__doc__
+        return wrappee
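# for reference, the task protocol that Completer expects, as can be inferred
# from its uses in this file (Completer and CompleterTask live in Completer.py):
#
#     class CompleterTaskPing (CompleterTask):
#         def __init__ (self, hostname):
#             self.hostname=hostname
#         def actual_run (self):
#             # should return True once the awaited condition is met
#             return utils.system("ping -c1 -w1 %s"%self.hostname, silent=True)==0
#         def message (self):
#             return "pinging %s"%self.hostname
#         def failure_message (self):
#             return "could not ping %s"%self.hostname
#
#     # and a merged run over several tasks:
#     Completer ([ CompleterTaskPing (h) for h in hostnames ]).run (
#         timedelta(minutes=5), timedelta(minutes=3), timedelta(seconds=10))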
+
+def auth_sfa_mapper (method):
     def actual(self):
         overall=True
-        slice_method = TestSliceSfa.__dict__[method.__name__]
-        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
-            site_spec = self.locate_site (slice_spec['sitename'])
-            test_site = TestSite(self,site_spec)
-            test_slice=TestSliceSfa(self,test_site,slice_spec)
-            if not slice_method(test_slice,self.options): overall=False
+        auth_method = TestAuthSfa.__dict__[method.__name__]
+        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+            test_auth=TestAuthSfa(self,auth_spec)
+            if not auth_method(test_auth,self.options): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
     return actual
 
+class Ignored:
+    def __init__ (self,result):
+        self.result=result
+
 SEP='<sep>'
 SEPSFA='<sep_sfa>'
 
@@ -86,20 +130,29 @@ class TestPlc:
         'vs_delete','timestamp_vs','vs_create', SEP,
         'plc_install', 'plc_configure', 'plc_start', SEP,
         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
+        'plcapi_urls','speed_up_slices', SEP,
         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
+# slices created under plcsh interactively seem to be fine but these ones don't have the tags
+# keep this out of the way for now
+#        'check_vsys_defaults', SEP,
         'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
-        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
+        'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
-        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
-        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA, 
-        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
+        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
+        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
+        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
+        'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
         # but as the stress test might take a while, we sometimes missed the debug mode..
         'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
-        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
+        'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
         'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
-        'check_tcp', 'check_sys_slice', SEP,
-        'force_gather_logs', SEP,
+        'cross_check_tcp@1', 'check_system_slice', SEP,
+        # check slices are turned off properly
+        'empty_slices', 'ssh_slice_off', SEP,
+        # check they are properly re-created with the same name
+        'fill_slices', 'ssh_slice_again_ignore', SEP,
+        'gather_logs_force', SEP,
         ]
     other_steps = [ 
         'export', 'show_boxes', SEP,
@@ -107,13 +160,15 @@ class TestPlc:
         'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
         'delete_leases', 'list_leases', SEP,
-        'populate' , SEP,
+        'populate', SEP,
         'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
         'plc_db_dump' , 'plc_db_restore', SEP,
-        'standby_1_through_20',SEP,
+        'check_netflow','check_drl', SEP,
+        'debug_nodemanager', SEP,
+        'standby_1_through_20','yes','no',SEP,
         ]
 
     @staticmethod
@@ -125,15 +180,20 @@ class TestPlc:
         return step != SEP and step != SEPSFA
 
     # turn off the sfa-related steps when build has skipped SFA
-    # this is originally for centos5 as recent SFAs won't build on this platform
+    # this was originally for centos5 but is still valid
+    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
     @staticmethod
     def check_whether_build_has_sfa (rpms_url):
+        utils.header ("Checking if build provides SFA package...")
         # warning, we're now building 'sface' so let's be a bit more picky
         retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
         # full builds are expected to return with 0 here
-        if retcod!=0:
+        if retcod==0:
+            utils.header("build does provide SFA")
+        else:
             # move all steps containing 'sfa' from default_steps to other_steps
-            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
+            utils.header("SFA package not found - removing steps with sfa or sfi")
+            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
             TestPlc.other_steps += sfa_steps
             for step in sfa_steps: TestPlc.default_steps.remove(step)
 
@@ -145,6 +205,8 @@ class TestPlc:
         self.vservername=plc_spec['vservername']
         self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
+        (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
+        (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
         
     def has_addresses_api (self):
         return self.apiserver.has_method('AddIpAddress')
@@ -164,57 +226,58 @@ class TestPlc:
     def connect (self):
        pass
 
-    def actual_command_in_guest (self,command):
-        return self.test_ssh.actual_command(self.host_to_guest(command))
+    def actual_command_in_guest (self,command, backslash=False):
+        raw1=self.host_to_guest(command)
+        raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
+        return raw2
     
     def start_guest (self):
-      return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
+      return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
     
     def stop_guest (self):
-      return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
+      return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
     
-    def run_in_guest (self,command):
-        return utils.system(self.actual_command_in_guest(command))
+    def run_in_guest (self,command,backslash=False):
+        raw=self.actual_command_in_guest(command,backslash)
+        return utils.system(raw)
     
     def run_in_host (self,command):
-        return self.test_ssh.run_in_buildname(command)
+        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
 
+    # backslashing turned out so awful at some point that I've turned off auto-backslashing
+    # see e.g. plc_start esp. the version for f14
     #command gets run in the plc's vm
     def host_to_guest(self,command):
-        if self.options.plcs_use_lxc:
-            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
+        # f14 still needs some extra help
+        if self.options.fcdistro == 'f14':
+            raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
         else:
-            return "vserver %s exec %s"%(self.vservername,command)
+            raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
+        return raw
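# for illustration: with vservername='vplc01' (hypothetical), host_to_guest
# ('service plc start') expands on a non-f14 fcdistro to
#     virsh -c lxc:/// lxc-enter-namespace vplc01 -- /usr/bin/env service plc start
# and actual_command_in_guest then wraps that into the ssh invocation
# targeting the host box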
     
+    # this /vservers thing is legacy...
     def vm_root_in_host(self):
-        if self.options.plcs_use_lxc:
-            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
-        else:
-            return "/vservers/%s"%(self.vservername)
+        return "/vservers/%s/"%(self.vservername)
 
     def vm_timestamp_path (self):
-        if self.options.plcs_use_lxc:
-            return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
-        else:
-            return "/vservers/%s.timestamp"%(self.vservername)
+        return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
 
     #start/stop the vserver
     def start_guest_in_host(self):
-        if self.options.plcs_use_lxc:
-            return "lxc-start --daemon --name=%s"%(self.vservername)
-        else:
-            return "vserver %s start"%(self.vservername)
+        return "virsh -c lxc:/// start %s"%(self.vservername)
     
     def stop_guest_in_host(self):
-        if self.options.plcs_use_lxc:
-            return "lxc-stop --name=%s"%(self.vservername)
-        else:
-            return "vserver %s stop"%(self.vservername)
+        return "virsh -c lxc:/// destroy %s"%(self.vservername)
     
     # xxx quick n dirty
     def run_in_guest_piped (self,local,remote):
         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
 
+    def yum_check_installed (self, rpms):
+        if isinstance (rpms, list): 
+            rpms=" ".join(rpms)
+        return self.run_in_guest("rpm -q %s"%rpms)==0
+        
     # does a yum install in the vs, ignore yum retcod, check with rpm
     def yum_install (self, rpms):
         if isinstance (rpms, list): 
@@ -222,7 +285,7 @@ class TestPlc:
         self.run_in_guest("yum -y install %s"%rpms)
         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
         self.run_in_guest("yum-complete-transaction -y")
-        return self.run_in_guest("rpm -q %s"%rpms)==0
+        return self.yum_check_installed (rpms)
 
     def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
@@ -252,11 +315,24 @@ class TestPlc:
                     return (site,node)
         raise Exception,"Cannot locate hostname %s"%hostname
         
-    def locate_key (self,keyname):
+    def locate_key (self,key_name):
         for key in self.plc_spec['keys']:
-            if key['name'] == keyname:
+            if key['key_name'] == key_name:
                 return key
-        raise Exception,"Cannot locate key %s"%keyname
+        raise Exception,"Cannot locate key %s"%key_name
+
+    def locate_private_key_from_key_names (self, key_names):
+        # locate the first avail. key
+        found=False
+        for key_name in key_names:
+            key_spec=self.locate_key(key_name)
+            test_key=TestKey(self,key_spec)
+            publickey=test_key.publicpath()
+            privatekey=test_key.privatepath()
+            if os.path.isfile(publickey) and os.path.isfile(privatekey):
+                found=True
+                break
+        if found: return privatekey
+        else:     return None
 
     def locate_slice (self, slicename):
         for slice in self.plc_spec['slices']:
@@ -296,7 +372,7 @@ class TestPlc:
         return self.locate_sliver_obj(nodename,slicename)
 
     # all different hostboxes used in this plc
-    def gather_hostBoxes(self):
+    def get_BoxNodes(self):
         # maps on sites and nodes, return [ (host_box,test_node) ]
         tuples=[]
         for site_spec in self.plc_spec['sites']:
@@ -317,7 +393,7 @@ class TestPlc:
     # a step for checking this stuff
     def show_boxes (self):
         'print summary of nodes location'
-        for (box,nodes) in self.gather_hostBoxes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().iteritems():
             print box,":"," + ".join( [ node.name() for node in nodes ] )
         return True
 
@@ -325,7 +401,7 @@ class TestPlc:
     def qemu_kill_all(self):
         'kill all qemu instances on the qemu boxes involved by this setup'
         # this is the brute force version, kill all qemus on that host box
-        for (box,nodes) in self.gather_hostBoxes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().iteritems():
             # pass the first nodename, as we don't push template-qemu on testboxes
             nodedir=nodes[0].nodedir()
             TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
@@ -334,24 +410,33 @@ class TestPlc:
     # make this a valid step
     def qemu_list_all(self):
         'list all qemu instances on the qemu boxes involved by this setup'
-        for (box,nodes) in self.gather_hostBoxes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().iteritems():
             # this is the brute force version, kill all qemus on that host box
             TestBoxQemu(box,self.options.buildname).qemu_list_all()
         return True
 
-    # kill only the right qemus
+    # list only the qemus related to this test
     def qemu_list_mine(self):
         'list qemu instances for our nodes'
-        for (box,nodes) in self.gather_hostBoxes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().iteritems():
             # the fine-grain version
             for node in nodes:
                 node.list_qemu()
         return True
 
+    # cleanup only the qemus related to this test
+    def qemu_clean_mine(self):
+        'cleanup (rm -rf) qemu instances for our nodes'
+        for (box,nodes) in self.get_BoxNodes().iteritems():
+            # the fine-grain version
+            for node in nodes:
+                node.qemu_clean()
+        return True
+
     # kill only the right qemus
     def qemu_kill_mine(self):
         'kill the qemu instances for our nodes'
-        for (box,nodes) in self.gather_hostBoxes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().iteritems():
             # the fine-grain version
             for node in nodes:
                 node.kill_qemu()
@@ -364,16 +449,20 @@ class TestPlc:
         self.show_pass (2)
         return True
 
+    # ugly hack to make sure 'run export' only reports about the 1st plc 
+    # to avoid confusion - also we use 'inri_slice1' in various aliases..
+    exported_id=1
     def export (self):
         "print cut'n paste-able stuff to export env variables to your shell"
         # guess local domain from hostname
+        if TestPlc.exported_id>1: 
+            print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
+            TestPlc.exported_id+=1
+            return True
+        TestPlc.exported_id+=1
         domain=socket.gethostname().split('.',1)[1]
         fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
         print "export BUILD=%s"%self.options.buildname
-        if self.options.plcs_use_lxc:
-            print "export PLCHOSTLXC=%s"%fqdn
-        else:
-            print "export PLCHOSTVS=%s"%fqdn
+        print "export PLCHOSTLXC=%s"%fqdn
         print "export GUESTNAME=%s"%self.plc_spec['vservername']
         vplcname=self.plc_spec['vservername'].split('-')[-1]
         print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
@@ -435,7 +524,7 @@ class TestPlc:
         print '+ ======== initscript',initscript['initscript_fields']['name']
 
     def display_key_spec (self,key):
-        print '+ ======== key',key['name']
+        print '+ ======== key',key['key_name']
 
     def display_slice_spec (self,slice):
         print '+ ======== slice',slice['slice_fields']['name']
@@ -490,6 +579,7 @@ class TestPlc:
     # write a timestamp in /vservers/<>.timestamp
     # cannot be inside the vserver, that causes vserver .. build to cough
     def timestamp_vs (self):
+        "Create a timestamp to remember creation date for this plc"
         now=int(time.time())
         # TODO-lxc check this one
         # a first approx. is to store the timestamp close to the VM root like vs does
@@ -504,13 +594,10 @@ class TestPlc:
         "vserver delete the test myplc"
         stamp_path=self.vm_timestamp_path()
         self.run_in_host("rm -f %s"%stamp_path)
-        if self.options.plcs_use_lxc:
-            self.run_in_host("lxc-stop --name %s"%self.vservername)
-            self.run_in_host("lxc-destroy --name %s"%self.vservername)
-            return True
-        else:
-            self.run_in_host("vserver --silent %s delete"%self.vservername)
-            return True
+        self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
+        self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
+        self.run_in_host("rm -fr /vservers/%s"%self.vservername)
+        return True
 
     ### install
     # historically the build was being fetched by the tests
@@ -536,25 +623,24 @@ class TestPlc:
         repo_url = self.options.arch_rpms_url
         for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
-        # pass the vbuild-nightly options to vtest-init-vserver
-        test_env_options=""
-        test_env_options += " -p %s"%self.options.personality
-        test_env_options += " -d %s"%self.options.pldistro
-        test_env_options += " -f %s"%self.options.fcdistro
-        if self.options.plcs_use_lxc:
-            script="vtest-init-lxc.sh"
-        else:
-            script="vtest-init-vserver.sh"
+
+        # invoke initvm (drop support for vs)
+        script="lbuild-initvm.sh"
+        script_options=""
+        # pass the vbuild-nightly options to [lv]test-initvm
+        script_options += " -p %s"%self.options.personality
+        script_options += " -d %s"%self.options.pldistro
+        script_options += " -f %s"%self.options.fcdistro
+        script_options += " -r %s"%repo_url
         vserver_name = self.vservername
-        vserver_options="--netdev eth0 --interface %s"%self.vserverip
         try:
             vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
-            vserver_options += " --hostname %s"%vserver_hostname
+            script_options += " -n %s"%vserver_hostname
         except:
             print "Cannot reverse lookup %s"%self.vserverip
             print "This is considered fatal, as this might pollute the test results"
             return False
-        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
+        create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
         return self.run_in_host(create_vserver) == 0
 
     ### install_rpm 
@@ -582,6 +668,11 @@ class TestPlc:
         pkgs_string=" ".join(pkgs_list)
         return self.yum_install (pkgs_list)
 
+    ###
+    def mod_python(self):
+        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
+        return self.yum_install ( [ 'mod_python' ] )
+
     ### 
     def plc_configure(self):
         "run plc-config-tty"
@@ -604,6 +695,7 @@ class TestPlc:
                      'PLC_RESERVATION_GRANULARITY',
                      'PLC_OMF_ENABLED',
                      'PLC_OMF_XMPP_SERVER',
+                     'PLC_VSYS_DEFAULTS',
                      ]:
             fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
         fileconf.write('w\n')
@@ -614,16 +706,34 @@ class TestPlc:
         utils.system('rm %s'%tmpname)
         return True
 
+# f14 is a bit odd in this respect; although this worked fine in guests up to f18,
+# using a f14 vplc guest under f20 requires this trick
+# the symptom is this: service plc start
+# Starting plc (via systemctl):  Failed to get D-Bus connection: \
+#    Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
+# weird thing is, the doc says f14 uses upstart by default and not systemd,
+# so this sounds kind of harmless
+    def start_service (self,service): return self.start_stop_service (service,'start')
+    def stop_service  (self,service): return self.start_stop_service (service,'stop')
+
+    def start_stop_service (self, service,start_or_stop):
+        "utility to start/stop a service with the special trick for f14"
+        if self.options.fcdistro != 'f14':
+            return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
+        else:
+            # patch /sbin/service so it does not reset environment
+            self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
+            # this is because our own scripts in turn call service 
+            return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
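# the sed above rewrites the usual line in the stock /sbin/service, roughly
#     exec env -i LANG=... PATH=... TERM=... "$SERVICEDIR/$SERVICE" $OPTIONS
# into a plain 'env', so that SYSTEMCTL_SKIP_REDIRECT survives into the
# initscript instead of being wiped out by 'env -i'
# (paraphrased from memory of the stock initscripts; exact wording may differ)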
+
     def plc_start(self):
         "service plc start"
-        self.run_in_guest('service plc start')
-        return True
+        return self.start_service ('plc')
 
     def plc_stop(self):
         "service plc stop"
-        self.run_in_guest('service plc stop')
-        return True
-        
+        return self.stop_service ('plc')
+
     def vs_start (self):
         "start the PLC vserver"
         self.start_guest()
@@ -689,8 +799,11 @@ class TestPlc:
     def delete_all_sites (self):
         "Delete all sites in PLC, and related objects"
         print 'auth_root',self.auth_root()
-        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
-        for site_id in site_ids:
+        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
+        for site in sites:
+            # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
+            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
+            site_id=site['site_id']
             print 'Deleting site_id',site_id
             self.apiserver.DeleteSite(self.auth_root(),site_id)
         return True
@@ -881,139 +994,103 @@ class TestPlc:
         return res
 
     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
-    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
+    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
         if self.options.dry_run:
             print 'dry_run'
             return True
-        # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
-        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+
+        class CompleterTaskBootState (CompleterTask):
+            def __init__ (self, test_plc,hostname):
+                self.test_plc=test_plc
+                self.hostname=hostname
+                self.last_boot_state='undef'
+            def actual_run (self):
+                try:
+                    node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
+                                                               ['boot_state'])[0]
+                    self.last_boot_state = node['boot_state'] 
+                    return self.last_boot_state == target_boot_state
+                except:
+                    return False
+            def message (self):
+                return "CompleterTaskBootState with node %s"%self.hostname
+            def failure_message (self):
+                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
+                
+        timeout = timedelta(minutes=timeout_minutes)
+        graceout = timedelta(minutes=silent_minutes)
+        period   = timedelta(seconds=period_seconds)
         # the nodes that haven't checked yet - start with a full list and shrink over time
-        tocheck = self.all_hostnames()
-        utils.header("checking nodes %r"%tocheck)
-        # create a dict hostname -> status
-        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
-        while tocheck:
-            # get their status
-            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
-            # update status
-            for array in tocheck_status:
-                hostname=array['hostname']
-                boot_state=array['boot_state']
-                if boot_state == target_boot_state:
-                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
-                else:
-                    # if it's a real node, never mind
-                    (site_spec,node_spec)=self.locate_hostname(hostname)
-                    if TestNode.is_real_model(node_spec['node_fields']['model']):
-                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
-                        # let's cheat
-                        boot_state = target_boot_state
-                    elif datetime.datetime.now() > graceout:
-                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
-                        graceout=datetime.datetime.now()+datetime.timedelta(1)
-                status[hostname] = boot_state
-            # refresh tocheck
-            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
-            if not tocheck:
-                return True
-            if datetime.datetime.now() > timeout:
-                for hostname in tocheck:
-                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
-                return False
-            # otherwise, sleep for a while
-            time.sleep(period)
-        # only useful in empty plcs
-        return True
+        utils.header("checking nodes boot state (expected %s)"%target_boot_state)
+        tasks = [ CompleterTaskBootState (self,hostname) \
+                      for (hostname,_) in self.all_node_infos() ]
+        return Completer (tasks).run (timeout, graceout, period)
 
     def nodes_booted(self):
         return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
 
-    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
-        # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
-        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
+        class CompleterTaskNodeSsh (CompleterTask):
+            def __init__ (self, hostname, qemuname, boot_state, local_key):
+                self.hostname=hostname
+                self.qemuname=qemuname
+                self.boot_state=boot_state
+                self.local_key=local_key
+            def run (self, silent):
+                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
+                return utils.system (command, silent=silent)==0
+            def failure_message (self):
+                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
+
+        # various delays 
+        timeout  = timedelta(minutes=timeout_minutes)
+        graceout = timedelta(minutes=silent_minutes)
+        period   = timedelta(seconds=period_seconds)
         vservername=self.vservername
         if debug: 
             message="debug"
             local_key = "keys/%(vservername)s-debug.rsa"%locals()
         else: 
             message="boot"
-           local_key = "keys/key1.rsa"
+           local_key = "keys/key_admin.rsa"
+        utils.header("checking ssh access to nodes (expected in %s mode)"%message)
         node_infos = self.all_node_infos()
-        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
-        for (nodename,qemuname) in node_infos:
-            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
-        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
-                         (timeout_minutes,silent_minutes,period))
-        while node_infos:
-            for node_info in node_infos:
-                (hostname,qemuname) = node_info
-                # try to run 'hostname' in the node
-                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
-                # don't spam logs - show the command only after the grace period 
-                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
-                if success==0:
-                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
-                    # refresh node_infos
-                    node_infos.remove(node_info)
-                else:
-                    # we will have tried real nodes once, in case they're up - but if not, just skip
-                    (site_spec,node_spec)=self.locate_hostname(hostname)
-                    if TestNode.is_real_model(node_spec['node_fields']['model']):
-                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
-                       node_infos.remove(node_info)
-            if  not node_infos:
-                return True
-            if datetime.datetime.now() > timeout:
-                for (hostname,qemuname) in node_infos:
-                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
-                return False
-            # otherwise, sleep for a while
-            time.sleep(period)
-        # only useful in empty plcs
-        return True
+        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
+                      for (nodename,qemuname) in node_infos ]
+        return Completer (tasks).run (timeout, graceout, period)
         
     def ssh_node_debug(self):
         "Tries to ssh into nodes in debug mode with the debug ssh key"
-        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
+        return self.check_nodes_ssh(debug=True,
+                                    timeout_minutes=self.ssh_node_debug_timeout,
+                                    silent_minutes=self.ssh_node_debug_silent)
     
     def ssh_node_boot(self):
         "Tries to ssh into nodes in production mode with the root ssh key"
-        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
+        return self.check_nodes_ssh(debug=False,
+                                    timeout_minutes=self.ssh_node_boot_timeout,
+                                    silent_minutes=self.ssh_node_boot_silent)
+
+    def node_bmlogs(self):
+        "Checks that there's a non-empty dir. /var/log/bm/raw"
+        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
     
     @node_mapper
-    def qemu_local_init (self): 
-        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
-        pass
+    def qemu_local_init (self): pass
     @node_mapper
-    def bootcd (self): 
-        "all nodes: invoke GetBootMedium and store result locally"
-        pass
+    def bootcd (self): pass
     @node_mapper
-    def qemu_local_config (self): 
-        "all nodes: compute qemu config qemu.conf and store it locally"
-        pass
+    def qemu_local_config (self): pass
     @node_mapper
-    def nodestate_reinstall (self): 
-        "all nodes: mark PLCAPI boot_state as reinstall"
-        pass
+    def nodestate_reinstall (self): pass
     @node_mapper
-    def nodestate_safeboot (self): 
-        "all nodes: mark PLCAPI boot_state as safeboot"
-        pass
+    def nodestate_safeboot (self): pass
     @node_mapper
-    def nodestate_boot (self): 
-        "all nodes: mark PLCAPI boot_state as boot"
-        pass
+    def nodestate_boot (self): pass
     @node_mapper
-    def nodestate_show (self): 
-        "all nodes: show PLCAPI boot_state"
-        pass
+    def nodestate_show (self): pass
     @node_mapper
-    def qemu_export (self): 
-        "all nodes: push local node-dep directory on the qemu box"
-        pass
+    def qemu_export (self): pass
         
     ### check hooks : invoke scripts from hooks/{node,slice}
     def check_hooks_node (self): 
@@ -1027,21 +1104,33 @@ class TestPlc:
 
     ### initscripts
     def do_check_initscripts(self):
-        overall = True
+        class CompleterTaskInitscript (CompleterTask):
+            def __init__ (self, test_sliver, stamp):
+                self.test_sliver=test_sliver
+                self.stamp=stamp
+            def actual_run (self):
+                return self.test_sliver.check_initscript_stamp (self.stamp)
+            def message (self):
+                return "initscript checker for %s"%self.test_sliver.name()
+            def failure_message (self):
+                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
+            
+        tasks=[]
         for slice_spec in self.plc_spec['slices']:
             if not slice_spec.has_key('initscriptstamp'):
                 continue
             stamp=slice_spec['initscriptstamp']
+            slicename=slice_spec['slice_fields']['name']
             for nodename in slice_spec['nodenames']:
+                print 'nodename',nodename,'slicename',slicename,'stamp',stamp
                 (site,node) = self.locate_node (nodename)
                 # xxx - passing the wrong site - probably harmless
                 test_site = TestSite (self,site)
                 test_slice = TestSlice (self,test_site,slice_spec)
                 test_node = TestNode (self,test_site,node)
                 test_sliver = TestSliver (self, test_node, test_slice)
-                if not test_sliver.check_initscript_stamp(stamp):
-                    overall = False
-        return overall
+                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
+        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
            
     def check_initscripts(self):
         "check that the initscripts have triggered"
@@ -1069,91 +1158,158 @@ class TestPlc:
     ### manage slices
     def slices (self):
         "create slices with PLCAPI"
-        return self.do_slices()
+        return self.do_slices(action="add")
 
     def delete_slices (self):
         "delete slices with PLCAPI"
-        return self.do_slices("delete")
+        return self.do_slices(action="delete")
+
+    def fill_slices (self):
+        "add nodes in slices with PLCAPI"
+        return self.do_slices(action="fill")
+
+    def empty_slices (self):
+        "remove nodes from slices with PLCAPI"
+        return self.do_slices(action="empty")
 
     def do_slices (self,  action="add"):
         for slice in self.plc_spec['slices']:
             site_spec = self.locate_site (slice['sitename'])
             test_site = TestSite(self,site_spec)
             test_slice=TestSlice(self,test_site,slice)
-            if action != "add":
-                utils.header("Deleting slices in site %s"%test_site.name())
+            if action == "delete":
                 test_slice.delete_slice()
-            else:    
-                utils.pprint("Creating slice",slice)
+            elif action=="fill":
+                test_slice.add_nodes()
+            elif action=="empty":
+                test_slice.delete_nodes()
+            else:
                 test_slice.create_slice()
-                utils.header('Created Slice %s'%slice['slice_fields']['name'])
         return True
         
+    @slice_mapper__tasks(20,10,15)
+    def ssh_slice(self): pass
+    @slice_mapper__tasks(20,19,15)
+    def ssh_slice_off (self): pass
+
+    # use another name so we can exclude/ignore it from the tests on the nightly command line
+    def ssh_slice_again(self): return self.ssh_slice()
+    # note that simply doing ssh_slice_again=ssh_slice would kind of work too
+    # but for some reason the ignore-wrapping thing would not
+
     @slice_mapper
-    def ssh_slice(self): 
-        "tries to ssh-enter the slice with the user key, to ensure slice creation"
-        pass
+    def ssh_slice_basics(self): pass
+
+    @slice_mapper
+    def check_vsys_defaults(self): pass
 
     @node_mapper
-    def keys_clear_known_hosts (self): 
-        "remove test nodes entries from the local known_hosts file"
-        pass
+    def keys_clear_known_hosts (self): pass
     
+    def plcapi_urls (self):
+        "run the PlcapiUrlScanner - check the plcapi is reachable through its various URLs"
+        return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
+
+    def speed_up_slices (self):
+        "tweak nodemanager settings on all nodes using a conf file"
+        # create the template on the server-side 
+        template="%s.nodemanager"%self.name()
+        template_file = open (template,"w")
+        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
+        template_file.close()
+        in_vm="/var/www/html/PlanetLabConf/nodemanager"
+        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+        self.test_ssh.copy_abs(template,remote)
+        # Add a conf file
+        self.apiserver.AddConfFile (self.auth_root(),
+                                    {'dest':'/etc/sysconfig/nodemanager',
+                                     'source':'PlanetLabConf/nodemanager',
+                                     'postinstall_cmd':'service nm restart',})
+        return True
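# node side: the PlanetLabConf machinery fetches this file as
# /etc/sysconfig/nodemanager and the postinstall_cmd restarts nodemanager,
# which then runs with the much shorter cycle set in OPTIONS above
# (-p/-r/-d are nodemanager's own flags; see its usage for details)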
+
+    def debug_nodemanager (self):
+        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
+        template="%s.nodemanager"%self.name()
+        template_file = open (template,"w")
+        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
+        template_file.close()
+        in_vm="/var/www/html/PlanetLabConf/nodemanager"
+        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+        self.test_ssh.copy_abs(template,remote)
+        return True
+
     @node_mapper
-    def qemu_start (self) : 
-        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
-        pass
+    def qemu_start (self) : pass
 
     @node_mapper
-    def timestamp_qemu (self) : 
-        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
-        pass
+    def timestamp_qemu (self) : pass
 
-    def check_tcp (self):
+    # when a spec refers to a node possibly on another plc
+    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
+        for plc in [ self ] + other_plcs:
+            try:
+                return plc.locate_sliver_obj (nodename, slicename)
+            except:
+                pass
+        raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
+
+    # implement this one as a cross step so that we can take advantage of different nodes
+    # in multi-plcs mode
+    def cross_check_tcp (self, other_plcs):
         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
-        specs = self.plc_spec['tcp_test']
+        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']: 
+            utils.header ("check_tcp: no/empty config found")
+            return True
+        specs = self.plc_spec['tcp_specs']
         overall=True
         for spec in specs:
             port = spec['port']
             # server side
-            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
-            if not s_test_sliver.run_tcp_server(port,timeout=10):
+            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
+            if not s_test_sliver.run_tcp_server(port,timeout=20):
                 overall=False
                 break
 
             # idem for the client side
-            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
-            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
+            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
+            # use nodename from the located sliver, unless 'client_connect' is set
+            if 'client_connect' in spec:
+                destination = spec['client_connect']
+            else:
+                destination=s_test_sliver.test_node.name()
+            if not c_test_sliver.run_tcp_client(destination,port):
                 overall=False
         return overall
 
     # painfully enough, we need to allow for some time as netflow might show up last
-    def check_sys_slice (self): 
+    def check_system_slice (self): 
         "all nodes: check that a system slice is alive"
-# would probably make more sense to check for netflow, 
-# but that one is currently not working in the lxc distro        
-#        return self.check_systemslice ('netflow')
-        return self.check_systemslice ('drl')
+        # netflow currently not working in the lxc distro
+        # drl not built at all in the wtx distro
+        # if we find either of them we're happy
+        return self.check_netflow() or self.check_drl()
     
+    # expose these
+    def check_netflow (self): return self._check_system_slice ('netflow')
+    def check_drl (self): return self._check_system_slice ('drl')
+
     # we have the slices up already here, so it should not take too long
-    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
-        test_nodes=self.all_nodes()
-        while test_nodes:
-            for test_node in test_nodes:
-                if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
-                    utils.header ("ok")
-                    test_nodes.remove(test_node)
-                else:
-                    print '.',
-            if not test_nodes:
-                return True
-            if datetime.datetime.now () > timeout:
-                for test_node in test_nodes:
-                    utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
-                return False
-            time.sleep(period)
-        return True
+    def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
+        class CompleterTaskSystemSlice (CompleterTask):
+            def __init__ (self, test_node, dry_run): 
+                self.test_node=test_node
+                self.dry_run=dry_run
+            def actual_run (self): 
+                return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
+            def message (self): 
+                return "System slice %s @ %s"%(slicename, self.test_node.name())
+            def failure_message (self): 
+                return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
+        timeout = timedelta(minutes=timeout_minutes)
+        silent  = timedelta (0)
+        period  = timedelta (seconds=period_seconds)
+        tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
+                      for test_node in self.all_nodes() ]
+        return Completer (tasks) . run (timeout, silent, period)
 
     def plcsh_stress_test (self):
         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
@@ -1183,19 +1339,49 @@ class TestPlc:
         "yum install sfa-plc"
         return self.yum_install("sfa-plc")
         
-    def sfa_install_client(self):
-        "yum install sfa-client"
-        return self.yum_install("sfa-client")
-        
     def sfa_install_sfatables(self):
         "yum install sfa-sfatables"
         return self.yum_install ("sfa-sfatables")
 
+    # for some very odd reason, this sometimes fails with the following symptom
+    # # yum install sfa-client
+    # Setting up Install Process
+    # ...
+    # Downloading Packages:
+    # Running rpm_check_debug
+    # Running Transaction Test
+    # Transaction Test Succeeded
+    # Running Transaction
+    # Transaction couldn't start:
+    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
+    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
+    # even though in the same context I have
+    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h 
+    # Filesystem            Size  Used Avail Use% Mounted on
+    # /dev/hdv1             806G  264G  501G  35% /
+    # none                   16M   36K   16M   1% /tmp
+    #
+    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
+    def sfa_install_client(self):
+        "yum install sfa-client"
+        first_try=self.yum_install("sfa-client")
+        if first_try: return True
+        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
+        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
+        cached_rpm_path=cached_rpm_path.strip()
+        utils.header("cached_rpm_path=<<%s>>"%cached_rpm_path)
+        # install the cached rpm directly, then check with rpm
+        self.run_in_guest("rpm -i %s"%cached_rpm_path)
+        return self.yum_check_installed ("sfa-client")
+
     def sfa_dbclean(self):
         "thoroughly wipes off the SFA database"
-        self.run_in_guest("sfa-nuke.py")==0 or \
-        self.run_in_guest("sfa-nuke-plc.py") or \
-        self.run_in_guest("sfaadmin.py registry nuke")
+        return self.run_in_guest("sfaadmin reg nuke")==0 or \
+            self.run_in_guest("sfa-nuke.py")==0 or \
+            self.run_in_guest("sfa-nuke-plc.py")==0
+
+    def sfa_fsclean(self):
+        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
+        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
         return True
 
     def sfa_plcclean(self):
@@ -1203,14 +1389,19 @@ class TestPlc:
         # ignore result 
         sfa_spec=self.plc_spec['sfa']
 
-        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
-            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
-            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
-            except: print "Slice %s already absent from PLC db"%slicename
+        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
+            login_base=auth_sfa_spec['login_base']
+            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
+            except: print "Site %s already absent from PLC db"%login_base
 
-            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
-            try: self.apiserver.DeletePerson(self.auth_root(),username)
-            except: print "User %s already absent from PLC db"%username
+            for spec_name in ['pi_spec','user_spec']:
+                user_spec=auth_sfa_spec[spec_name]
+                username=user_spec['email']
+                try: self.apiserver.DeletePerson(self.auth_root(),username)
+                except: 
+                    # this in fact is expected as sites delete their members
+                    #print "User %s already absent from PLC db"%username
+                    pass
 
         print "REMEMBER TO RUN sfa_import AGAIN"
         return True
@@ -1250,7 +1441,7 @@ class TestPlc:
         if not os.path.isdir(dirname):
             utils.system("mkdir -p %s"%dirname)
         if not os.path.isdir(dirname):
-            raise "Cannot create config dir for plc %s"%self.name()
+            raise Exception,"Cannot create config dir for plc %s"%self.name()
         return dirname
 
     def conffile(self,filename):
@@ -1288,6 +1479,8 @@ class TestPlc:
                      'SFA_DB_PASSWORD',
                      'SFA_DB_NAME',
                      'SFA_API_LOGLEVEL',
+                     'SFA_GENERIC_FLAVOUR',
+                     'SFA_AGGREGATE_ENABLED',
                      ]:
             if self.plc_spec['sfa'].has_key(var):
                 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
@@ -1332,17 +1525,14 @@ class TestPlc:
             and  self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
 
     def sfa_import(self):
-        "sfa-import-plc"
+        "use sfaadmin to import from plc"
         auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
-        return self.run_in_guest('sfa-import.py')==0 or \
-               self.run_in_guest('sfa-import-plc.py')==0 or \
-               self.run_in_guest('sfaadmin.py registry import_registry')==0
-# not needed anymore
-#        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
+        return self.run_in_guest('sfaadmin reg import_registry')==0 
 
     def sfa_start(self):
         "service sfa start"
-        return self.run_in_guest('service sfa start')==0
+        return self.start_service('sfa')
+
 
     def sfi_configure(self):
         "Create /root/sfi on the plc side for sfi client configuration"
@@ -1350,13 +1540,12 @@ class TestPlc:
             utils.header("DRY RUN - skipping step")
             return True
         sfa_spec=self.plc_spec['sfa']
-        # cannot use sfa_slice_mapper to pass dir_name
-        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
-            site_spec = self.locate_site (slice_spec['sitename'])
-            test_site = TestSite(self,site_spec)
-            test_slice=TestSliceSfa(self,test_site,slice_spec)
-            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
-            test_slice.sfi_config(dir_name)
+        # cannot use auth_sfa_mapper to pass dir_name
+        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+            test_slice=TestAuthSfa(self,slice_spec)
+            dir_basename=os.path.basename(test_slice.sfi_path())
+            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
+            test_slice.sfi_configure(dir_name)
             # push into the remote /root/sfi area
             location = test_slice.sfi_path()
             remote="%s/%s"%(self.vm_root_in_host(),location)
@@ -1371,64 +1560,40 @@ class TestPlc:
         self.run_in_guest("rm -rf /root/sfi")
         return True
 
-    @slice_sfa_mapper
-    def sfa_add_user(self):
-        "run sfi.py add"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_update_user(self):
-        "run sfi.py update"
-
-    @slice_sfa_mapper
-    def sfa_add_slice(self):
-        "run sfi.py add (on Registry) from slice.xml"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_discover(self):
-        "discover resources into resouces_in.rspec"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_create_slice(self):
-        "run sfi.py create (on SM) - 1st time"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_check_slice_plc(self):
-        "check sfa_create_slice at the plcs - all local nodes should be in slice"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_update_slice(self):
-        "run sfi.py create (on SM) on existing object"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_view(self):
-        "various registry-related calls"
-        pass
-
-    @slice_sfa_mapper
-    def ssh_slice_sfa(self): 
-       "tries to ssh-enter the SFA slice"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_delete_user(self):
-       "run sfi.py delete"
-        pass
-
-    @slice_sfa_mapper
-    def sfa_delete_slice(self):
-       "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
-        pass
+    @auth_sfa_mapper
+    def sfa_add_site (self): pass
+    @auth_sfa_mapper
+    def sfa_add_pi (self): pass
+    @auth_sfa_mapper
+    def sfa_add_user(self): pass
+    @auth_sfa_mapper
+    def sfa_update_user(self): pass
+    @auth_sfa_mapper
+    def sfa_add_slice(self): pass
+    @auth_sfa_mapper
+    def sfa_renew_slice(self): pass
+    @auth_sfa_mapper
+    def sfa_discover(self): pass
+    @auth_sfa_mapper
+    def sfa_create_slice(self): pass
+    @auth_sfa_mapper
+    def sfa_check_slice_plc(self): pass
+    @auth_sfa_mapper
+    def sfa_update_slice(self): pass
+    @auth_sfa_mapper
+    def sfi_list(self): pass
+    @auth_sfa_mapper
+    def sfi_show(self): pass
+    @auth_sfa_mapper
+    def ssh_slice_sfa(self): pass
+    @auth_sfa_mapper
+    def sfa_delete_user(self): pass
+    @auth_sfa_mapper
+    def sfa_delete_slice(self): pass
 
     def sfa_stop(self):
         "service sfa stop"
-        self.run_in_guest('service sfa stop')==0
-        return True
+        return self.stop_service ('sfa')
 
     def populate (self):
         "creates random entries in the PLCAPI"
@@ -1509,7 +1674,7 @@ class TestPlc:
             test_site = TestSite (self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node=TestNode(self,test_site,node_spec)
-                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
+                test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
                 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
@@ -1524,7 +1689,7 @@ class TestPlc:
             if not isinstance(name,StringTypes):
                 raise Exception
         except:
-            t=datetime.datetime.now()
+            t=datetime.now()
             d=t.date()
             name=str(d)
         return "/root/%s-%s.sql"%(database,name)
@@ -1550,6 +1715,26 @@ class TestPlc:
 
         utils.header('Database restored from ' + dump)
 
+    @staticmethod
+    def create_ignore_steps ():
+        for step in TestPlc.default_steps + TestPlc.other_steps:
+            # default step can have a plc qualifier
+            if '@' in step: (step,qualifier)=step.split('@')
+            # or be defined as forced or ignored by default
+            for keyword in ['_ignore','_force']:
+                if step.endswith (keyword): step=step.replace(keyword,'')
+            if step == SEP or step == SEPSFA : continue
+            method=getattr(TestPlc,step)
+            name=step+'_ignore'
+            wrapped=ignore_result(method)
+#            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
+            setattr(TestPlc, name, wrapped)
+            
+#    @ignore_result
+#    def ssh_slice_again_ignore (self): pass
+#    @ignore_result
+#    def check_initscripts_ignore (self): pass
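# so e.g. once create_ignore_steps() has run, TestPlc grows methods like
# check_initscripts_ignore or ssh_slice_again_ignore that run the original
# step but report an Ignored result - which is why the hand-written,
# decorated versions above are commented out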
+    
     def standby_1_through_20(self):
         """convenience function to wait for a specified number of minutes"""
         pass
@@ -1593,3 +1778,7 @@ class TestPlc:
     def standby_19(): pass
     @standby_generic 
     def standby_20(): pass
+
+    # convenience for debugging the test logic
+    def yes (self): return True
+    def no (self): return False