rework check-tcp so that we first wait for the network to be ready in the sliver
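
Before launching the tcp server and client, each server-side sliver is now polled
until it can bind a probe port (see CompleterTaskNetworkReadyInSliver below, which
relies on TestSliver.check_tcp_ready). As a rough standalone sketch of that polling
idea - names and timeouts here are illustrative only, the real code goes through
Completer/CompleterTask with a timeout, a grace period and a polling period:

    from datetime import datetime, timedelta
    import time

    def wait_until_ready(probe, timeout=timedelta(seconds=30),
                         period=timedelta(seconds=5)):
        # poll 'probe' (a no-argument callable that returns True once the
        # sliver can bind the test port) until it succeeds or timeout expires
        deadline = datetime.now() + timeout
        while datetime.now() < deadline:
            if probe():
                return True
            time.sleep(period.total_seconds())
        return False
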
diff --git a/system/TestPlc.py b/system/TestPlc.py
index 2878344..9504b70 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -10,8 +10,9 @@ from datetime import datetime, timedelta
 from types import StringTypes
 
 import utils
+from Completer import Completer, CompleterTask
 from TestSite import TestSite
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
 from TestUser import TestUser
 from TestKey import TestKey
 from TestSlice import TestSlice
@@ -21,7 +22,6 @@ from TestSsh import TestSsh
 from TestApiserver import TestApiserver
 from TestAuthSfa import TestAuthSfa
 from PlcapiUrlScanner import PlcapiUrlScanner
-from Completer import Completer, CompleterTask
 
 has_sfa_cache_filename="sfa-cache"
 
@@ -107,8 +107,9 @@ class slice_mapper__tasks (object):
                 test_site = TestSite(self,site_spec)
                 test_slice=TestSlice(self,test_site,slice_spec)
                 tasks += slice_method (test_slice, self.options)
-            return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
+            return Completer (tasks, message=method.__name__).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
         # restore the doc text from the TestSlice method even if a bit odd
+        wrappee.__name__ = method.__name__
         wrappee.__doc__ = slice_method.__doc__
         return wrappee
 
@@ -148,24 +149,32 @@ class TestPlc:
         'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
         'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
-        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
-        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
-        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
-        'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
+        'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
+        'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
+        'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1', 
+        'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
+        'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+        'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
+        'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
         # but as the stress test might take a while, we sometimes missed the debug mode..
+        'probe_kvm_iptables',
         'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
         'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
-        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
+        'ssh_slice_sfa@1', SEPSFA, 
+        'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
+        'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
         'cross_check_tcp@1', 'check_system_slice', SEP,
+        # for inspecting the slice while it runs the first time
+        #'fail',
         # check slices are turned off properly
-        'empty_slices', 'ssh_slice_off', SEP,
+        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
         # check they are properly re-created with the same name
-        'fill_slices', 'ssh_slice_again_ignore', SEP,
+        'fill_slices', 'ssh_slice_again', SEP,
         'gather_logs_force', SEP,
         ]
     other_steps = [ 
-        'export', 'show_boxes', SEP,
+        'export', 'show_boxes', 'super_speed_up_slices', SEP,
         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
         'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
@@ -175,9 +184,10 @@ class TestPlc:
         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+        'sfa_get_expires', SEPSFA,
         'plc_db_dump' , 'plc_db_restore', SEP,
         'check_netflow','check_drl', SEP,
-        'debug_nodemanager', SEP,
+        'debug_nodemanager', 'slice_fs_present', SEP,
         'standby_1_through_20','yes','no',SEP,
         ]
 
@@ -269,11 +279,14 @@ class TestPlc:
     # see e.g. plc_start esp. the version for f14
     #command gets run in the plc's vm
     def host_to_guest(self,command):
+        vservername=self.vservername
+        personality=self.options.personality
+        raw="%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s"%locals()
         # f14 still needs some extra help
         if self.options.fcdistro == 'f14':
-            raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
+            raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" %locals()
         else:
-            raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
+            raw +=" -- /usr/bin/env %(command)s"%locals()
         return raw
     
     # this /vservers thing is legacy...
@@ -309,9 +322,9 @@ class TestPlc:
         return self.yum_check_installed (rpms)
 
     def auth_root (self):
-       return {'Username':self.plc_spec['PLC_ROOT_USER'],
+       return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
                'AuthMethod':'password',
-               'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
+               'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
                 'Role' : self.plc_spec['role']
                 }
     def locate_site (self,sitename):
@@ -586,7 +599,7 @@ class TestPlc:
         print '+ MyPLC',plc_spec['name']
         # WARNING this would not be right for lxc-based PLC's - should be harmless though
         print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
-        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
+        print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
         for site_spec in plc_spec['sites']:
             for node_spec in site_spec['nodes']:
                 TestPlc.display_mapping_node(node_spec)
@@ -699,26 +712,8 @@ class TestPlc:
         "run plc-config-tty"
         tmpname='%s.plc-config-tty'%(self.name())
         fileconf=open(tmpname,'w')
-        for var in [ 'PLC_NAME',
-                     'PLC_ROOT_USER',
-                     'PLC_ROOT_PASSWORD',
-                     'PLC_SLICE_PREFIX',
-                     'PLC_MAIL_ENABLED',
-                     'PLC_MAIL_SUPPORT_ADDRESS',
-                     'PLC_DB_HOST',
-#                     'PLC_DB_PASSWORD',
-                    # Above line was added for integrating SFA Testing
-                     'PLC_API_HOST',
-                     'PLC_WWW_HOST',
-                     'PLC_BOOT_HOST',
-                     'PLC_NET_DNS1',
-                     'PLC_NET_DNS2',
-                     'PLC_RESERVATION_GRANULARITY',
-                     'PLC_OMF_ENABLED',
-                     'PLC_OMF_XMPP_SERVER',
-                     'PLC_VSYS_DEFAULTS',
-                     ]:
-            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
+        for (var,value) in self.plc_spec['settings'].iteritems():
+            fileconf.write ('e %s\n%s\n'%(var,value))
         fileconf.write('w\n')
         fileconf.write('q\n')
         fileconf.close()
@@ -823,7 +818,7 @@ class TestPlc:
         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
         for site in sites:
             # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
-            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
+            if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
             site_id=site['site_id']
             print 'Deleting site_id',site_id
             self.apiserver.DeleteSite(self.auth_root(),site_id)
@@ -1035,8 +1030,8 @@ class TestPlc:
                     return False
             def message (self):
                 return "CompleterTaskBootState with node %s"%self.hostname
-            def failure_message (self):
-                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
+            def failure_epilogue (self):
+                print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
                 
         timeout = timedelta(minutes=timeout_minutes)
         graceout = timedelta(minutes=silent_minutes)
@@ -1045,46 +1040,40 @@ class TestPlc:
         utils.header("checking nodes boot state (expected %s)"%target_boot_state)
         tasks = [ CompleterTaskBootState (self,hostname) \
                       for (hostname,_) in self.all_node_infos() ]
-        return Completer (tasks).run (timeout, graceout, period)
+        message = 'check_boot_state={}'.format(target_boot_state)
+        return Completer (tasks, message=message).run (timeout, graceout, period)
 
     def nodes_booted(self):
         return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
 
+    def probe_kvm_iptables (self):
+        (_,kvmbox) = self.all_node_infos()[0]
+        TestSsh(kvmbox).run("iptables-save")
+        return True
+
     # probing nodes
-    def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
-        class CompleterTaskPingNode (CompleterTask):
+    def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+        class CompleterTaskPingNode(CompleterTask):
             def __init__ (self, hostname):
                 self.hostname=hostname
-            def run(self,silent):
+            def run(self, silent):
                 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
                 return utils.system (command, silent=silent)==0
-            def failure_message (self):
-                return "Cannot ping node with name %s"%self.hostname
+            def failure_epilogue (self):
+                print "Cannot ping node with name %s"%self.hostname
         timeout=timedelta (seconds=timeout_seconds)
         graceout=timeout
         period=timedelta (seconds=period_seconds)
         node_infos = self.all_node_infos()
         tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
-        return Completer (tasks).run (timeout, graceout, period)
+        return Completer (tasks, message='ping_node').run (timeout, graceout, period)
 
     # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
     def ping_node (self):
         "Ping nodes"
         return self.check_nodes_ping ()
 
-    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
-        class CompleterTaskNodeSsh (CompleterTask):
-            def __init__ (self, hostname, qemuname, boot_state, local_key):
-                self.hostname=hostname
-                self.qemuname=qemuname
-                self.boot_state=boot_state
-                self.local_key=local_key
-            def run (self, silent):
-                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
-                return utils.system (command, silent=silent)==0
-            def failure_message (self):
-                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
-
+    def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
         # various delays 
         timeout  = timedelta(minutes=timeout_minutes)
         graceout = timedelta(minutes=silent_minutes)
@@ -1092,15 +1081,18 @@ class TestPlc:
         vservername=self.vservername
         if debug: 
             message="debug"
+            completer_message = 'ssh_node_debug'
             local_key = "keys/%(vservername)s-debug.rsa"%locals()
         else: 
             message="boot"
+            completer_message = 'ssh_node_boot'
            local_key = "keys/key_admin.rsa"
         utils.header("checking ssh access to nodes (expected in %s mode)"%message)
         node_infos = self.all_node_infos()
-        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
+        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key,
+                                        boot_state=message, dry_run=self.options.dry_run) \
                       for (nodename,qemuname) in node_infos ]
-        return Completer (tasks).run (timeout, graceout, period)
+        return Completer (tasks, message=completer_message).run (timeout, graceout, period)
         
     def ssh_node_debug(self):
         "Tries to ssh into nodes in debug mode with the debug ssh key"
@@ -1148,15 +1140,15 @@ class TestPlc:
     ### initscripts
     def do_check_initscripts(self):
         class CompleterTaskInitscript (CompleterTask):
-            def __init__ (self, test_sliver, stamp):
+            def __init__(self, test_sliver, stamp):
                 self.test_sliver=test_sliver
                 self.stamp=stamp
-            def actual_run (self):
-                return self.test_sliver.check_initscript_stamp (self.stamp)
-            def message (self):
+            def actual_run(self):
+                return self.test_sliver.check_initscript_stamp(self.stamp)
+            def message(self):
                 return "initscript checker for %s"%self.test_sliver.name()
-            def failure_message (self):
-                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
+            def failure_epilogue(self):
+                print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
             
         tasks=[]
         for slice_spec in self.plc_spec['slices']:
@@ -1172,8 +1164,9 @@ class TestPlc:
                 test_slice = TestSlice (self,test_site,slice_spec)
                 test_node = TestNode (self,test_site,node)
                 test_sliver = TestSliver (self, test_node, test_slice)
-                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
-        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
+                tasks.append(CompleterTaskInitscript(test_sliver, stamp))
+        return Completer(tasks, message='check_initscripts').\
+            run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
            
     def check_initscripts(self):
         "check that the initscripts have triggered"
@@ -1234,15 +1227,18 @@ class TestPlc:
     def ssh_slice(self): pass
     @slice_mapper__tasks(20,19,15)
     def ssh_slice_off (self): pass
+    @slice_mapper__tasks(1,1,15)
+    def slice_fs_present(self): pass
+    @slice_mapper__tasks(1,1,15)
+    def slice_fs_deleted(self): pass
 
     # use another name so we can exclude/ignore it from the tests on the nightly command line
     def ssh_slice_again(self): return self.ssh_slice()
-    # note that simply doing ssh_slice_again=ssh_slice would kind od work too
+    # note that simply doing ssh_slice_again=ssh_slice would kind of work too
     # but for some reason the ignore-wrapping thing would not
 
     @slice_mapper
     def ssh_slice_basics(self): pass
-
     @slice_mapper
     def check_vsys_defaults(self): pass
 
@@ -1253,20 +1249,28 @@ class TestPlc:
         return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
 
     def speed_up_slices (self):
-        "tweak nodemanager settings on all nodes using a conf file"
+        "tweak nodemanager cycle (wait time) to 30+/-10 s"
+        return self._speed_up_slices (30,10)
+    def super_speed_up_slices (self):
+        "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
+        return self._speed_up_slices (5,1)
+
+    def _speed_up_slices (self, p, r):
         # create the template on the server-side 
         template="%s.nodemanager"%self.name()
         template_file = open (template,"w")
-        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
+        template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
         template_file.close()
         in_vm="/var/www/html/PlanetLabConf/nodemanager"
         remote="%s/%s"%(self.vm_root_in_host(),in_vm)
         self.test_ssh.copy_abs(template,remote)
         # Add a conf file
-        self.apiserver.AddConfFile (self.auth_root(),
-                                    {'dest':'/etc/sysconfig/nodemanager',
-                                     'source':'PlanetLabConf/nodemanager',
-                                     'postinstall_cmd':'service nm restart',})
+        if not self.apiserver.GetConfFiles (self.auth_root(),
+                                        {'dest':'/etc/sysconfig/nodemanager'}):
+            self.apiserver.AddConfFile (self.auth_root(),
+                                        {'dest':'/etc/sysconfig/nodemanager',
+                                         'source':'PlanetLabConf/nodemanager',
+                                         'postinstall_cmd':'service nm restart',})
         return True
 
     def debug_nodemanager (self):
@@ -1303,24 +1307,55 @@ class TestPlc:
             utils.header ("check_tcp: no/empty config found")
             return True
         specs = self.plc_spec['tcp_specs']
-        overall=True
+        overall = True
+
+        # first wait for the network to be up and ready from within the slivers
+        class CompleterTaskNetworkReadyInSliver(CompleterTask):
+            def __init__(self, test_sliver):
+                self.test_sliver = test_sliver
+            def actual_run(self):
+                return self.test_sliver.check_tcp_ready(port=9999)
+            def message(self):
+                return "network ready checker for %s" % self.test_sliver.name()
+            def failure_epilogue(self):
+                print "could not bind port from sliver %s" % self.test_sliver.name()
+
+        tasks = []
+        for spec in specs:
+            # locate the TestSliver instances involved, and cache them in the spec instance
+            spec['s_sliver'] = self.locate_sliver_obj_cross (spec['server_node'], spec['server_slice'], other_plcs)
+            spec['c_sliver'] = self.locate_sliver_obj_cross (spec['client_node'], spec['client_slice'], other_plcs)
+            message = "Will check TCP between s=%s and c=%s" % \
+                      (spec['s_sliver'].name(), spec['c_sliver'].name())
+            if 'client_connect' in spec:
+                message += " (using %s)" % spec['client_connect']
+            utils.header(message)
+            tasks.append(CompleterTaskNetworkReadyInSliver (spec['s_sliver']))
+
+        # wait for the network to be OK on all server sides
+        if not Completer(tasks, message='check for network readiness in slivers').\
+           run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
+            return False
+            
+        # run server and client
         for spec in specs:
             port = spec['port']
             # server side
-            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
-            if not s_test_sliver.run_tcp_server(port,timeout=20):
-                overall=False
+            # the issue here is that the server runs in the background,
+            # so we have no way to tell whether it actually took off
+            # - and in some cases it looks like it does not
+            if not spec['s_sliver'].run_tcp_server(port, timeout=20):
+                overall = False
                 break
 
             # idem for the client side
-            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
-            # use nodename from locatesd sliver, unless 'client_connect' is set
+            # use nodename from located sliver, unless 'client_connect' is set
             if 'client_connect' in spec:
                 destination = spec['client_connect']
             else:
-                destination=s_test_sliver.test_node.name()
-            if not c_test_sliver.run_tcp_client(destination,port):
-                overall=False
+                destination = spec['s_sliver'].test_node.name()
+            if not spec['c_sliver'].run_tcp_client(destination, port):
+                overall = False
         return overall
 
     # painfully enough, we need to allow for some time as netflow might show up last
@@ -1345,14 +1380,14 @@ class TestPlc:
                 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
             def message (self): 
                 return "System slice %s @ %s"%(slicename, self.test_node.name())
-            def failure_message (self): 
-                return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
+            def failure_epilogue (self): 
+                print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
         timeout = timedelta(minutes=timeout_minutes)
         silent  = timedelta (0)
         period  = timedelta (seconds=period_seconds)
         tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
                       for test_node in self.all_nodes() ]
-        return Completer (tasks) . run (timeout, silent, period)
+        return Completer (tasks, message='_check_system_slice') . run (timeout, silent, period)
 
     def plcsh_stress_test (self):
         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
@@ -1508,30 +1543,13 @@ class TestPlc:
         "run sfa-config-tty"
         tmpname=self.conffile("sfa-config-tty")
         fileconf=open(tmpname,'w')
-        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
-                     'SFA_INTERFACE_HRN',
-                     'SFA_REGISTRY_LEVEL1_AUTH',
-                    'SFA_REGISTRY_HOST',
-                    'SFA_AGGREGATE_HOST',
-                     'SFA_SM_HOST',
-                    'SFA_PLC_URL',
-                     'SFA_PLC_USER',
-                     'SFA_PLC_PASSWORD',
-                     'SFA_DB_HOST',
-                     'SFA_DB_USER',
-                     'SFA_DB_PASSWORD',
-                     'SFA_DB_NAME',
-                     'SFA_API_LOGLEVEL',
-                     'SFA_GENERIC_FLAVOUR',
-                     'SFA_AGGREGATE_ENABLED',
-                     ]:
-            if self.plc_spec['sfa'].has_key(var):
-                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
-        # the way plc_config handles booleans just sucks..
-        for var in []:
-            val='false'
-            if self.plc_spec['sfa'][var]: val='true'
-            fileconf.write ('e %s\n%s\n'%(var,val))
+        for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
+            fileconf.write ('e %s\n%s\n'%(var,value))
+#        # the way plc_config handles booleans just sucks..
+#        for var in []:
+#            val='false'
+#            if self.plc_spec['sfa'][var]: val='true'
+#            fileconf.write ('e %s\n%s\n'%(var,val))
         fileconf.write('w\n')
         fileconf.write('R\n')
         fileconf.write('q\n')
@@ -1543,11 +1561,11 @@ class TestPlc:
     def aggregate_xml_line(self):
         port=self.plc_spec['sfa']['neighbours-port']
         return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
-            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
+            (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
 
     def registry_xml_line(self):
         return '<registry addr="%s" hrn="%s" port="12345"/>' % \
-            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
+            (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
 
 
     # a cross step that takes all other plcs in argument
@@ -1565,11 +1583,11 @@ class TestPlc:
                                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
         utils.header ("(Over)wrote %s"%reg_fname)
         return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
-            and  self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
+           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
 
     def sfa_import(self):
         "use sfaadmin to import from plc"
-        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
+        auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
         return self.run_in_guest('sfaadmin reg import_registry')==0 
 
     def sfa_start(self):
@@ -1603,30 +1621,61 @@ class TestPlc:
         self.run_in_guest("rm -rf /root/sfi")
         return True
 
+    def sfa_rspec_empty(self):
+        "expose a static empty rspec (ships with the tests module) in the sfi directory"
+        filename="empty-rspec.xml"
+        overall=True
+        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+            test_slice=TestAuthSfa(self,slice_spec)
+            in_vm = test_slice.sfi_path()
+            remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+            if self.test_ssh.copy_abs (filename, remote) !=0: overall=False
+        return overall
+
     @auth_sfa_mapper
-    def sfa_add_site (self): pass
+    def sfa_register_site (self): pass
     @auth_sfa_mapper
-    def sfa_add_pi (self): pass
+    def sfa_register_pi (self): pass
     @auth_sfa_mapper
-    def sfa_add_user(self): pass
+    def sfa_register_user(self): pass
     @auth_sfa_mapper
     def sfa_update_user(self): pass
     @auth_sfa_mapper
-    def sfa_add_slice(self): pass
+    def sfa_register_slice(self): pass
     @auth_sfa_mapper
     def sfa_renew_slice(self): pass
     @auth_sfa_mapper
+    def sfa_get_expires(self): pass
+    @auth_sfa_mapper
     def sfa_discover(self): pass
     @auth_sfa_mapper
-    def sfa_create_slice(self): pass
+    def sfa_rspec(self): pass
+    @auth_sfa_mapper
+    def sfa_allocate(self): pass
+    @auth_sfa_mapper
+    def sfa_allocate_empty(self): pass
+    @auth_sfa_mapper
+    def sfa_provision(self): pass
+    @auth_sfa_mapper
+    def sfa_provision_empty(self): pass
     @auth_sfa_mapper
     def sfa_check_slice_plc(self): pass
     @auth_sfa_mapper
+    def sfa_check_slice_plc_empty(self): pass
+    @auth_sfa_mapper
     def sfa_update_slice(self): pass
     @auth_sfa_mapper
+    def sfa_remove_user_from_slice(self): pass
+    @auth_sfa_mapper
+    def sfa_insert_user_in_slice(self): pass
+    @auth_sfa_mapper
     def sfi_list(self): pass
     @auth_sfa_mapper
-    def sfi_show(self): pass
+    def sfi_show_site(self): pass
+    @auth_sfa_mapper
+    def sfi_show_slice(self): pass
+    @auth_sfa_mapper
+    def sfi_show_slice_researchers(self): pass
     @auth_sfa_mapper
     def ssh_slice_sfa(self): pass
     @auth_sfa_mapper
@@ -1825,3 +1874,4 @@ class TestPlc:
     # convenience for debugging the test logic
     def yes (self): return True
     def no (self): return False
+    def fail (self): return False