attempt to fix the add_sfa step
[tests.git] / system / TestPlc.py
index 341c142..812b867 100644
@@ -1,4 +1,6 @@
-# $Id$
+# Thierry Parmentelat <thierry.parmentelat@inria.fr>
+# Copyright (C) 2010 INRIA 
+#
 import os, os.path
 import datetime
 import time
@@ -80,42 +82,57 @@ def slice_mapper_options_sfa (method):
     return actual
 
 SEP='<sep>'
+SEPSFA='<sep_sfa>'
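+# same role as SEP, but used to mark the groups of sfa-related steps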
 
 class TestPlc:
 
     default_steps = [
-        'display', 'local_pre', SEP,
-        'delete','create','install', 'configure', 'start', SEP,
+        'display', 'resources_pre', SEP,
+        'delete_vs','create_vs','install', 'configure', 'start', SEP,
         'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
-        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
+        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
         'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
         'kill_all_qemus', 'start_node', SEP,
         # better use of time: do this now that the nodes are taking off
         'plcsh_stress_test', SEP,
-        'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
-       'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
-        'setup_sfa', 'add_sfa', 'update_sfa', SEP,
-        'view_sfa', 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
+       'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
+        'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEPSFA,
+        'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
+        'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEPSFA,
         'check_tcp',  'check_hooks',  SEP,
-        'force_gather_logs', 'force_local_post',
+        'force_gather_logs', 'force_resources_post', SEP,
         ]
     other_steps = [ 
-        'fresh_install', 'stop', 'vs_start', SEP,
+        'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
+        'stop', 'vs_start', SEP,
         'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
         'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
+        'clean_leases', 'list_leases', SEP,
         'populate' , SEP,
-        'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
+        'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
         'db_dump' , 'db_restore', SEP,
-        'local_list','local_cleanup',SEP,
-        'standby_1 through 20',
+        'standby_1 through 20',SEP,
         ]
 
     @staticmethod
     def printable_steps (list):
-        return " ".join(list).replace(" "+SEP+" "," \\\n")
+        single_line=" ".join(list)+" "
+        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
     @staticmethod
     def valid_step (step):
-        return step != SEP
+        return step != SEP and step != SEPSFA
+
+    # turn off the sfa-related steps when the build has skipped SFA
+    # this is originally for centos5, as recent SFAs won't build on this platform
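+    # presumably meant to be called by the test driver before the steps are run, e.g. (assumption):
+    #   TestPlc.check_whether_build_has_sfa (options.arch_rpms_url)
+    # note: the SEPSFA markers contain 'sfa' too, so they get demoted along with the steps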
+    @staticmethod
+    def check_whether_build_has_sfa (rpms_url):
+        retcod=os.system ("curl --silent %s/ | grep -q sfa"%rpms_url)
+        # full builds are expected to return with 0 here
+        if retcod!=0:
+            # move all steps containing 'sfa' from default_steps to other_steps
+            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
+            TestPlc.other_steps += sfa_steps
+            for step in sfa_steps: TestPlc.default_steps.remove(step)
 
     def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
@@ -261,13 +278,14 @@ class TestPlc:
                     
     # a step for checking this stuff
     def show_boxes (self):
+        'print a summary of node locations'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             print box,":"," + ".join( [ node.name() for node in nodes ] )
         return True
 
     # make this a valid step
     def kill_all_qemus(self):
-        "all qemu boxes: kill all running qemus (even of former runs)"
+        'kill all qemu instances on the qemu boxes involved in this setup'
         # this is the brute force version, kill all qemus on that host box
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             # pass the first nodename, as we don't push template-qemu on testboxes
@@ -277,6 +295,7 @@ class TestPlc:
 
     # make this a valid step
     def list_all_qemus(self):
+        'list all qemu instances on the qemu boxes involved in this setup'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             # this is the brute force version, kill all qemus on that host box
             TestBox(box,self.options.buildname).list_all_qemus()
@@ -284,6 +303,7 @@ class TestPlc:
 
     # kill only the right qemus
     def list_qemus(self):
+        'list qemu instances for our nodes'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             # the fine-grain version
             for node in nodes:
@@ -292,6 +312,7 @@ class TestPlc:
 
     # kill only the right qemus
     def kill_qemus(self):
+        'kill the qemu instances for our nodes'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             # the fine-grain version
             for node in nodes:
@@ -306,8 +327,10 @@ class TestPlc:
         return True
 
     # entry point
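+    # with the regular (non-verbose) output, only these keys of the spec get displayed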
+    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
     def display_pass (self,passno):
         for (key,val) in self.plc_spec.iteritems():
+            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
             if passno == 2:
                 if key == 'sites':
                     for site in val:
@@ -324,12 +347,13 @@ class TestPlc:
                     for key in val:
                         self.display_key_spec (key)
             elif passno == 1:
-                if key not in ['sites','initscripts','slices','keys']:
+                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                     print '+   ',key,':',val
 
     def display_site_spec (self,site):
         print '+ ======== site',site['site_fields']['name']
         for (k,v) in site.iteritems():
+            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
             if k=='nodes':
                 if v: 
                     print '+       ','nodes : ',
@@ -347,8 +371,8 @@ class TestPlc:
             elif k == 'address_fields':
                 pass
             else:
-                print '+       ',k,
-                PrettyPrinter(indent=8,depth=2).pprint(v)
+                print '+       ',
+                utils.pprint(k,v)
         
     def display_initscript_spec (self,initscript):
         print '+ ======== initscript',initscript['initscript_fields']['name']
@@ -379,10 +403,11 @@ class TestPlc:
                 print '+       ',k,v
 
     def display_node_spec (self,node):
-        print "+           node",node['name'],"host_box=",node['host_box'],
+        print "+           node=%s host_box=%s"%(node['name'],node['host_box']),
         print "hostname=",node['node_fields']['hostname'],
         print "ip=",node['interface_fields']['ip']
-    
+        if self.options.verbose:
+            utils.pprint("node details",node,depth=3)
 
     # another entry point for just showing the boxes involved
     def display_mapping (self):
@@ -404,34 +429,48 @@ class TestPlc:
         print '+\tqemu box %s'%node_spec['host_box']
         print '+\thostname=%s'%node_spec['node_fields']['hostname']
 
-    def local_pre (self):
+    def resources_pre (self):
         "run site-dependant pre-test script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_pre(self)
  
-    def local_post (self):
+    def resources_post (self):
         "run site-dependant post-test script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_post(self)
  
-    def local_list (self):
+    def resources_list (self):
         "run site-dependant list script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_list(self)
  
-    def local_cleanup (self):
-        "run site-dependant cleanup script as defined in LocalTestResources"
+    def resources_release (self):
+        "run site-dependant release script as defined in LocalTestResources"
+        from LocalTestResources import local_resources
+        return local_resources.step_release(self)
+    def resources_release_plc (self):
+        "run site-dependant release script as defined in LocalTestResources"
         from LocalTestResources import local_resources
-        return local_resources.step_cleanup(self)
+        return local_resources.step_release_plc(self)
  
-    def delete(self):
+    def resources_release_qemu (self):
+        "run site-dependant release script as defined in LocalTestResources"
+        from LocalTestResources import local_resources
+        return local_resources.step_release_qemu(self)
+    def delete_vs(self):
         "vserver delete the test myplc"
         self.run_in_host("vserver --silent %s delete"%self.vservername)
         return True
 
     ### install
-    def create (self):
+    # historically the build was being fetched by the tests
+    # now the build pushes itself as a subdir of the tests workdir
+    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
+    def create_vs (self):
         "vserver creation (no install done)"
+        # push the local build/ dir to the testplc box 
         if self.is_local():
             # a full path for the local calls
             build_dir=os.path.dirname(sys.argv[0])
@@ -441,10 +480,9 @@ class TestPlc:
         else:
             # use a standard name - will be relative to remote buildname
             build_dir="build"
-       # run checkout in any case - would do an update if already exists
-        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
-        if self.run_in_host(build_checkout) != 0:
-            return False
+            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
+            self.test_ssh.rmdir(build_dir)
+            self.test_ssh.copy(build_dir,recursive=True)
         # the repo url is taken from arch-rpms-url 
         # with the last step (i386) removed
         repo_url = self.options.arch_rpms_url
@@ -462,24 +500,36 @@ class TestPlc:
             vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
             vserver_options += " --hostname %s"%vserver_hostname
         except:
-            pass
+            print "Cannot reverse lookup %s"%self.vserverip
+            print "This is considered fatal, as this might pollute the test results"
+            return False
         create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
         return self.run_in_host(create_vserver) == 0
 
     ### install_rpm 
     def install(self):
         "yum install myplc, noderepo, and the plain bootstrapfs"
+
+        # workaround for getting pgsql8.2 on centos5
+        if self.options.fcdistro == "centos5":
+            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+
         if self.options.personality == "linux32":
             arch = "i386"
         elif self.options.personality == "linux64":
             arch = "x86_64"
         else:
             raise Exception, "Unsupported personality %r"%self.options.personality
+        
+        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
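+        # nodefamily ends up being something like 'planetlab-f12-i386' (illustrative values)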
+
+        # try to install slicerepo - not fatal yet
+        self.run_in_guest("yum -y install slicerepo-%s"%nodefamily)
+        
         return \
             self.run_in_guest("yum -y install myplc")==0 and \
-            nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
             self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
-                self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0 
+            self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0 
 
     ### 
     def configure(self):
@@ -487,8 +537,9 @@ class TestPlc:
         tmpname='%s.plc-config-tty'%(self.name())
         fileconf=open(tmpname,'w')
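+        # this file drives plc-config-tty non-interactively: one 'e <variable>' + '<value>'
+        # pair per setting, then 'w' and 'q' (presumably write and quit)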
         for var in [ 'PLC_NAME',
-                     'PLC_ROOT_PASSWORD',
                      'PLC_ROOT_USER',
+                     'PLC_ROOT_PASSWORD',
+                     'PLC_SLICE_PREFIX',
                      'PLC_MAIL_ENABLED',
                      'PLC_MAIL_SUPPORT_ADDRESS',
                      'PLC_DB_HOST',
@@ -498,7 +549,10 @@ class TestPlc:
                      'PLC_WWW_HOST',
                      'PLC_BOOT_HOST',
                      'PLC_NET_DNS1',
-                     'PLC_NET_DNS2']:
+                     'PLC_NET_DNS2',
+                     'PLC_RESERVATION_GRANULARITY',
+                     'PLC_OMF_ENABLED',
+                     ]:
             fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
         fileconf.write('w\n')
         fileconf.write('q\n')
@@ -519,6 +573,7 @@ class TestPlc:
         return True
         
     def vs_start (self):
+        "start the PLC vserver"
         self.start_guest()
         return True
 
@@ -530,6 +585,7 @@ class TestPlc:
         return True
 
     def clean_keys(self):
+        "removes keys cached in keys/"
         utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
 
     # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
@@ -572,6 +628,7 @@ class TestPlc:
         return True
 
     def clean_all_sites (self):
+        "Delete all sites in PLC, and related objects"
         print 'auth_root',self.auth_root()
         site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
         for site_id in site_ids:
@@ -609,6 +666,68 @@ class TestPlc:
         "delete nodegroups with PLCAPI"
         return self.do_nodegroups("delete")
 
+    YEAR = 365*24*3600
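+    # timestamps in the leases spec may be given either as absolute unix times, or as a small
+    # number of grains counted from 'start'; anything under a year is treated as relative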
+    @staticmethod
+    def translate_timestamp (start,grain,timestamp):
+        if timestamp < TestPlc.YEAR:    return start+timestamp*grain
+        else:                           return timestamp
+
+    @staticmethod
+    def timestamp_printable (timestamp):
+        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
+
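+    # a minimal sketch of what an entry of plc_spec['leases'] is expected to look like
+    # (values are illustrative only):
+    #   { 'slice' : 'mysite_myslice', 't_from' : 0, 't_until' : 2 }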
+    def leases(self):
+        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
+        now=int(time.time())
+        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
+        print 'API answered grain=',grain
+        start=(now/grain)*grain
+        start += grain
+        # find out all nodes that are reservable
+        nodes=self.all_reservable_nodenames()
+        if not nodes: 
+            utils.header ("No reservable node found - proceeding without leases")
+            return True
+        ok=True
+        # attach the reservable nodes to the leases as specified in plc_spec
+        # this is where the 'leases' field gets interpreted as relative or absolute
+        for lease_spec in self.plc_spec['leases']:
+            # skip the ones that come with a null slice id
+            if not lease_spec['slice']: continue
+            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
+            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
+            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
+                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
+            if lease_addition['errors']:
+                utils.header("Cannot create leases, %s"%lease_addition['errors'])
+                ok=False
+            else:
+                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
+                              (nodes,lease_spec['slice'],
+                               lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
+                               lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
+                
+        return ok
+
+    def clean_leases (self):
+        "remove all leases in the myplc side"
+        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
+        utils.header("Cleaning leases %r"%lease_ids)
+        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
+        return True
+
+    def list_leases (self):
+        "list all leases known to the myplc"
+        leases = self.apiserver.GetLeases(self.auth_root())
+        now=int(time.time())
+        for l in leases:
+            current=l['t_until']>=now
+            if self.options.verbose or current:
+                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
+                                                       TestPlc.timestamp_printable(l['t_from']), 
+                                                       TestPlc.timestamp_printable(l['t_until'])))
+        return True
+
     # create nodegroups if needed, and populate
     def do_nodegroups (self, action="add"):
         # 1st pass to scan contents
@@ -675,12 +794,23 @@ class TestPlc:
                     overall=False
         return overall
 
-    def all_hostnames (self) :
-        hostnames = []
+    # return a list of tuples (nodename,qemuname)
+    def all_node_infos (self) :
+        node_infos = []
         for site_spec in self.plc_spec['sites']:
-            hostnames += [ node_spec['node_fields']['hostname'] \
-                           for node_spec in site_spec['nodes'] ]
-        return hostnames
+            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
+                                for node_spec in site_spec['nodes'] ]
+        return node_infos
+    
+    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
+    def all_reservable_nodenames (self): 
+        res=[]
+        for site_spec in self.plc_spec['sites']:
+            for node_spec in site_spec['nodes']:
+                node_fields=node_spec['node_fields']
+                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
+                    res.append(node_fields['hostname'])
+        return res
 
     # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
     def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
@@ -742,31 +872,34 @@ class TestPlc:
         else: 
             message="boot"
            local_key = "keys/key1.rsa"
-        tocheck = self.all_hostnames()
-        utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
+        node_infos = self.all_node_infos()
+        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
+        for (nodename,qemuname) in node_infos:
+            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
         utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                          (timeout_minutes,silent_minutes,period))
-        while tocheck:
-            for hostname in tocheck:
+        while node_infos:
+            for node_info in node_infos:
+                (hostname,qemuname) = node_info
                 # try to run 'hostname' in the node
                 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                 # don't spam logs - show the command only after the grace period 
                 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                 if success==0:
                     utils.header('Successfully entered root@%s (%s)'%(hostname,message))
-                    # refresh tocheck
-                    tocheck.remove(hostname)
+                    # refresh node_infos
+                    node_infos.remove(node_info)
                 else:
                     # we will have tried real nodes once, in case they're up - but if not, just skip
                     (site_spec,node_spec)=self.locate_hostname(hostname)
                     if TestNode.is_real_model(node_spec['node_fields']['model']):
                         utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
-                       tocheck.remove(hostname)
-            if  not tocheck:
+                       node_infos.remove(node_info)
+            if  not node_infos:
                 return True
             if datetime.datetime.now() > timeout:
-                for hostname in tocheck:
-                    utils.header("FAILURE to ssh into %s"%hostname)
+                for (hostname,qemuname) in node_infos:
+                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                 return False
             # otherwise, sleep for a while
             time.sleep(period)
@@ -775,11 +908,11 @@ class TestPlc:
         
     def nodes_ssh_debug(self):
         "Tries to ssh into nodes in debug mode with the debug ssh key"
-        return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)
+        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
     
     def nodes_ssh_boot(self):
         "Tries to ssh into nodes in production mode with the root ssh key"
-        return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
+        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
     
     @node_mapper
     def init_node (self): 
@@ -942,7 +1075,7 @@ class TestPlc:
         tmpname='%s.sfa-config-tty'%(self.name())
         fileconf=open(tmpname,'w')
         for var in [ 'SFA_REGISTRY_ROOT_AUTH',
-                     'SFA_REGISTRY_LEVEL1_AUTH',
+#                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                      'SFA_SM_HOST',
@@ -951,7 +1084,8 @@ class TestPlc:
                      'SFA_PLC_DB_HOST',
                      'SFA_PLC_DB_USER',
                      'SFA_PLC_DB_PASSWORD',
-                    'SFA_PLC_URL']:
+                    'SFA_PLC_URL',
+                     ]:
             fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
         fileconf.write('w\n')
         fileconf.write('R\n')
@@ -965,17 +1099,16 @@ class TestPlc:
     def import_sfa(self):
         "sfa-import-plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
-        self.run_in_guest('sfa-import-plc.py')
+        return self.run_in_guest('sfa-import-plc.py')==0
 # not needed anymore
 #        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
-        return True
 
     def start_sfa(self):
         "service sfa start"
-        self.run_in_guest('service sfa start')
-        return True
+        return self.run_in_guest('service sfa start')==0
 
     def setup_sfa(self):
         "sfi client configuration"
+        sfa_spec=self.plc_spec['sfa']
        dir_name=".sfi"
        if os.path.exists(dir_name):
@@ -988,23 +1121,23 @@ class TestPlc:
 
        file_name=dir_name + os.sep + 'sfi_config'
         fileconf=open(file_name,'w')
-       SFI_AUTH=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']+".main"
+       SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
         fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
        fileconf.write('\n')
        SFI_USER=SFI_AUTH+'.fake-pi1'
         fileconf.write ("SFI_USER='%s'"%SFI_USER)
        fileconf.write('\n')
-       SFI_REGISTRY='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12345/'
+       SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
         fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
        fileconf.write('\n')
-       SFI_SM='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12347/'
+       SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
         fileconf.write ("SFI_SM='%s'"%SFI_SM)
        fileconf.write('\n')
         fileconf.close()
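+       # the resulting .sfi/sfi_config reads roughly as follows (sketch):
+       #   SFI_AUTH='<SFA_REGISTRY_ROOT_AUTH>.<login_base>'
+       #   SFI_USER='<SFI_AUTH>.fake-pi1'
+       #   SFI_REGISTRY='http://<SFA_PLC_DB_HOST>:12345/'
+       #   SFI_SM='http://<SFA_PLC_DB_HOST>:12347/'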
 
        file_name=dir_name + os.sep + 'person.xml'
         fileconf=open(file_name,'w')
-       for record in self.plc_spec['sfa']['sfa_person_xml']:
+       for record in sfa_spec['sfa_person_xml']:
           person_record=record
        fileconf.write(person_record)
        fileconf.write('\n')
@@ -1012,9 +1145,9 @@ class TestPlc:
 
        file_name=dir_name + os.sep + 'slice.xml'
         fileconf=open(file_name,'w')
-       for record in self.plc_spec['sfa']['sfa_slice_xml']:
+       for record in sfa_spec['sfa_slice_xml']:
            slice_record=record
-       #slice_record=self.plc_spec['sfa']['sfa_slice_xml']
+       #slice_record=sfa_spec['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')
         fileconf.close()
@@ -1022,7 +1155,7 @@ class TestPlc:
        file_name=dir_name + os.sep + 'slice.rspec'
         fileconf=open(file_name,'w')
        slice_rspec=''
-       for (key, value) in self.plc_spec['sfa']['sfa_slice_rspec'].items():
+       for (key, value) in sfa_spec['sfa_slice_rspec'].items():
            slice_rspec +=value 
        fileconf.write(slice_rspec)
        fileconf.write('\n')
@@ -1091,8 +1224,7 @@ class TestPlc:
 
     def stop_sfa(self):
         "service sfa stop"
-        self.run_in_guest('service sfa stop')
-        return True
+        return self.run_in_guest('service sfa stop')==0
 
     def populate (self):
         "creates random entries in the PLCAPI"
@@ -1184,19 +1316,21 @@ class TestPlc:
         return "/root/%s-%s.sql"%(database,name)
 
     def db_dump(self):
-        dump=self.dbfile("planetab4")
-        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
-        utils.header('Dumped planetlab4 database in %s'%dump)
+        'dump the planetlab5 DB in /root in the PLC - the filename includes a timestamp'
+        dump=self.dbfile("planetlab5")
+        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
+        utils.header('Dumped planetlab5 database in %s'%dump)
         return True
 
     def db_restore(self):
-        dump=self.dbfile("planetab4")
+        'restore the planetlab5 DB - looks broken, but run -n might help'
+        dump=self.dbfile("planetlab5")
         ##stop httpd service
         self.run_in_guest('service httpd stop')
         # xxx - need another wrapper
-        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
-        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
-        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
+        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
+        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
+        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
         ##starting httpd service
         self.run_in_guest('service httpd start')