add pyOpenSSL to the list of packages to pip2 install for sfa
diff --git a/system/TestPlc.py b/system/TestPlc.py
index 9788085..7befc3f 100644
@@ -1,5 +1,5 @@
 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
-# Copyright (C) 2010 INRIA 
+# Copyright (C) 2010 INRIA
 #
 import sys
 import time
@@ -7,7 +7,6 @@ import os, os.path
 import traceback
 import socket
 from datetime import datetime, timedelta
-from types import StringTypes
 
 import utils
 from Completer import Completer, CompleterTask
@@ -32,7 +31,7 @@ has_sfa_cache_filename="sfa-cache"
 def standby(minutes, dry_run):
     utils.header('Entering StandBy for {:d} mn'.format(minutes))
     if dry_run:
-        print 'dry_run'
+        print('dry_run')
     else:
         time.sleep(60*minutes)
     return True
@@ -49,7 +48,7 @@ def node_mapper(method):
         node_method = TestNode.__dict__[method.__name__]
         for test_node in self.all_nodes():
             if not node_method(test_node, *args, **kwds):
-                overall=False
+                overall = False
         return overall
     # maintain __name__ for ignore_result
     map_on_nodes.__name__ = method.__name__
@@ -92,7 +91,7 @@ def ignore_result(method):
         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
         ref_method = TestPlc.__dict__[ref_name]
         result = ref_method(self)
-        print "Actual (but ignored) result for {ref_name} is {result}".format(**locals())
+        print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
         return Ignored(result)
     name = method.__name__.replace('_ignore', '').replace('force_', '')
     ignoring.__name__ = name
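Taken together, these two decorators keep the step table declarative: node_mapper looks up the real implementation on TestNode and folds the per-node booleans into one overall result, while ignore_result runs the underlying step but discards its outcome. A hypothetical illustration of how a step pair might be declared, assuming the surrounding class bodies (qemu_frob is an invented name):

    @node_mapper
    def qemu_frob(self): pass          # actual body lives in TestNode.qemu_frob

    @ignore_result
    def qemu_frob_ignore(self): pass   # runs qemu_frob, result wrapped in Ignored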
@@ -153,61 +152,73 @@ class TestPlc:
 
     default_steps = [
         'show', SEP,
-        'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
-        'plc_install', 'plc_configure', 'plc_start', SEP,
+        'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
+        'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
         'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
-        'plcapi_urls','speed_up_slices', SEP,
+        'plcapi_urls', 'speed_up_slices', SEP,
         'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
-# slices created under plcsh interactively seem to be fine but these ones don't have the tags
-# keep this our of the way for now
-        'check_vsys_defaults_ignore', SEP,
-# run this first off so it's easier to re-run on another qemu box        
-        'qemu_kill_mine', SEP,
-        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
-        'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
-        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
-        'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
-        'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
-        'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1', 
-        'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
-        'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+# ss # slices created under plcsh interactively seem to be fine but these ones don't have the tags
+# ss # keep this out of the way for now
+# ss         'check_vsys_defaults_ignore', SEP,
+# ss # run this first off so it's easier to re-run on another qemu box
+# ss         'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
+# ss         'bootcd', 'qemu_local_config', SEP,
+# ss         'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
+# ss         'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
+        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
+        'sfa_start', 'sfa_import', SEPSFA,
+        'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
+        'sfa_register_user@1', 'sfa_update_user@1',
+        'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
+        'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
+        'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
+        'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
+        'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
         # but as the stress test might take a while, we sometimes missed the debug mode..
-        'probe_kvm_iptables',
-        'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
-        'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
-        'ssh_slice_sfa@1', SEPSFA, 
-        'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
+# ss        'probe_kvm_iptables',
+# ss        'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
+# ss        'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
+# ss        'ssh_slice_sfa@1', SEPSFA,
+        'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
+        'sfa_check_slice_plc_empty@1', SEPSFA,
         'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
-        'cross_check_tcp@1', 'check_system_slice', SEP,
+# ss        'check_system_slice', SEP,
         # for inspecting the slice while it runs the first time
         #'fail',
         # check slices are turned off properly
-        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
-        # check they are properly re-created with the same name
-        'fill_slices', 'ssh_slice_again', SEP,
+# ss        'debug_nodemanager',
+# ss        'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
+# ss        # check they are properly re-created with the same name
+# ss        'fill_slices', 'ssh_slice_again', SEP,
         'gather_logs_force', SEP,
         ]
-    other_steps = [ 
+    other_steps = [
         'export', 'show_boxes', 'super_speed_up_slices', SEP,
         'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
-        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
+        'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
         'delete_leases', 'list_leases', SEP,
         'populate', SEP,
-        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+        'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
+        'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
+        'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
-       'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
-        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+        'sfa_install_core', 'sfa_install_sfatables',
+        'sfa_install_plc', 'sfa_install_client', SEPSFA,
+        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
         'sfa_get_expires', SEPSFA,
-        'plc_db_dump' , 'plc_db_restore', SEP,
-        'check_netflow','check_drl', SEP,
-        'debug_nodemanager', 'slice_fs_present', SEP,
-        'standby_1_through_20','yes','no',SEP,
+        'plc_db_dump', 'plc_db_restore', SEP,
+        'check_netflow', 'check_drl', SEP,
+        # used to be part of default steps but won't work since f27
+        'cross_check_tcp@1',
+        'slice_fs_present', 'check_initscripts', SEP,
+        'standby_1_through_20', 'yes', 'no', SEP,
+        'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
         ]
-    bonding_steps = [
+    default_bonding_steps = [
         'bonding_init_partial',
         'bonding_add_yum',
         'bonding_install_rpms', SEP,
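The step tables above are plain strings: SEP and SEPSFA are separator markers, a trailing '@1' restricts a step to the first plc of a multi-plc run, and the '# ss' prefix is how this patch parks steps without deleting them. A rough sketch of how a driver could expand such names, assuming the real dispatch lives in the test driver (TestMain):

    def expand_steps(step_names):
        for name in step_names:
            if name in (SEP, SEPSFA):
                continue                      # layout markers only
            method_name, _, plc_index = name.partition('@')
            yield method_name, int(plc_index) if plc_index else None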
@@ -227,18 +238,19 @@ class TestPlc:
     @staticmethod
     def _has_sfa_cached(rpms_url):
         if os.path.isfile(has_sfa_cache_filename):
-            cached = file(has_sfa_cache_filename).read() == "yes"
+            with open(has_sfa_cache_filename) as cache:
+                cached = cache.read() == "yes"
             utils.header("build provides SFA (cached):{}".format(cached))
             return cached
         # warning, we're now building 'sface' so let's be a bit more picky
         # full builds are expected to return with 0 here
         utils.header("Checking if build provides SFA package...")
-        retcod = os.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
+        retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
         encoded = 'yes' if retcod else 'no'
-        with open(has_sfa_cache_filename,'w')as out:
-            out.write(encoded)
+        with open(has_sfa_cache_filename,'w') as cache:
+            cache.write(encoded)
         return retcod
-        
+
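The probe result is memoized in the sfa-cache file, so a stale answer survives across reruns; forcing a fresh probe amounts to deleting that file first. A minimal sketch, assuming the current directory is the run directory:

    if os.path.isfile(has_sfa_cache_filename):
        os.remove(has_sfa_cache_filename)     # drop the cached yes/no
    TestPlc.check_whether_build_has_sfa(rpms_url)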
     @staticmethod
     def check_whether_build_has_sfa(rpms_url):
         has_sfa = TestPlc._has_sfa_cached(rpms_url)
@@ -254,16 +266,17 @@ class TestPlc:
                 TestPlc.default_steps.remove(step)
 
     def __init__(self, plc_spec, options):
-       self.plc_spec = plc_spec
+        self.plc_spec = plc_spec
         self.options = options
-       self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
+        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
         self.vserverip = plc_spec['vserverip']
         self.vservername = plc_spec['vservername']
+        self.vplchostname = self.vservername.split('-')[-1]
         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
-       self.apiserver = TestApiserver(self.url, options.dry_run)
+        self.apiserver = TestApiserver(self.url, options.dry_run)
         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
-        
+
     def has_addresses_api(self):
         return self.apiserver.has_method('AddIpAddress')
 
@@ -280,25 +293,25 @@ class TestPlc:
     # define the API methods on this object through xmlrpc
     # would help, but not strictly necessary
     def connect(self):
-       pass
+        pass
 
     def actual_command_in_guest(self,command, backslash=False):
         raw1 = self.host_to_guest(command)
         raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
         return raw2
-    
+
     def start_guest(self):
       return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
                                                        dry_run=self.options.dry_run))
-    
+
     def stop_guest(self):
       return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
                                                        dry_run=self.options.dry_run))
-    
+
     def run_in_guest(self, command, backslash=False):
         raw = self.actual_command_in_guest(command, backslash)
         return utils.system(raw)
-    
+
     def run_in_host(self,command):
         return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
 
@@ -306,16 +319,9 @@ class TestPlc:
     # see e.g. plc_start esp. the version for f14
     #command gets run in the plc's vm
     def host_to_guest(self, command):
-        vservername = self.vservername
-        personality = self.options.personality
-        raw = "{personality} virsh -c lxc:/// lxc-enter-namespace {vservername}".format(**locals())
-        # f14 still needs some extra help
-        if self.options.fcdistro == 'f14':
-            raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin {command}".format(**locals())
-        else:
-            raw +=" -- /usr/bin/env {command}".format(**locals())
-        return raw
-    
+        ssh_leg = TestSsh(self.vplchostname)
+        return ssh_leg.actual_command(command, keep_stdin=True)
+
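This is the crux of the plumbing change: commands for the guest used to be wrapped in 'virsh -c lxc:/// lxc-enter-namespace', and are now plain ssh to the guest hostname derived in __init__ (the last dash-separated piece of vservername). A hypothetical trace, assuming a vservername ending in '-vplc01':

    plc.host_to_guest("rpm -q myplc")
    # roughly: ssh <options> vplc01 rpm -q myplc  (exact form, including the user, is up to TestSsh)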
     # this /vservers thing is legacy...
     def vm_root_in_host(self):
         return "/vservers/{}/".format(self.vservername)
@@ -326,63 +332,69 @@ class TestPlc:
     #start/stop the vserver
     def start_guest_in_host(self):
         return "virsh -c lxc:/// start {}".format(self.vservername)
-    
+
     def stop_guest_in_host(self):
         return "virsh -c lxc:/// destroy {}".format(self.vservername)
-    
+
     # xxx quick n dirty
     def run_in_guest_piped(self,local,remote):
         return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
                                                                      keep_stdin = True))
 
-    def yum_check_installed(self, rpms):
-        if isinstance(rpms, list): 
+    def dnf_check_installed(self, rpms):
+        if isinstance(rpms, list):
             rpms=" ".join(rpms)
         return self.run_in_guest("rpm -q {}".format(rpms)) == 0
-        
+
     # does a yum install in the vs, ignore yum retcod, check with rpm
-    def yum_install(self, rpms):
-        if isinstance(rpms, list): 
+    def dnf_install(self, rpms):
+        if isinstance(rpms, list):
             rpms=" ".join(rpms)
-        self.run_in_guest("yum -y install {}".format(rpms))
+        yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
+        if yum_mode != 0:
+            self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
-        self.run_in_guest("yum-complete-transaction -y")
-        return self.yum_check_installed(rpms)
+        # nothing similar with dnf, forget about this for now
+        # self.run_in_guest("yum-complete-transaction -y")
+        return self.dnf_check_installed(rpms)
+
+    def pip3_install(self, package):
+        return self.run_in_guest("pip3 install {}".format(package)) == 0
 
     def auth_root(self):
-       return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
-               'AuthMethod' : 'password',
-               'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
+        return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
+                'AuthMethod' : 'password',
+                'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
                 'Role'       : self.plc_spec['role'],
                 }
-    
+
     def locate_site(self,sitename):
         for site in self.plc_spec['sites']:
             if site['site_fields']['name'] == sitename:
                 return site
             if site['site_fields']['login_base'] == sitename:
                 return site
-        raise Exception,"Cannot locate site {}".format(sitename)
-        
+        raise Exception("Cannot locate site {}".format(sitename))
+
     def locate_node(self, nodename):
         for site in self.plc_spec['sites']:
             for node in site['nodes']:
                 if node['name'] == nodename:
                     return site, node
-        raise Exception, "Cannot locate node {}".format(nodename)
-        
+        raise Exception("Cannot locate node {}".format(nodename))
+
     def locate_hostname(self, hostname):
         for site in self.plc_spec['sites']:
             for node in site['nodes']:
                 if node['node_fields']['hostname'] == hostname:
                     return(site, node)
-        raise Exception,"Cannot locate hostname {}".format(hostname)
-        
+        raise Exception("Cannot locate hostname {}".format(hostname))
+
     def locate_key(self, key_name):
         for key in self.plc_spec['keys']:
             if key['key_name'] == key_name:
                 return key
-        raise Exception,"Cannot locate key {}".format(key_name)
+        raise Exception("Cannot locate key {}".format(key_name))
 
     def locate_private_key_from_key_names(self, key_names):
         # locate the first avail. key
@@ -403,7 +415,7 @@ class TestPlc:
         for slice in self.plc_spec['slices']:
             if slice['slice_fields']['name'] == slicename:
                 return slice
-        raise Exception,"Cannot locate slice {}".format(slicename)
+        raise Exception("Cannot locate slice {}".format(slicename))
 
     def all_sliver_objs(self):
         result = []
@@ -449,24 +461,24 @@ class TestPlc:
         # transform into a dict { 'host_box' -> [ test_node .. ] }
         result = {}
         for (box,node) in tuples:
-            if not result.has_key(box):
+            if box not in result:
                 result[box] = [node]
             else:
                 result[box].append(node)
         return result
-                    
+
     # a step for checking this stuff
     def show_boxes(self):
         'print summary of nodes location'
-        for box,nodes in self.get_BoxNodes().iteritems():
-            print box,":"," + ".join( [ node.name() for node in nodes ] )
+        for box,nodes in self.get_BoxNodes().items():
+            print(box,":"," + ".join( [ node.name() for node in nodes ] ))
         return True
 
     # make this a valid step
     def qemu_kill_all(self):
         'kill all qemu instances on the qemu boxes involved by this setup'
         # this is the brute force version, kill all qemus on that host box
-        for (box,nodes) in self.get_BoxNodes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().items():
             # pass the first nodename, as we don't push template-qemu on testboxes
             nodedir = nodes[0].nodedir()
             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
@@ -475,7 +487,7 @@ class TestPlc:
     # make this a valid step
     def qemu_list_all(self):
         'list all qemu instances on the qemu boxes involved by this setup'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # this is the brute force version, kill all qemus on that host box
             TestBoxQemu(box, self.options.buildname).qemu_list_all()
         return True
@@ -483,7 +495,7 @@ class TestPlc:
     # kill only the qemus related to this test
     def qemu_list_mine(self):
         'list qemu instances for our nodes'
-        for (box,nodes) in self.get_BoxNodes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.list_qemu()
@@ -492,7 +504,7 @@ class TestPlc:
     # kill only the qemus related to this test
     def qemu_clean_mine(self):
         'cleanup (rm -rf) qemu instances for our nodes'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.qemu_clean()
@@ -501,7 +513,7 @@ class TestPlc:
     # kill only the right qemus
     def qemu_kill_mine(self):
         'kill the qemu instances for our nodes'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.kill_qemu()
@@ -514,33 +526,32 @@ class TestPlc:
         self.show_pass(2)
         return True
 
-    # uggly hack to make sure 'run export' only reports about the 1st plc 
+    # ugly hack to make sure 'run export' only reports about the 1st plc
     # to avoid confusion - also we use 'inri_slice1' in various aliases..
     exported_id = 1
     def export(self):
         "print cut'n paste-able stuff to export env variables to your shell"
         # guess local domain from hostname
-        if TestPlc.exported_id > 1: 
-            print "export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername'])
+        if TestPlc.exported_id > 1:
+            print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
             return True
         TestPlc.exported_id += 1
         domain = socket.gethostname().split('.',1)[1]
         fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
-        print "export BUILD={}".format(self.options.buildname)
-        print "export PLCHOSTLXC={}".format(fqdn)
-        print "export GUESTNAME={}".format(self.plc_spec['vservername'])
-        vplcname = self.plc_spec['vservername'].split('-')[-1]
-        print "export GUESTHOSTNAME={}.{}".format(vplcname, domain)
+        print("export BUILD={}".format(self.options.buildname))
+        print("export PLCHOSTLXC={}".format(fqdn))
+        print("export GUESTNAME={}".format(self.vservername))
+        print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
         # find hostname of first node
         hostname, qemubox = self.all_node_infos()[0]
-        print "export KVMHOST={}.{}".format(qemubox, domain)
-        print "export NODE={}".format(hostname)
+        print("export KVMHOST={}.{}".format(qemubox, domain))
+        print("export NODE={}".format(hostname))
         return True
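The step only prints shell assignments, so it is meant to be captured by the calling shell. With hypothetical values, the output looks like:

    export BUILD=2021.01.04--f33
    export PLCHOSTLXC=testbox1.example.org
    export GUESTNAME=2021.01.04--f33-1-vplc01
    export GUESTHOSTNAME=vplc01.example.org
    export KVMHOST=testbox2.example.org
    export NODE=node1.example.org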
 
     # entry point
     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
     def show_pass(self, passno):
-        for (key,val) in self.plc_spec.iteritems():
+        for (key,val) in self.plc_spec.items():
             if not self.options.verbose and key not in TestPlc.always_display_keys:
                 continue
             if passno == 2:
@@ -560,65 +571,65 @@ class TestPlc:
                         self.display_key_spec(key)
             elif passno == 1:
                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
-                    print '+   ', key, ':', val
+                    print('+   ', key, ':', val)
 
     def display_site_spec(self, site):
-        print '+ ======== site', site['site_fields']['name']
-        for k,v in site.iteritems():
+        print('+ ======== site', site['site_fields']['name'])
+        for k,v in site.items():
             if not self.options.verbose and k not in TestPlc.always_display_keys:
                 continue
             if k == 'nodes':
-                if v: 
-                    print '+       ','nodes : ',
-                    for node in v:  
-                        print node['node_fields']['hostname'],'',
-                    print ''
+                if v:
+                    print('+       ', 'nodes : ', end=' ')
+                    for node in v:
+                        print(node['node_fields']['hostname'],'', end=' ')
+                    print('')
             elif k == 'users':
-                if v: 
-                    print '+       users : ',
-                    for user in v:  
-                        print user['name'],'',
-                    print ''
+                if v:
+                    print('+       users : ', end=' ')
+                    for user in v:
+                        print(user['name'],'', end=' ')
+                    print('')
             elif k == 'site_fields':
-                print '+       login_base', ':', v['login_base']
+                print('+       login_base', ':', v['login_base'])
             elif k == 'address_fields':
                 pass
             else:
-                print '+       ',
+                print('+       ', end=' ')
                 utils.pprint(k, v)
-        
+
     def display_initscript_spec(self, initscript):
-        print '+ ======== initscript', initscript['initscript_fields']['name']
+        print('+ ======== initscript', initscript['initscript_fields']['name'])
 
     def display_key_spec(self, key):
-        print '+ ======== key', key['key_name']
+        print('+ ======== key', key['key_name'])
 
     def display_slice_spec(self, slice):
-        print '+ ======== slice', slice['slice_fields']['name']
-        for k,v in slice.iteritems():
+        print('+ ======== slice', slice['slice_fields']['name'])
+        for k,v in slice.items():
             if k == 'nodenames':
-                if v: 
-                    print '+       nodes : ',
-                    for nodename in v:  
-                        print nodename,'',
-                    print ''
+                if v:
+                    print('+       nodes : ', end=' ')
+                    for nodename in v:
+                        print(nodename,'', end=' ')
+                    print('')
             elif k == 'usernames':
-                if v: 
-                    print '+       users : ',
-                    for username in v:  
-                        print username,'',
-                    print ''
+                if v:
+                    print('+       users : ', end=' ')
+                    for username in v:
+                        print(username,'', end=' ')
+                    print('')
             elif k == 'slice_fields':
-                print '+       fields',':',
-                print 'max_nodes=',v['max_nodes'],
-                print ''
+                print('+       fields', ':', end=' ')
+                print('max_nodes=',v['max_nodes'], end=' ')
+                print('')
             else:
-                print '+       ',k,v
+                print('+       ',k,v)
 
     def display_node_spec(self, node):
-        print "+           node={} host_box={}".format(node['name'], node['host_box']),
-        print "hostname=", node['node_fields']['hostname'],
-        print "ip=", node['interface_fields']['ip']
+        print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
+        print("hostname=", node['node_fields']['hostname'], end=' ')
+        print("ip=", node['interface_fields']['ip'])
         if self.options.verbose:
             utils.pprint("node details", node, depth=3)
 
@@ -629,19 +640,19 @@ class TestPlc:
 
     @staticmethod
     def display_mapping_plc(plc_spec):
-        print '+ MyPLC',plc_spec['name']
+        print('+ MyPLC',plc_spec['name'])
         # WARNING this would not be right for lxc-based PLC's - should be harmless though
-        print '+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername'])
-        print '+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip'])
+        print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
+        print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
         for site_spec in plc_spec['sites']:
             for node_spec in site_spec['nodes']:
                 TestPlc.display_mapping_node(node_spec)
 
     @staticmethod
     def display_mapping_node(node_spec):
-        print '+   NODE {}'.format(node_spec['name'])
-        print '+\tqemu box {}'.format(node_spec['host_box'])
-        print '+\thostname={}'.format(node_spec['node_fields']['hostname'])
+        print('+   NODE {}'.format(node_spec['name']))
+        print('+\tqemu box {}'.format(node_spec['host_box']))
+        print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
 
     # write a timestamp in /vservers/<>.timestamp
     # cannot be inside the vserver, that causes vserver .. build to cough
@@ -654,15 +665,15 @@ class TestPlc:
         stamp_dir = os.path.dirname(stamp_path)
         utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
         return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
-        
-    # this is called inconditionnally at the beginning of the test sequence 
+
+    # this is called unconditionally at the beginning of the test sequence
     # just in case this is a rerun, so if the vm is not running it's fine
     def plcvm_delete(self):
         "vserver delete the test myplc"
         stamp_path = self.vm_timestamp_path()
         self.run_in_host("rm -f {}".format(stamp_path))
-        self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
-        self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
+        self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
+        self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
         self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
         return True
 
@@ -672,7 +683,7 @@ class TestPlc:
     # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
     def plcvm_create(self):
         "vserver creation (no install done)"
-        # push the local build/ dir to the testplc box 
+        # push the local build/ dir to the testplc box
         if self.is_local():
             # a full path for the local calls
             build_dir = os.path.dirname(sys.argv[0])
@@ -686,11 +697,11 @@ class TestPlc:
             # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
             self.test_ssh.rmdir(build_dir)
             self.test_ssh.copy(build_dir, recursive=True)
-        # the repo url is taken from arch-rpms-url 
+        # the repo url is taken from arch-rpms-url
         # with the last step (i386) removed
         repo_url = self.options.arch_rpms_url
         for level in [ 'arch' ]:
-           repo_url = os.path.dirname(repo_url)
+            repo_url = os.path.dirname(repo_url)
 
         # invoke initvm (drop support for vs)
         script = "lbuild-initvm.sh"
@@ -705,19 +716,26 @@ class TestPlc:
             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
             script_options += " -n {}".format(vserver_hostname)
         except:
-            print "Cannot reverse lookup {}".format(self.vserverip)
-            print "This is considered fatal, as this might pollute the test results"
+            print("Cannot reverse lookup {}".format(self.vserverip))
+            print("This is considered fatal, as this might pollute the test results")
             return False
         create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
         return self.run_in_host(create_vserver) == 0
 
-    ### install_rpm 
-    def plc_install(self):
-        "yum install myplc, noderepo, and the plain bootstrapfs"
+    ### install django through pip
+    def django_install(self):
+        # plcapi requires Django, which is no longer provided by Fedora as an rpm
+        # so we use pip instead
+        """
+        pip install Django
+        """
+        return self.pip3_install('Django')
 
-        # workaround for getting pgsql8.2 on centos5
-        if self.options.fcdistro == "centos5":
-            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+    ### install_rpm
+    def plc_install(self):
+        """
+        yum install myplc, noderepo
+        """
 
         # compute nodefamily
         if self.options.personality == "linux32":
@@ -725,28 +743,64 @@ class TestPlc:
         elif self.options.personality == "linux64":
             arch = "x86_64"
         else:
-            raise Exception, "Unsupported personality {}".format(self.options.personality)
+            raise Exception("Unsupported personality {}".format(self.options.personality))
         nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
 
-        pkgs_list=[]
-        pkgs_list.append("slicerepo-{}".format(nodefamily))
+        # check it's possible to install just 'myplc-core' first
+        if not self.dnf_install("myplc-core"):
+            return False
+
+        pkgs_list = []
         pkgs_list.append("myplc")
-        pkgs_list.append("noderepo-{}".format(nodefamily))
-        pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
+        # pkgs_list.append("slicerepo-{}".format(nodefamily))
+        # pkgs_list.append("noderepo-{}".format(nodefamily))
         pkgs_string=" ".join(pkgs_list)
-        return self.yum_install(pkgs_list)
+        return self.dnf_install(pkgs_list)
+
+    def install_syslinux6(self):
+        """
+        install syslinux6 from the fedora21 release
+        """
+        key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+        rpms = [
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+        ]
+        # this can be done several times
+        self.run_in_guest("rpm --import {key}".format(**locals()))
+        return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+    def bonding_builds(self):
+        """
+        list /etc/yum.repos.d on the myplc side
+        """
+        self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+        return True
+
+    def bonding_nodes(self):
+        """
+        List nodes known to the myplc together with their nodefamily
+        """
+        print("---------------------------------------- nodes")
+        for node in self.apiserver.GetNodes(self.auth_root()):
+            print("{} -> {}".format(node['hostname'],
+                                    self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
+        print("---------------------------------------- nodes")
+
 
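bonding_nodes is a pure reporting step; its output pairs each hostname with the nodefamily computed by GetNodeFlavour, e.g. (hypothetical values, using the pldistro-fcdistro-arch shape built in plc_install):

    ---------------------------------------- nodes
    node1.example.org -> onelab-f33-x86_64
    ---------------------------------------- nodes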
     ###
     def mod_python(self):
         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
-        return self.yum_install( ['mod_python'] )
+        return self.dnf_install( ['mod_python'] )
 
-    ### 
+    ###
     def plc_configure(self):
         "run plc-config-tty"
         tmpname = '{}.plc-config-tty'.format(self.name())
         with open(tmpname,'w') as fileconf:
-            for (var,value) in self.plc_spec['settings'].iteritems():
+            for var, value in self.plc_spec['settings'].items():
                 fileconf.write('e {}\n{}\n'.format(var, value))
             fileconf.write('w\n')
             fileconf.write('q\n')
@@ -755,36 +809,18 @@ class TestPlc:
         utils.system('rm {}'.format(tmpname))
         return True
 
-# f14 is a bit odd in this respect, although this worked fine in guests up to f18
-# however using a vplc guest under f20 requires this trick
-# the symptom is this: service plc start
-# Starting plc (via systemctl):  Failed to get D-Bus connection: \
-#    Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
-# weird thing is the doc says f14 uses upstart by default and not systemd
-# so this sounds kind of harmless
-    def start_service(self, service):
-        return self.start_stop_service(service, 'start')
-    def stop_service(self, service):
-        return self.start_stop_service(service, 'stop')
-
-    def start_stop_service(self, service, start_or_stop):
-        "utility to start/stop a service with the special trick for f14"
-        if self.options.fcdistro != 'f14':
-            return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
-        else:
-            # patch /sbin/service so it does not reset environment
-            self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
-            # this is because our own scripts in turn call service 
-            return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
-                                     .format(service, start_or_stop)) == 0
+    # care only about f>=27
+    def start_stop_systemd(self, service, start_or_stop):
+        "utility to start/stop a systemd-defined service (sfa)"
+        return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
 
     def plc_start(self):
-        "service plc start"
-        return self.start_service('plc')
+        "start plc through systemclt"
+        return self.start_stop_systemd('plc', 'start')
 
     def plc_stop(self):
-        "service plc stop"
-        return self.stop_service('plc')
+        "stop plc through systemctl"
+        return self.start_stop_systemd('plc', 'stop')
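So with f27 and later, plc_start boils down to running, inside the guest:

    systemctl start plc

and the same helper presumably serves the sfa steps as well, per its docstring.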
 
     def plcvm_start(self):
         "start the PLC vserver"
@@ -800,7 +836,7 @@ class TestPlc:
     def keys_store(self):
         "stores test users ssh keys in keys/"
         for key_spec in self.plc_spec['keys']:
-               TestKey(self,key_spec).store_key()
+                TestKey(self,key_spec).store_key()
         return True
 
     def keys_clean(self):
@@ -829,11 +865,11 @@ class TestPlc:
     def sites(self):
         "create sites with PLCAPI"
         return self.do_sites()
-    
+
     def delete_sites(self):
         "delete sites with PLCAPI"
         return self.do_sites(action="delete")
-    
+
     def do_sites(self, action="add"):
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite(self,site_spec)
@@ -851,14 +887,14 @@ class TestPlc:
 
     def delete_all_sites(self):
         "Delete all sites in PLC, and related objects"
-        print 'auth_root', self.auth_root()
-        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
+        print('auth_root', self.auth_root())
+        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
         for site in sites:
             # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
                 continue
             site_id = site['site_id']
-            print 'Deleting site_id', site_id
+            print('Deleting site_id', site_id)
             self.apiserver.DeleteSite(self.auth_root(), site_id)
         return True
 
@@ -897,7 +933,7 @@ class TestPlc:
     @staticmethod
     def translate_timestamp(start, grain, timestamp):
         if timestamp < TestPlc.YEAR:
-            return start+timestamp*grain
+            return start + timestamp*grain
         else:
             return timestamp
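In other words, small values in a lease spec are relative offsets counted in grains, while anything past a year is taken as an absolute epoch timestamp. A quick worked example (start and grain hypothetical):

    TestPlc.translate_timestamp(1600000200, 1800, 2)
    # -> 1600003800, i.e. start + 2 grains of 30 minutes
    TestPlc.translate_timestamp(1600000200, 1800, 1700000000)
    # -> 1700000000, already an absolute timestamp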
 
@@ -909,12 +945,12 @@ class TestPlc:
         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
         now = int(time.time())
         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
-        print 'API answered grain=', grain
-        start = (now/grain)*grain
+        print('API answered grain=', grain)
+        start = (now//grain)*grain
         start += grain
         # find out all nodes that are reservable
         nodes = self.all_reservable_nodenames()
-        if not nodes: 
+        if not nodes:
             utils.header("No reservable node found - proceeding without leases")
             return True
         ok = True
@@ -927,7 +963,7 @@ class TestPlc:
             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
-                                                      lease_spec['t_from'],lease_spec['t_until'])
+                                                      lease_spec['t_from'], lease_spec['t_until'])
             if lease_addition['errors']:
                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
                 ok = False
@@ -936,7 +972,7 @@ class TestPlc:
                              .format(nodes, lease_spec['slice'],
                                      lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
                                      lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
-                
+
         return ok
 
     def delete_leases(self):
@@ -955,7 +991,7 @@ class TestPlc:
             if self.options.verbose or current:
                 utils.header("{} {} from {} until {}"\
                              .format(l['hostname'], l['name'],
-                                     TestPlc.timestamp_printable(l['t_from']), 
+                                     TestPlc.timestamp_printable(l['t_from']),
                                      TestPlc.timestamp_printable(l['t_until'])))
         return True
 
@@ -967,20 +1003,20 @@ class TestPlc:
             test_site = TestSite(self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node = TestNode(self, test_site, node_spec)
-                if node_spec.has_key('nodegroups'):
+                if 'nodegroups' in node_spec:
                     nodegroupnames = node_spec['nodegroups']
-                    if isinstance(nodegroupnames, StringTypes):
+                    if isinstance(nodegroupnames, str):
                         nodegroupnames = [ nodegroupnames ]
                     for nodegroupname in nodegroupnames:
-                        if not groups_dict.has_key(nodegroupname):
+                        if nodegroupname not in groups_dict:
                             groups_dict[nodegroupname] = []
                         groups_dict[nodegroupname].append(test_node.name())
         auth = self.auth_root()
         overall = True
-        for (nodegroupname,group_nodes) in groups_dict.iteritems():
+        for (nodegroupname,group_nodes) in groups_dict.items():
             if action == "add":
-                print 'nodegroups:', 'dealing with nodegroup',\
-                    nodegroupname, 'on nodes', group_nodes
+                print('nodegroups:', 'dealing with nodegroup',\
+                    nodegroupname, 'on nodes', group_nodes)
                 # first, check if the nodetagtype is here
                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
                 if tag_types:
@@ -990,20 +1026,20 @@ class TestPlc:
                                                             {'tagname' : nodegroupname,
                                                              'description' : 'for nodegroup {}'.format(nodegroupname),
                                                              'category' : 'test'})
-                print 'located tag (type)', nodegroupname, 'as', tag_type_id
+                print('located tag (type)', nodegroupname, 'as', tag_type_id)
                 # create nodegroup
                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
                 if not nodegroups:
                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
-                    print 'created nodegroup', nodegroupname, \
-                        'from tagname', nodegroupname, 'and value', 'yes'
+                    print('created nodegroup', nodegroupname, \
+                        'from tagname', nodegroupname, 'and value', 'yes')
                 # set node tag on all nodes, value='yes'
                 for nodename in group_nodes:
                     try:
                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                     except:
                         traceback.print_exc()
-                        print 'node', nodename, 'seems to already have tag', nodegroupname
+                        print('node', nodename, 'seems to already have tag', nodegroupname)
                     # check anyway
                     try:
                         expect_yes = self.apiserver.GetNodeTags(auth,
@@ -1011,15 +1047,15 @@ class TestPlc:
                                                                  'tagname'  : nodegroupname},
                                                                 ['value'])[0]['value']
                         if expect_yes != "yes":
-                            print 'Mismatch node tag on node',nodename,'got',expect_yes
+                            print('Mismatch node tag on node',nodename,'got',expect_yes)
                             overall = False
                     except:
                         if not self.options.dry_run:
-                            print 'Cannot find tag', nodegroupname, 'on node', nodename
+                            print('Cannot find tag', nodegroupname, 'on node', nodename)
                             overall = False
             else:
                 try:
-                    print 'cleaning nodegroup', nodegroupname
+                    print('cleaning nodegroup', nodegroupname)
                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                 except:
                     traceback.print_exc()
@@ -1042,10 +1078,10 @@ class TestPlc:
             node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
                                 for node_spec in site_spec['nodes'] ]
         return node_infos
-    
+
     def all_nodenames(self):
         return [ x[0] for x in self.all_node_infos() ]
-    def all_reservable_nodenames(self): 
+    def all_reservable_nodenames(self):
         res = []
         for site_spec in self.plc_spec['sites']:
             for node_spec in site_spec['nodes']:
@@ -1058,7 +1094,7 @@ class TestPlc:
     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
                                silent_minutes, period_seconds = 15):
         if self.options.dry_run:
-            print 'dry_run'
+            print('dry_run')
             return True
 
         class CompleterTaskBootState(CompleterTask):
@@ -1071,16 +1107,16 @@ class TestPlc:
                     node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
                                                             [ self.hostname ],
                                                             ['boot_state'])[0]
-                    self.last_boot_state = node['boot_state'] 
+                    self.last_boot_state = node['boot_state']
                     return self.last_boot_state == target_boot_state
                 except:
                     return False
             def message(self):
                 return "CompleterTaskBootState with node {}".format(self.hostname)
             def failure_epilogue(self):
-                print "node {} in state {} - expected {}"\
-                    .format(self.hostname, self.last_boot_state, target_boot_state)
-                
+                print("node {} in state {} - expected {}"\
+                    .format(self.hostname, self.last_boot_state, target_boot_state))
+
         timeout = timedelta(minutes=timeout_minutes)
         graceout = timedelta(minutes=silent_minutes)
         period   = timedelta(seconds=period_seconds)
@@ -1100,7 +1136,7 @@ class TestPlc:
         return True
 
     # probing nodes
-    def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+    def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
         class CompleterTaskPingNode(CompleterTask):
             def __init__(self, hostname):
                 self.hostname = hostname
@@ -1108,7 +1144,7 @@ class TestPlc:
                 command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
                 return utils.system(command, silent=silent) == 0
             def failure_epilogue(self):
-                print "Cannot ping node with name {}".format(self.hostname)
+                print("Cannot ping node with name {}".format(self.hostname))
         timeout = timedelta(seconds = timeout_seconds)
         graceout = timeout
         period = timedelta(seconds = period_seconds)
@@ -1122,32 +1158,32 @@ class TestPlc:
         return self.check_nodes_ping()
 
     def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
-        # various delays 
+        # various delays
         timeout  = timedelta(minutes=timeout_minutes)
         graceout = timedelta(minutes=silent_minutes)
         period   = timedelta(seconds=period_seconds)
         vservername = self.vservername
-        if debug: 
+        if debug:
             message = "debug"
             completer_message = 'ssh_node_debug'
             local_key = "keys/{vservername}-debug.rsa".format(**locals())
-        else: 
+        else:
             message = "boot"
             completer_message = 'ssh_node_boot'
-           local_key = "keys/key_admin.rsa"
+            local_key = "keys/key_admin.rsa"
         utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
         node_infos = self.all_node_infos()
         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
                                         boot_state=message, dry_run=self.options.dry_run) \
                       for (nodename, qemuname) in node_infos ]
         return Completer(tasks, message=completer_message).run(timeout, graceout, period)
-        
+
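A hedged reading of the Completer knobs used throughout this file: tasks are re-polled every period until timeout, and failures stay quiet during the initial graceout window. The call above, with the debug timers, amounts to something like:

    Completer(tasks, message='ssh_node_debug').run(
        timedelta(minutes=10), timedelta(minutes=5), timedelta(seconds=15))

with the actual minutes coming from plc_spec['ssh_node_debug_timers'].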
     def ssh_node_debug(self):
         "Tries to ssh into nodes in debug mode with the debug ssh key"
         return self.check_nodes_ssh(debug = True,
                                     timeout_minutes = self.ssh_node_debug_timeout,
                                     silent_minutes = self.ssh_node_debug_silent)
-    
+
     def ssh_node_boot(self):
         "Tries to ssh into nodes in production mode with the root ssh key"
         return self.check_nodes_ssh(debug = False,
@@ -1157,7 +1193,7 @@ class TestPlc:
     def node_bmlogs(self):
         "Checks that there's a non-empty dir. /var/log/bm/raw"
         return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
-    
+
     @node_mapper
     def qemu_local_init(self): pass
     @node_mapper
@@ -1165,22 +1201,38 @@ class TestPlc:
     @node_mapper
     def qemu_local_config(self): pass
     @node_mapper
+    def qemu_export(self): pass
+    @node_mapper
+    def qemu_cleanlog(self): pass
+    @node_mapper
     def nodestate_reinstall(self): pass
     @node_mapper
+    def nodestate_upgrade(self): pass
+    @node_mapper
     def nodestate_safeboot(self): pass
     @node_mapper
     def nodestate_boot(self): pass
     @node_mapper
     def nodestate_show(self): pass
     @node_mapper
-    def qemu_export(self): pass
-        
+    def nodedistro_f14(self): pass
+    @node_mapper
+    def nodedistro_f18(self): pass
+    @node_mapper
+    def nodedistro_f20(self): pass
+    @node_mapper
+    def nodedistro_f21(self): pass
+    @node_mapper
+    def nodedistro_f22(self): pass
+    @node_mapper
+    def nodedistro_show(self): pass
+
     ### check hooks : invoke scripts from hooks/{node,slice}
-    def check_hooks_node(self): 
+    def check_hooks_node(self):
         return self.locate_first_node().check_hooks()
-    def check_hooks_sliver(self) : 
+    def check_hooks_sliver(self) :
         return self.locate_first_sliver().check_hooks()
-    
+
     def check_hooks(self):
         "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
         return self.check_hooks_node() and self.check_hooks_sliver()
@@ -1196,17 +1248,17 @@ class TestPlc:
             def message(self):
                 return "initscript checker for {}".format(self.test_sliver.name())
             def failure_epilogue(self):
-                print "initscript stamp {} not found in sliver {}"\
-                    .format(self.stamp, self.test_sliver.name())
-            
+                print("initscript stamp {} not found in sliver {}"\
+                    .format(self.stamp, self.test_sliver.name()))
+
         tasks = []
         for slice_spec in self.plc_spec['slices']:
-            if not slice_spec.has_key('initscriptstamp'):
+            if 'initscriptstamp' not in slice_spec:
                 continue
             stamp = slice_spec['initscriptstamp']
             slicename = slice_spec['slice_fields']['name']
             for nodename in slice_spec['nodenames']:
-                print 'nodename', nodename, 'slicename', slicename, 'stamp', stamp
+                print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
                 site,node = self.locate_node(nodename)
                 # xxx - passing the wrong site - probably harmless
                 test_site = TestSite(self, site)
@@ -1216,11 +1268,11 @@ class TestPlc:
                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
         return Completer(tasks, message='check_initscripts').\
             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
-           
+
     def check_initscripts(self):
         "check that the initscripts have triggered"
         return self.do_check_initscripts()
-    
+
     def initscripts(self):
         "create initscripts with PLCAPI"
         for initscript in self.plc_spec['initscripts']:
@@ -1232,12 +1284,12 @@ class TestPlc:
         "delete initscripts with PLCAPI"
         for initscript in self.plc_spec['initscripts']:
             initscript_name = initscript['initscript_fields']['name']
-            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
+            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
             try:
                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
-                print initscript_name, 'deleted'
+                print(initscript_name, 'deleted')
             except:
-                print 'deletion went wrong - probably did not exist'
+                print('deletion went wrong - probably did not exist')
         return True
 
     ### manage slices
@@ -1271,7 +1323,7 @@ class TestPlc:
             else:
                 test_slice.create_slice()
         return True
-        
+
     @slice_mapper__tasks(20, 10, 15)
     def ssh_slice(self): pass
     @slice_mapper__tasks(20, 19, 15)
@@ -1293,8 +1345,11 @@ class TestPlc:
 
     @node_mapper
     def keys_clear_known_hosts(self): pass
-    
+
     def plcapi_urls(self):
+        """
+        attempts to reach the PLCAPI with various forms for the URL
+        """
         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
 
     def speed_up_slices(self):
@@ -1305,7 +1360,7 @@ class TestPlc:
         return self._speed_up_slices(5, 1)
 
     def _speed_up_slices(self, p, r):
-        # create the template on the server-side 
+        # create the template on the server-side
         template = "{}.nodemanager".format(self.name())
         with open(template,"w") as template_file:
             template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
@@ -1337,6 +1392,9 @@ class TestPlc:
     @node_mapper
     def qemu_timestamp(self) : pass
 
+    @node_mapper
+    def qemu_nodefamily(self): pass
+
     # when a spec refers to a node possibly on another plc
     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
         for plc in [ self ] + other_plcs:
@@ -1344,13 +1402,13 @@ class TestPlc:
                 return plc.locate_sliver_obj(nodename, slicename)
             except:
                 pass
-        raise Exception, "Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename)
+        raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
 
     # implement this one as a cross step so that we can take advantage of different nodes
     # in multi-plcs mode
     def cross_check_tcp(self, other_plcs):
         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
-        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']: 
+        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
             utils.header("check_tcp: no/empty config found")
             return True
         specs = self.plc_spec['tcp_specs']
@@ -1365,7 +1423,7 @@ class TestPlc:
             def message(self):
                 return "network ready checker for {}".format(self.test_sliver.name())
             def failure_epilogue(self):
-                print "could not bind port from sliver {}".format(self.test_sliver.name())
+                print("could not bind port from sliver {}".format(self.test_sliver.name()))
 
         sliver_specs = {}
         tasks = []
@@ -1391,7 +1449,7 @@ class TestPlc:
         if not Completer(tasks, message='check for network readiness in slivers').\
            run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
             return False
-            
+
         # run server and client
         for spec in specs:
             port = spec['port']
@@ -1399,7 +1457,8 @@ class TestPlc:
             # the issue here is that we have the server run in background
             # and so we have no clue if it took off properly or not
             # looks like in some cases it does not
-            if not spec['s_sliver'].run_tcp_server(port, timeout=20):
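+            # run_tcp_server now needs the address to listen on explicitly
+            # (assumption: it binds to the node's hostname, not a wildcard)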
+            address = spec['s_sliver'].test_node.name()
+            if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
                 overall = False
                 break
 
@@ -1414,13 +1473,13 @@ class TestPlc:
         return overall
 
     # painfully enough, we need to allow for some time as netflow might show up last
-    def check_system_slice(self): 
+    def check_system_slice(self):
         "all nodes: check that a system slice is alive"
         # netflow currently not working in the lxc distro
         # drl not built at all in the wtx distro
         # if we find either of them we're happy
         return self.check_netflow() or self.check_drl()
-    
+
     # expose these
     def check_netflow(self): return self._check_system_slice('netflow')
     def check_drl(self): return self._check_system_slice('drl')
@@ -1428,15 +1487,15 @@ class TestPlc:
     # we have the slices up already here, so it should not take too long
     def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
         class CompleterTaskSystemSlice(CompleterTask):
-            def __init__(self, test_node, dry_run): 
+            def __init__(self, test_node, dry_run):
                 self.test_node = test_node
                 self.dry_run = dry_run
-            def actual_run(self): 
+            def actual_run(self):
                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
-            def message(self): 
+            def message(self):
                 return "System slice {} @ {}".format(slicename, self.test_node.name())
-            def failure_epilogue(self): 
-                print "COULD not find system slice {} @ {}".format(slicename, self.test_node.name())
+            def failure_epilogue(self):
+                print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
         timeout = timedelta(minutes=timeout_minutes)
         silent  = timedelta(0)
         period  = timedelta(seconds=period_seconds)
@@ -1460,21 +1519,82 @@ class TestPlc:
     # in particular runs with --preserve (dont cleanup) and without --check
     # also it gets run twice, once with the --foreign option for creating fake foreign entries
 
+    def install_pip2(self):
+
+        # xxx could make sense to mirror this one
+
+        replacements = [
+            "https://acc.dl.osdn.jp/storage/g/u/un/unitedrpms/32/x86_64/python2-pip-19.1.1-7.fc32.noarch.rpm",
+        ]
+
+        return (
+               self.run_in_guest("pip2 --version") == 0
+            or self.run_in_guest("dnf install python2-pip") == 0
+            or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
+
+
+    def install_m2crypto(self):
+
+        # installing m2crypto for python2 is increasingly difficult
+        # f29 and f31: dnf install python2-m2crypto
+        # f33: no longer available but the f31 repos below do the job just fine
+        # note that using pip2 does not look like a viable option because it does
+        # an install from sources and that's quite awkward
+
+        replacements = [
+            "http://mirror.onelab.eu/fedora/releases/31/Everything/x86_64/os/Packages/p/python2-typing-3.6.2-5.fc31.noarch.rpm",
+            "http://mirror.onelab.eu/fedora/releases/31/Everything/x86_64/os/Packages/p/python2-m2crypto-0.35.2-2.fc31.x86_64.rpm",
+        ]
+
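+        # cheap idempotence check first: if M2Crypto already imports,
+        # there is nothing left to install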
+        return (
+               self.run_in_guest('python2 -c "import M2Crypto"', backslash=True) == 0
+            or self.run_in_guest("pip2 install python2-m2crypto") == 0
+            or self.run_in_guest("dnf localinstall -y " + " ".join(replacements)) == 0)
+
+        # about pip2 (this documents install_pip2 above) - the fallback logic:
+        # * check for the pip2 command
+        # * if not, try dnf install python2-pip
+        # * if still not, dnf localinstall a replacement rpm
+        #   (that rpm would then need to be mirrored)
+
+
     def sfa_install_all(self):
         "yum install sfa sfa-plc sfa-sfatables sfa-client"
-        return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
+
+        # the python2-* rpm/dnf packages are getting deprecated,
+        # so we install the python dependencies with pip2 instead
+        # - except for m2crypto, which needs the special treatment above
+
+        pip_dependencies = [
+            'sqlalchemy-migrate',
+            'lxml',
+            'python-dateutil',
+            'psycopg2-binary',
+            'pyOpenSSL',
+        ]
+
+        return (
+                    self.install_pip2()
+                and self.install_m2crypto()
+                and all((self.run_in_guest(f"pip2 install {dep}") == 0)
+                        for dep in pip_dependencies)
+                and self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client")
+                and self.run_in_guest("systemctl enable sfa-registry")==0
+                and self.run_in_guest("systemctl enable sfa-aggregate")==0)
 
     def sfa_install_core(self):
         "yum install sfa"
-        return self.yum_install("sfa")
-        
+        return self.dnf_install("sfa")
+
     def sfa_install_plc(self):
         "yum install sfa-plc"
-        return self.yum_install("sfa-plc")
-        
+        return self.dnf_install("sfa-plc")
+
     def sfa_install_sfatables(self):
         "yum install sfa-sfatables"
-        return self.yum_install("sfa-sfatables")
+        return self.dnf_install("sfa-sfatables")
 
     # for some very odd reason, this sometimes fails with the following symptom
     # # yum install sfa-client
@@ -1489,7 +1609,7 @@ class TestPlc:
     # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
     # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
     # even though in the same context I have
-    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h 
+    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
     # Filesystem            Size  Used Avail Use% Mounted on
     # /dev/hdv1             806G  264G  501G  35% /
     # none                   16M   36K   16M   1% /tmp
@@ -1497,23 +1617,23 @@ class TestPlc:
     # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
     def sfa_install_client(self):
         "yum install sfa-client"
-        first_try = self.yum_install("sfa-client")
+        first_try = self.dnf_install("sfa-client")
         if first_try:
             return True
         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
         code, cached_rpm_path = \
-                utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
+                utils.output_of(self.actual_command_in_guest(r'find /var/cache/yum -name sfa-client\*.rpm'))
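+        # (note: with dnf the rpm cache may live under /var/cache/dnf rather
+        # than /var/cache/yum - worth checking if this 2nd chance never fires)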
         utils.header("rpm_path=<<{}>>".format(rpm_path))
-        # just for checking 
+        # just for checking
         self.run_in_guest("rpm -i {}".format(cached_rpm_path))
-        return self.yum_check_installed("sfa-client")
+        return self.dnf_check_installed("sfa-client")
 
     def sfa_dbclean(self):
         "thoroughly wipes off the SFA database"
         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
             self.run_in_guest("sfa-nuke.py") == 0 or \
             self.run_in_guest("sfa-nuke-plc.py") == 0 or \
-            self.run_in_guest("sfaadmin registry nuke") == 0             
+            self.run_in_guest("sfaadmin registry nuke") == 0
 
     def sfa_fsclean(self):
         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
@@ -1522,7 +1642,7 @@ class TestPlc:
 
     def sfa_plcclean(self):
         "cleans the PLC entries that were created as a side effect of running the script"
-        # ignore result 
+        # ignore result
         sfa_spec = self.plc_spec['sfa']
 
         for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
@@ -1530,19 +1650,19 @@ class TestPlc:
             try:
                 self.apiserver.DeleteSite(self.auth_root(),login_base)
             except:
-                print "Site {} already absent from PLC db".format(login_base)
+                print("Site {} already absent from PLC db".format(login_base))
 
-            for spec_name in ['pi_spec','user_spec']:
+            for spec_name in ['pi_spec', 'user_spec']:
                 user_spec = auth_sfa_spec[spec_name]
                 username = user_spec['email']
                 try:
                     self.apiserver.DeletePerson(self.auth_root(),username)
-                except: 
+                except:
                     # this in fact is expected as sites delete their members
                     #print "User {} already absent from PLC db".format(username)
                     pass
 
-        print "REMEMBER TO RUN sfa_import AGAIN"
+        print("REMEMBER TO RUN sfa_import AGAIN")
         return True
 
     def sfa_uninstall(self):
@@ -1551,7 +1671,7 @@ class TestPlc:
         self.run_in_guest("rm -rf /var/lib/sfa")
         self.run_in_guest("rm -rf /etc/sfa")
         self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
-        # xxx tmp 
+        # xxx tmp
         self.run_in_guest("rpm -e --noscripts sfa-plc")
         return True
 
@@ -1566,10 +1686,10 @@ class TestPlc:
-    # if the yum install phase fails, consider the test is successful
-    # other combinations will eventually run it hopefully
+    # if the dnf install phase fails, consider the test successful -
+    # other combinations will eventually run it, hopefully
     def sfa_utest(self):
-        "yum install sfa-tests and run SFA unittests"
-        self.run_in_guest("yum -y install sfa-tests")
+        "dnf install sfa-tests and run SFA unittests"
+        self.run_in_guest("dnf -y install sfa-tests")
         # failed to install - forget it
-        if self.run_in_guest("rpm -q sfa-tests") != 0: 
+        if self.run_in_guest("rpm -q sfa-tests") != 0:
             utils.header("WARNING: SFA unit tests failed to install, ignoring")
             return True
         return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
@@ -1580,7 +1700,7 @@ class TestPlc:
         if not os.path.isdir(dirname):
             utils.system("mkdir -p {}".format(dirname))
         if not os.path.isdir(dirname):
-            raise Exception,"Cannot create config dir for plc {}".format(self.name())
+            raise Exception("Cannot create config dir for plc {}".format(self.name()))
         return dirname
 
     def conffile(self, filename):
@@ -1589,22 +1709,22 @@ class TestPlc:
         subdirname = "{}/{}".format(self.confdir(), dirname)
         if clean:
             utils.system("rm -rf {}".format(subdirname))
-        if not os.path.isdir(subdirname): 
+        if not os.path.isdir(subdirname):
             utils.system("mkdir -p {}".format(subdirname))
         if not dry_run and not os.path.isdir(subdirname):
             raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
         return subdirname
-        
+
     def conffile_clean(self, filename):
         filename=self.conffile(filename)
         return utils.system("rm -rf {}".format(filename))==0
-    
+
     ###
     def sfa_configure(self):
         "run sfa-config-tty"
         tmpname = self.conffile("sfa-config-tty")
         with open(tmpname,'w') as fileconf:
-            for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
+            for var, value in self.plc_spec['sfa']['settings'].items():
                 fileconf.write('e {}\n{}\n'.format(var, value))
             fileconf.write('w\n')
             fileconf.write('R\n')
@@ -1647,16 +1767,18 @@ class TestPlc:
     def sfa_import(self):
         "use sfaadmin to import from plc"
         auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
-        return self.run_in_guest('sfaadmin reg import_registry') == 0 
+        return self.run_in_guest('sfaadmin reg import_registry') == 0
 
     def sfa_start(self):
-        "service sfa start"
-        return self.start_service('sfa')
+        "start SFA through systemctl - also install dependencies"
+
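+        # the registry is started first; sfa_stop below tears the two
+        # services down in the reverse order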
+        return (self.start_stop_systemd('sfa-registry', 'start')
+            and self.start_stop_systemd('sfa-aggregate', 'start'))
 
 
     def sfi_configure(self):
         "Create /root/sfi on the plc side for sfi client configuration"
-        if self.options.dry_run: 
+        if self.options.dry_run:
             utils.header("DRY RUN - skipping step")
             return True
         sfa_spec = self.plc_spec['sfa']
@@ -1720,6 +1842,8 @@ class TestPlc:
     @auth_sfa_mapper
     def sfa_provision_empty(self): pass
     @auth_sfa_mapper
+    def sfa_describe(self): pass
+    @auth_sfa_mapper
     def sfa_check_slice_plc(self): pass
     @auth_sfa_mapper
     def sfa_check_slice_plc_empty(self): pass
@@ -1745,8 +1869,9 @@ class TestPlc:
     def sfa_delete_slice(self): pass
 
     def sfa_stop(self):
-        "service sfa stop"
-        return self.stop_service('sfa')
+        "stop sfa through systemclt"
+        return (self.start_stop_systemd('sfa-aggregate', 'stop') and
+                self.start_stop_systemd('sfa-registry', 'stop'))
 
     def populate(self):
         "creates random entries in the PLCAPI"
@@ -1784,26 +1909,26 @@ class TestPlc:
         # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
         # (1.a)
-        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
         self.gather_var_logs()
         # (1.b)
-        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
         self.gather_pgsql_logs()
         # (1.c)
-        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
         self.gather_root_sfi()
-        # (2) 
-        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
+        # (2)
+        print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite(self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node = TestNode(self, test_site, node_spec)
                 test_node.gather_qemu_logs()
         # (3)
-        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+        print("-------------------- TestPlc.gather_logs : nodes's /var/log")
         self.gather_nodes_var_logs()
         # (4)
-        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
         self.gather_slivers_var_logs()
         return True
 
@@ -1817,7 +1942,7 @@ class TestPlc:
 
     def gather_var_logs(self):
         utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
-        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")        
+        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
         command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
         utils.system(command)
         command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
@@ -1825,13 +1950,13 @@ class TestPlc:
 
     def gather_pgsql_logs(self):
         utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
-        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")        
+        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
         command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
         utils.system(command)
 
     def gather_root_sfi(self):
         utils.system("mkdir -p logs/sfi.{}".format(self.name()))
-        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")        
+        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
         command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
         utils.system(command)
 
@@ -1852,7 +1977,7 @@ class TestPlc:
         # uses options.dbname if it is found
         try:
             name = self.options.dbname
-            if not isinstance(name, StringTypes):
+            if not isinstance(name, str):
                 raise Exception
         except:
             t = datetime.now()
@@ -1870,14 +1995,13 @@ class TestPlc:
     def plc_db_restore(self):
         'restore the planetlab5 DB - looks broken, but run -n might help'
         dump = self.dbfile("planetab5")
-        ##stop httpd service
-        self.run_in_guest('service httpd stop')
+        self.run_in_guest('systemctl stop httpd')
         # xxx - need another wrapper
         self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
         self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
         self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
-        ##starting httpd service
-        self.run_in_guest('service httpd start')
+        self.run_in_guest('systemctl start httpd')
 
         utils.header('Database restored from ' + dump)
 
@@ -1888,7 +2012,7 @@ class TestPlc:
             if '@' in step:
                 step, qualifier = step.split('@')
             # or be defined as forced or ignored by default
-            for keyword in ['_ignore','_force']:
+            for keyword in ['_ignore', '_force']:
                 if step.endswith(keyword):
                     step=step.replace(keyword,'')
             if step == SEP or step == SEPSFA :
@@ -1898,54 +2022,54 @@ class TestPlc:
             wrapped = ignore_result(method)
 #            wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
             setattr(TestPlc, name, wrapped)
-            
+
 #    @ignore_result
 #    def ssh_slice_again_ignore (self): pass
 #    @ignore_result
 #    def check_initscripts_ignore (self): pass
-    
+
     def standby_1_through_20(self):
         """convenience function to wait for a specified number of minutes"""
         pass
-    @standby_generic 
+    @standby_generic
     def standby_1(): pass
-    @standby_generic 
+    @standby_generic
     def standby_2(): pass
-    @standby_generic 
+    @standby_generic
     def standby_3(): pass
-    @standby_generic 
+    @standby_generic
     def standby_4(): pass
-    @standby_generic 
+    @standby_generic
     def standby_5(): pass
-    @standby_generic 
+    @standby_generic
     def standby_6(): pass
-    @standby_generic 
+    @standby_generic
     def standby_7(): pass
-    @standby_generic 
+    @standby_generic
     def standby_8(): pass
-    @standby_generic 
+    @standby_generic
     def standby_9(): pass
-    @standby_generic 
+    @standby_generic
     def standby_10(): pass
-    @standby_generic 
+    @standby_generic
     def standby_11(): pass
-    @standby_generic 
+    @standby_generic
     def standby_12(): pass
-    @standby_generic 
+    @standby_generic
     def standby_13(): pass
-    @standby_generic 
+    @standby_generic
     def standby_14(): pass
-    @standby_generic 
+    @standby_generic
     def standby_15(): pass
-    @standby_generic 
+    @standby_generic
     def standby_16(): pass
-    @standby_generic 
+    @standby_generic
     def standby_17(): pass
-    @standby_generic 
+    @standby_generic
     def standby_18(): pass
-    @standby_generic 
+    @standby_generic
     def standby_19(): pass
-    @standby_generic 
+    @standby_generic
     def standby_20(): pass
 
     # convenience for debugging the test logic