no more lxc-enter-namespace - use ssh
[tests.git] / system / TestPlc.py
index a60f619..b61cce2 100644
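
This change drops the 'virsh -c lxc:/// lxc-enter-namespace' plumbing and reaches the plc guest over plain ssh instead; the guest hostname is the last dash-separated token of the vservername, computed once as self.vplchostname. The rest of the diff is a Python 3 cleanup: print statements become print() calls, %-formatting becomes str.format(), iteritems()/has_key()/StringTypes go away, and integer division is made explicit. A minimal sketch of the new command-wrapping path, assuming the module-level TestSsh import that TestPlc.py already relies on and a purely illustrative vservername:

    from TestSsh import TestSsh                  # assumed import, as used elsewhere in tests/system

    vservername  = "f21-1-vplc01"                # illustrative guest/VM name
    vplchostname = vservername.split('-')[-1]    # -> "vplc01"
    ssh_leg = TestSsh(vplchostname)
    # this is the string the new host_to_guest() hands back for execution on the host box
    print(ssh_leg.actual_command("service plc start"))
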
@@ -7,7 +7,6 @@ import os, os.path
 import traceback
 import socket
 from datetime import datetime, timedelta
-from types import StringTypes
 
 import utils
 from Completer import Completer, CompleterTask
@@ -30,9 +29,9 @@ has_sfa_cache_filename="sfa-cache"
 # step methods must take (self) and return a boolean (options is a member of the class)
 
 def standby(minutes, dry_run):
-    utils.header('Entering StandBy for %d mn'%minutes)
+    utils.header('Entering StandBy for {:d} mn'.format(minutes))
     if dry_run:
-        print 'dry_run'
+        print('dry_run')
     else:
         time.sleep(60*minutes)
     return True
@@ -92,7 +91,7 @@ def ignore_result(method):
         ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
         ref_method = TestPlc.__dict__[ref_name]
         result = ref_method(self)
-        print "Actual (but ignored) result for %(ref_name)s is %(result)s" % locals()
+        print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
         return Ignored(result)
     name = method.__name__.replace('_ignore', '').replace('force_', '')
     ignoring.__name__ = name
@@ -199,7 +198,7 @@ class TestPlc:
         'populate', SEP,
         'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
-       'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
         'sfa_get_expires', SEPSFA,
         'plc_db_dump' , 'plc_db_restore', SEP,
@@ -227,16 +226,17 @@ class TestPlc:
     @staticmethod
     def _has_sfa_cached(rpms_url):
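+        # the yes/no answer is cached in the local 'sfa-cache' file so reruns skip the curl probe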
         if os.path.isfile(has_sfa_cache_filename):
-            cached = file(has_sfa_cache_filename).read() == "yes"
-            utils.header("build provides SFA (cached):%s" % cached)
+            with open(has_sfa_cache_filename) as cache:
+                cached = cache.read() == "yes"
+            utils.header("build provides SFA (cached):{}".format(cached))
             return cached
         # warning, we're now building 'sface' so let's be a bit more picky
         # full builds are expected to return with 0 here
         utils.header("Checking if build provides SFA package...")
-        retcod = os.system("curl --silent %s/ | grep -q sfa-"%rpms_url) == 0
+        retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
         encoded = 'yes' if retcod else 'no'
-        with open(has_sfa_cache_filename,'w')as out:
-            out.write(encoded)
+        with open(has_sfa_cache_filename,'w') as cache:
+            cache.write(encoded)
         return retcod
         
     @staticmethod
@@ -254,13 +254,14 @@ class TestPlc:
                 TestPlc.default_steps.remove(step)
 
     def __init__(self, plc_spec, options):
-       self.plc_spec = plc_spec
+        self.plc_spec = plc_spec
         self.options = options
-       self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
+        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
         self.vserverip = plc_spec['vserverip']
         self.vservername = plc_spec['vservername']
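+        # the plc guest answers ssh under the last dash-separated token of its vservername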
-        self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
-       self.apiserver = TestApiserver(self.url, options.dry_run)
+        self.vplchostname = self.vservername.split('-')[-1]
+        self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
+        self.apiserver = TestApiserver(self.url, options.dry_run)
         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
         (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
         
@@ -269,7 +270,7 @@ class TestPlc:
 
     def name(self):
         name = self.plc_spec['name']
-        return "%s.%s" % (name,self.vservername)
+        return "{}.{}".format(name,self.vservername)
 
     def hostname(self):
         return self.plc_spec['host_box']
@@ -280,7 +281,7 @@ class TestPlc:
     # define the API methods on this object through xmlrpc
     # would help, but not strictly necessary
     def connect(self):
-       pass
+        pass
 
     def actual_command_in_guest(self,command, backslash=False):
         raw1 = self.host_to_guest(command)
@@ -306,29 +307,22 @@ class TestPlc:
     # see e.g. plc_start esp. the version for f14
     #command gets run in the plc's vm
     def host_to_guest(self, command):
-        vservername = self.vservername
-        personality = self.options.personality
-        raw = "%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s" % locals()
-        # f14 still needs some extra help
-        if self.options.fcdistro == 'f14':
-            raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" % locals()
-        else:
-            raw +=" -- /usr/bin/env %(command)s" % locals()
-        return raw
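+        # no more 'virsh -c lxc:/// lxc-enter-namespace' : the guest is reachable over plain ssh,
+        # so let TestSsh build the command line targeted at the guest host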
+        ssh_leg = TestSsh(self.vplchostname)
+        return ssh_leg.actual_command(command)
     
     # this /vservers thing is legacy...
     def vm_root_in_host(self):
-        return "/vservers/%s/" % (self.vservername)
+        return "/vservers/{}/".format(self.vservername)
 
     def vm_timestamp_path(self):
-        return "/vservers/%s/%s.timestamp" % (self.vservername,self.vservername)
+        return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
 
     #start/stop the vserver
     def start_guest_in_host(self):
-        return "virsh -c lxc:/// start %s" % (self.vservername)
+        return "virsh -c lxc:/// start {}".format(self.vservername)
     
     def stop_guest_in_host(self):
-        return "virsh -c lxc:/// destroy %s" % (self.vservername)
+        return "virsh -c lxc:/// destroy {}".format(self.vservername)
     
     # xxx quick n dirty
     def run_in_guest_piped(self,local,remote):
@@ -338,21 +332,21 @@ class TestPlc:
     def yum_check_installed(self, rpms):
         if isinstance(rpms, list): 
             rpms=" ".join(rpms)
-        return self.run_in_guest("rpm -q %s"%rpms) == 0
+        return self.run_in_guest("rpm -q {}".format(rpms)) == 0
         
     # does a yum install in the vs, ignore yum retcod, check with rpm
     def yum_install(self, rpms):
         if isinstance(rpms, list): 
             rpms=" ".join(rpms)
-        self.run_in_guest("yum -y install %s" % rpms)
+        self.run_in_guest("yum -y install {}".format(rpms))
         # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
         self.run_in_guest("yum-complete-transaction -y")
         return self.yum_check_installed(rpms)
 
     def auth_root(self):
-       return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
-               'AuthMethod' : 'password',
-               'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
+        return {'Username'   : self.plc_spec['settings']['PLC_ROOT_USER'],
+                'AuthMethod' : 'password',
+                'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
                 'Role'       : self.plc_spec['role'],
                 }
     
@@ -362,27 +356,27 @@ class TestPlc:
                 return site
             if site['site_fields']['login_base'] == sitename:
                 return site
-        raise Exception,"Cannot locate site %s" % sitename
+        raise Exception("Cannot locate site {}".format(sitename))
         
     def locate_node(self, nodename):
         for site in self.plc_spec['sites']:
             for node in site['nodes']:
                 if node['name'] == nodename:
                     return site, node
-        raise Exception, "Cannot locate node %s" % nodename
+        raise Exception("Cannot locate node {}".format(nodename))
         
     def locate_hostname(self, hostname):
         for site in self.plc_spec['sites']:
             for node in site['nodes']:
                 if node['node_fields']['hostname'] == hostname:
                     return(site, node)
-        raise Exception,"Cannot locate hostname %s" % hostname
+        raise Exception("Cannot locate hostname {}".format(hostname))
         
     def locate_key(self, key_name):
         for key in self.plc_spec['keys']:
             if key['key_name'] == key_name:
                 return key
-        raise Exception,"Cannot locate key %s" % key_name
+        raise Exception("Cannot locate key {}".format(key_name))
 
     def locate_private_key_from_key_names(self, key_names):
         # locate the first avail. key
@@ -403,7 +397,7 @@ class TestPlc:
         for slice in self.plc_spec['slices']:
             if slice['slice_fields']['name'] == slicename:
                 return slice
-        raise Exception,"Cannot locate slice %s" % slicename
+        raise Exception("Cannot locate slice {}".format(slicename))
 
     def all_sliver_objs(self):
         result = []
@@ -449,7 +443,7 @@ class TestPlc:
         # transform into a dict { 'host_box' -> [ test_node .. ] }
         result = {}
         for (box,node) in tuples:
-            if not result.has_key(box):
+            if box not in result:
                 result[box] = [node]
             else:
                 result[box].append(node)
@@ -458,15 +452,15 @@ class TestPlc:
     # a step for checking this stuff
     def show_boxes(self):
         'print summary of nodes location'
-        for box,nodes in self.get_BoxNodes().iteritems():
-            print box,":"," + ".join( [ node.name() for node in nodes ] )
+        for box,nodes in self.get_BoxNodes().items():
+            print(box,":"," + ".join( [ node.name() for node in nodes ] ))
         return True
 
     # make this a valid step
     def qemu_kill_all(self):
         'kill all qemu instances on the qemu boxes involved by this setup'
         # this is the brute force version, kill all qemus on that host box
-        for (box,nodes) in self.get_BoxNodes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().items():
             # pass the first nodename, as we don't push template-qemu on testboxes
             nodedir = nodes[0].nodedir()
             TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
@@ -475,7 +469,7 @@ class TestPlc:
     # make this a valid step
     def qemu_list_all(self):
         'list all qemu instances on the qemu boxes involved by this setup'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # this is the brute force version, kill all qemus on that host box
             TestBoxQemu(box, self.options.buildname).qemu_list_all()
         return True
@@ -483,7 +477,7 @@ class TestPlc:
     # kill only the qemus related to this test
     def qemu_list_mine(self):
         'list qemu instances for our nodes'
-        for (box,nodes) in self.get_BoxNodes().iteritems():
+        for (box,nodes) in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.list_qemu()
@@ -492,7 +486,7 @@ class TestPlc:
     # kill only the qemus related to this test
     def qemu_clean_mine(self):
         'cleanup (rm -rf) qemu instances for our nodes'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.qemu_clean()
@@ -501,7 +495,7 @@ class TestPlc:
     # kill only the right qemus
     def qemu_kill_mine(self):
         'kill the qemu instances for our nodes'
-        for box,nodes in self.get_BoxNodes().iteritems():
+        for box,nodes in self.get_BoxNodes().items():
             # the fine-grain version
             for node in nodes:
                 node.kill_qemu()
@@ -521,26 +515,25 @@ class TestPlc:
         "print cut'n paste-able stuff to export env variables to your shell"
         # guess local domain from hostname
         if TestPlc.exported_id > 1: 
-            print "export GUESTHOSTNAME%d=%s" % (TestPlc.exported_id, self.plc_spec['vservername'])
+            print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
             return True
         TestPlc.exported_id += 1
         domain = socket.gethostname().split('.',1)[1]
-        fqdn   = "%s.%s" % (self.plc_spec['host_box'], domain)
-        print "export BUILD=%s" % self.options.buildname
-        print "export PLCHOSTLXC=%s" % fqdn
-        print "export GUESTNAME=%s" % self.plc_spec['vservername']
-        vplcname = self.plc_spec['vservername'].split('-')[-1]
-        print "export GUESTHOSTNAME=%s.%s"%(vplcname, domain)
+        fqdn   = "{}.{}".format(self.plc_spec['host_box'], domain)
+        print("export BUILD={}".format(self.options.buildname))
+        print("export PLCHOSTLXC={}".format(fqdn))
+        print("export GUESTNAME={}".format(self.vservername))
+        print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
         # find hostname of first node
         hostname, qemubox = self.all_node_infos()[0]
-        print "export KVMHOST=%s.%s" % (qemubox, domain)
-        print "export NODE=%s" % (hostname)
+        print("export KVMHOST={}.{}".format(qemubox, domain))
+        print("export NODE={}".format(hostname))
         return True
 
     # entry point
     always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
     def show_pass(self, passno):
-        for (key,val) in self.plc_spec.iteritems():
+        for (key,val) in self.plc_spec.items():
             if not self.options.verbose and key not in TestPlc.always_display_keys:
                 continue
             if passno == 2:
@@ -560,65 +553,65 @@ class TestPlc:
                         self.display_key_spec(key)
             elif passno == 1:
                 if key not in ['sites', 'initscripts', 'slices', 'keys']:
-                    print '+   ', key, ':', val
+                    print('+   ', key, ':', val)
 
     def display_site_spec(self, site):
-        print '+ ======== site', site['site_fields']['name']
-        for k,v in site.iteritems():
+        print('+ ======== site', site['site_fields']['name'])
+        for k,v in site.items():
             if not self.options.verbose and k not in TestPlc.always_display_keys:
                 continue
             if k == 'nodes':
                 if v: 
-                    print '+       ','nodes : ',
+                    print('+       ','nodes : ', end=' ')
                     for node in v:  
-                        print node['node_fields']['hostname'],'',
-                    print ''
+                        print(node['node_fields']['hostname'],'', end=' ')
+                    print('')
             elif k == 'users':
                 if v: 
-                    print '+       users : ',
+                    print('+       users : ', end=' ')
                     for user in v:  
-                        print user['name'],'',
-                    print ''
+                        print(user['name'],'', end=' ')
+                    print('')
             elif k == 'site_fields':
-                print '+       login_base', ':', v['login_base']
+                print('+       login_base', ':', v['login_base'])
             elif k == 'address_fields':
                 pass
             else:
-                print '+       ',
+                print('+       ', end=' ')
                 utils.pprint(k, v)
         
     def display_initscript_spec(self, initscript):
-        print '+ ======== initscript', initscript['initscript_fields']['name']
+        print('+ ======== initscript', initscript['initscript_fields']['name'])
 
     def display_key_spec(self, key):
-        print '+ ======== key', key['key_name']
+        print('+ ======== key', key['key_name'])
 
     def display_slice_spec(self, slice):
-        print '+ ======== slice', slice['slice_fields']['name']
-        for k,v in slice.iteritems():
+        print('+ ======== slice', slice['slice_fields']['name'])
+        for k,v in slice.items():
             if k == 'nodenames':
                 if v: 
-                    print '+       nodes : ',
+                    print('+       nodes : ', end=' ')
                     for nodename in v:  
-                        print nodename,'',
-                    print ''
+                        print(nodename,'', end=' ')
+                    print('')
             elif k == 'usernames':
                 if v: 
-                    print '+       users : ',
+                    print('+       users : ', end=' ')
                     for username in v:  
-                        print username,'',
-                    print ''
+                        print(username,'', end=' ')
+                    print('')
             elif k == 'slice_fields':
-                print '+       fields',':',
-                print 'max_nodes=',v['max_nodes'],
-                print ''
+                print('+       fields',':', end=' ')
+                print('max_nodes=',v['max_nodes'], end=' ')
+                print('')
             else:
-                print '+       ',k,v
+                print('+       ',k,v)
 
     def display_node_spec(self, node):
-        print "+           node=%s host_box=%s" % (node['name'],node['host_box']),
-        print "hostname=", node['node_fields']['hostname'],
-        print "ip=", node['interface_fields']['ip']
+        print("+           node={} host_box={}".format(node['name'], node['host_box']), end=' ')
+        print("hostname=", node['node_fields']['hostname'], end=' ')
+        print("ip=", node['interface_fields']['ip'])
         if self.options.verbose:
             utils.pprint("node details", node, depth=3)
 
@@ -629,19 +622,19 @@ class TestPlc:
 
     @staticmethod
     def display_mapping_plc(plc_spec):
-        print '+ MyPLC',plc_spec['name']
+        print('+ MyPLC',plc_spec['name'])
         # WARNING this would not be right for lxc-based PLC's - should be harmless though
-        print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
-        print '+\tIP = %s/%s' % (plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip'])
+        print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
+        print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
         for site_spec in plc_spec['sites']:
             for node_spec in site_spec['nodes']:
                 TestPlc.display_mapping_node(node_spec)
 
     @staticmethod
     def display_mapping_node(node_spec):
-        print '+   NODE %s' % (node_spec['name'])
-        print '+\tqemu box %s' % node_spec['host_box']
-        print '+\thostname=%s' % node_spec['node_fields']['hostname']
+        print('+   NODE {}'.format(node_spec['name']))
+        print('+\tqemu box {}'.format(node_spec['host_box']))
+        print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
 
     # write a timestamp in /vservers/<>.timestamp
     # cannot be inside the vserver, that causes vserver .. build to cough
@@ -652,18 +645,18 @@ class TestPlc:
         # a first approx. is to store the timestamp close to the VM root like vs does
         stamp_path = self.vm_timestamp_path()
         stamp_dir = os.path.dirname(stamp_path)
-        utils.system(self.test_ssh.actual_command("mkdir -p %s" % stamp_dir))
-        return utils.system(self.test_ssh.actual_command("echo %d > %s" % (now, stamp_path))) == 0
+        utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
+        return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
         
     # this is called inconditionnally at the beginning of the test sequence 
     # just in case this is a rerun, so if the vm is not running it's fine
     def plcvm_delete(self):
         "vserver delete the test myplc"
         stamp_path = self.vm_timestamp_path()
-        self.run_in_host("rm -f %s" % stamp_path)
-        self.run_in_host("virsh -c lxc:// destroy %s" % self.vservername)
-        self.run_in_host("virsh -c lxc:// undefine %s" % self.vservername)
-        self.run_in_host("rm -fr /vservers/%s" % self.vservername)
+        self.run_in_host("rm -f {}".format(stamp_path))
+        self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
+        self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
+        self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
         return True
 
     ### install
@@ -690,25 +683,25 @@ class TestPlc:
         # with the last step (i386) removed
         repo_url = self.options.arch_rpms_url
         for level in [ 'arch' ]:
-           repo_url = os.path.dirname(repo_url)
+            repo_url = os.path.dirname(repo_url)
 
         # invoke initvm (drop support for vs)
         script = "lbuild-initvm.sh"
         script_options = ""
         # pass the vbuild-nightly options to [lv]test-initvm
-        script_options += " -p %s" % self.options.personality
-        script_options += " -d %s" % self.options.pldistro
-        script_options += " -f %s" % self.options.fcdistro
-        script_options += " -r %s" % repo_url
+        script_options += " -p {}".format(self.options.personality)
+        script_options += " -d {}".format(self.options.pldistro)
+        script_options += " -f {}".format(self.options.fcdistro)
+        script_options += " -r {}".format(repo_url)
         vserver_name = self.vservername
         try:
             vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
-            script_options += " -n %s" % vserver_hostname
+            script_options += " -n {}".format(vserver_hostname)
         except:
-            print "Cannot reverse lookup %s" % self.vserverip
-            print "This is considered fatal, as this might pollute the test results"
+            print("Cannot reverse lookup {}".format(self.vserverip))
+            print("This is considered fatal, as this might pollute the test results")
             return False
-        create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s" % locals()
+        create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
         return self.run_in_host(create_vserver) == 0
 
     ### install_rpm 
@@ -725,14 +718,14 @@ class TestPlc:
         elif self.options.personality == "linux64":
             arch = "x86_64"
         else:
-            raise Exception, "Unsupported personality %r"%self.options.personality
-        nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)
+            raise Exception("Unsupported personality {}".format(self.options.personality))
+        nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
 
         pkgs_list=[]
-        pkgs_list.append("slicerepo-%s" % nodefamily)
+        pkgs_list.append("slicerepo-{}".format(nodefamily))
         pkgs_list.append("myplc")
-        pkgs_list.append("noderepo-%s" % nodefamily)
-        pkgs_list.append("nodeimage-%s-plain" % nodefamily)
+        pkgs_list.append("noderepo-{}".format(nodefamily))
+        pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
         pkgs_string=" ".join(pkgs_list)
         return self.yum_install(pkgs_list)
 
@@ -744,15 +737,15 @@ class TestPlc:
     ### 
     def plc_configure(self):
         "run plc-config-tty"
-        tmpname = '%s.plc-config-tty' % self.name()
+        tmpname = '{}.plc-config-tty'.format(self.name())
         with open(tmpname,'w') as fileconf:
-            for (var,value) in self.plc_spec['settings'].iteritems():
-                fileconf.write('e %s\n%s\n'%(var,value))
+            for (var,value) in self.plc_spec['settings'].items():
+                fileconf.write('e {}\n{}\n'.format(var, value))
             fileconf.write('w\n')
             fileconf.write('q\n')
-        utils.system('cat %s' % tmpname)
-        self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
-        utils.system('rm %s' % tmpname)
+        utils.system('cat {}'.format(tmpname))
+        self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
+        utils.system('rm {}'.format(tmpname))
         return True
 
 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
@@ -770,13 +763,13 @@ class TestPlc:
     def start_stop_service(self, service, start_or_stop):
         "utility to start/stop a service with the special trick for f14"
         if self.options.fcdistro != 'f14':
-            return self.run_in_guest("service %s %s" % (service, start_or_stop)) == 0
+            return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
         else:
             # patch /sbin/service so it does not reset environment
             self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
             # this is because our own scripts in turn call service 
-            return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s" % \
-                                     (service, start_or_stop)) == 0
+            return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
+                                     .format(service, start_or_stop)) == 0
 
     def plc_start(self):
         "service plc start"
@@ -800,7 +793,7 @@ class TestPlc:
     def keys_store(self):
         "stores test users ssh keys in keys/"
         for key_spec in self.plc_spec['keys']:
-               TestKey(self,key_spec).store_key()
+                TestKey(self,key_spec).store_key()
         return True
 
     def keys_clean(self):
@@ -820,8 +813,8 @@ class TestPlc:
         overall = True
         prefix = 'debug_ssh_key'
         for ext in ['pub', 'rsa'] :
-            src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
-            dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
+            src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
+            dst = "keys/{vservername}-debug.{ext}".format(**locals())
             if self.test_ssh.fetch(src, dst) != 0:
                 overall=False
         return overall
@@ -838,27 +831,27 @@ class TestPlc:
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite(self,site_spec)
             if (action != "add"):
-                utils.header("Deleting site %s in %s" % (test_site.name(), self.name()))
+                utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
                 test_site.delete_site()
                 # deleted with the site
                 #test_site.delete_users()
                 continue
             else:
-                utils.header("Creating site %s & users in %s" % (test_site.name(), self.name()))
+                utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
                 test_site.create_site()
                 test_site.create_users()
         return True
 
     def delete_all_sites(self):
         "Delete all sites in PLC, and related objects"
-        print 'auth_root', self.auth_root()
+        print('auth_root', self.auth_root())
         sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
         for site in sites:
             # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
             if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
                 continue
             site_id = site['site_id']
-            print 'Deleting site_id', site_id
+            print('Deleting site_id', site_id)
             self.apiserver.DeleteSite(self.auth_root(), site_id)
         return True
 
@@ -873,15 +866,15 @@ class TestPlc:
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite(self, site_spec)
             if action != "add":
-                utils.header("Deleting nodes in site %s" % test_site.name())
+                utils.header("Deleting nodes in site {}".format(test_site.name()))
                 for node_spec in site_spec['nodes']:
                     test_node = TestNode(self, test_site, node_spec)
-                    utils.header("Deleting %s" % test_node.name())
+                    utils.header("Deleting {}".format(test_node.name()))
                     test_node.delete_node()
             else:
-                utils.header("Creating nodes for site %s in %s" % (test_site.name(), self.name()))
+                utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
                 for node_spec in site_spec['nodes']:
-                    utils.pprint('Creating node %s' % node_spec, node_spec)
+                    utils.pprint('Creating node {}'.format(node_spec), node_spec)
                     test_node = TestNode(self, test_site, node_spec)
                     test_node.create_node()
         return True
@@ -897,7 +890,7 @@ class TestPlc:
     @staticmethod
     def translate_timestamp(start, grain, timestamp):
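+        # values below one year are offsets counted in grains from 'start'; larger values are absolute timestamps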
         if timestamp < TestPlc.YEAR:
-            return start+timestamp*grain
+            return start + timestamp*grain
         else:
             return timestamp
 
@@ -909,8 +902,8 @@ class TestPlc:
         "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
         now = int(time.time())
         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
-        print 'API answered grain=', grain
-        start = (now/grain)*grain
+        print('API answered grain=', grain)
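+        # round 'now' down to a grain boundary - '//' keeps the arithmetic integral under python3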
+        start = (now//grain)*grain
         start += grain
         # find out all nodes that are reservable
         nodes = self.all_reservable_nodenames()
@@ -927,22 +920,22 @@ class TestPlc:
             lease_spec['t_from']  = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
-                                                      lease_spec['t_from'],lease_spec['t_until'])
+                                                      lease_spec['t_from'], lease_spec['t_until'])
             if lease_addition['errors']:
-                utils.header("Cannot create leases, %s"%lease_addition['errors'])
+                utils.header("Cannot create leases, {}".format(lease_addition['errors']))
                 ok = False
             else:
-                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)' % \
-                              (nodes, lease_spec['slice'],
-                               lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
-                               lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
+                utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
+                             .format(nodes, lease_spec['slice'],
+                                     lease_spec['t_from'],  TestPlc.timestamp_printable(lease_spec['t_from']),
+                                     lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
                 
         return ok
 
     def delete_leases(self):
         "remove all leases in the myplc side"
         lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
-        utils.header("Cleaning leases %r" % lease_ids)
+        utils.header("Cleaning leases {}".format(lease_ids))
         self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
         return True
 
@@ -953,10 +946,10 @@ class TestPlc:
         for l in leases:
             current = l['t_until'] >= now
             if self.options.verbose or current:
-                utils.header("%s %s from %s until %s" % \
-                             (l['hostname'], l['name'],
-                              TestPlc.timestamp_printable(l['t_from']), 
-                              TestPlc.timestamp_printable(l['t_until'])))
+                utils.header("{} {} from {} until {}"\
+                             .format(l['hostname'], l['name'],
+                                     TestPlc.timestamp_printable(l['t_from']), 
+                                     TestPlc.timestamp_printable(l['t_until'])))
         return True
 
     # create nodegroups if needed, and populate
@@ -967,20 +960,20 @@ class TestPlc:
             test_site = TestSite(self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node = TestNode(self, test_site, node_spec)
-                if node_spec.has_key('nodegroups'):
+                if 'nodegroups' in node_spec:
                     nodegroupnames = node_spec['nodegroups']
-                    if isinstance(nodegroupnames, StringTypes):
+                    if isinstance(nodegroupnames, str):
                         nodegroupnames = [ nodegroupnames ]
                     for nodegroupname in nodegroupnames:
-                        if not groups_dict.has_key(nodegroupname):
+                        if nodegroupname not in groups_dict:
                             groups_dict[nodegroupname] = []
                         groups_dict[nodegroupname].append(test_node.name())
         auth = self.auth_root()
         overall = True
-        for (nodegroupname,group_nodes) in groups_dict.iteritems():
+        for (nodegroupname,group_nodes) in groups_dict.items():
             if action == "add":
-                print 'nodegroups:', 'dealing with nodegroup',\
-                    nodegroupname, 'on nodes', group_nodes
+                print('nodegroups:', 'dealing with nodegroup',\
+                    nodegroupname, 'on nodes', group_nodes)
                 # first, check if the nodetagtype is here
                 tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
                 if tag_types:
@@ -988,22 +981,22 @@ class TestPlc:
                 else:
                     tag_type_id = self.apiserver.AddTagType(auth,
                                                             {'tagname' : nodegroupname,
-                                                             'description' : 'for nodegroup %s' % nodegroupname,
+                                                             'description' : 'for nodegroup {}'.format(nodegroupname),
                                                              'category' : 'test'})
-                print 'located tag (type)', nodegroupname, 'as', tag_type_id
+                print('located tag (type)', nodegroupname, 'as', tag_type_id)
                 # create nodegroup
                 nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
                 if not nodegroups:
                     self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
-                    print 'created nodegroup', nodegroupname, \
-                        'from tagname', nodegroupname, 'and value', 'yes'
+                    print('created nodegroup', nodegroupname, \
+                        'from tagname', nodegroupname, 'and value', 'yes')
                 # set node tag on all nodes, value='yes'
                 for nodename in group_nodes:
                     try:
                         self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                     except:
                         traceback.print_exc()
-                        print 'node', nodename, 'seems to already have tag', nodegroupname
+                        print('node', nodename, 'seems to already have tag', nodegroupname)
                     # check anyway
                     try:
                         expect_yes = self.apiserver.GetNodeTags(auth,
@@ -1011,15 +1004,15 @@ class TestPlc:
                                                                  'tagname'  : nodegroupname},
                                                                 ['value'])[0]['value']
                         if expect_yes != "yes":
-                            print 'Mismatch node tag on node',nodename,'got',expect_yes
+                            print('Mismatch node tag on node',nodename,'got',expect_yes)
                             overall = False
                     except:
                         if not self.options.dry_run:
-                            print 'Cannot find tag', nodegroupname, 'on node', nodename
+                            print('Cannot find tag', nodegroupname, 'on node', nodename)
                             overall = False
             else:
                 try:
-                    print 'cleaning nodegroup', nodegroupname
+                    print('cleaning nodegroup', nodegroupname)
                     self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                 except:
                     traceback.print_exc()
@@ -1058,7 +1051,7 @@ class TestPlc:
     def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
                                silent_minutes, period_seconds = 15):
         if self.options.dry_run:
-            print 'dry_run'
+            print('dry_run')
             return True
 
         class CompleterTaskBootState(CompleterTask):
@@ -1076,16 +1069,16 @@ class TestPlc:
                 except:
                     return False
             def message(self):
-                return "CompleterTaskBootState with node %s" % self.hostname
+                return "CompleterTaskBootState with node {}".format(self.hostname)
             def failure_epilogue(self):
-                print "node %s in state %s - expected %s" %\
-                    (self.hostname, self.last_boot_state, target_boot_state)
+                print("node {} in state {} - expected {}"\
+                    .format(self.hostname, self.last_boot_state, target_boot_state))
                 
         timeout = timedelta(minutes=timeout_minutes)
         graceout = timedelta(minutes=silent_minutes)
         period   = timedelta(seconds=period_seconds)
         # the nodes that haven't checked yet - start with a full list and shrink over time
-        utils.header("checking nodes boot state (expected %s)" % target_boot_state)
+        utils.header("checking nodes boot state (expected {})".format(target_boot_state))
         tasks = [ CompleterTaskBootState(self,hostname) \
                       for (hostname,_) in self.all_node_infos() ]
         message = 'check_boot_state={}'.format(target_boot_state)
@@ -1105,10 +1098,10 @@ class TestPlc:
             def __init__(self, hostname):
                 self.hostname = hostname
             def run(self, silent):
-                command="ping -c 1 -w 1 %s >& /dev/null" % self.hostname
+                command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
                 return utils.system(command, silent=silent) == 0
             def failure_epilogue(self):
-                print "Cannot ping node with name %s" % self.hostname
+                print("Cannot ping node with name {}".format(self.hostname))
         timeout = timedelta(seconds = timeout_seconds)
         graceout = timeout
         period = timedelta(seconds = period_seconds)
@@ -1130,12 +1123,12 @@ class TestPlc:
         if debug: 
             message = "debug"
             completer_message = 'ssh_node_debug'
-            local_key = "keys/%(vservername)s-debug.rsa" % locals()
+            local_key = "keys/{vservername}-debug.rsa".format(**locals())
         else: 
             message = "boot"
             completer_message = 'ssh_node_boot'
-           local_key = "keys/key_admin.rsa"
-        utils.header("checking ssh access to nodes (expected in %s mode)" % message)
+            local_key = "keys/key_admin.rsa"
+        utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
         node_infos = self.all_node_infos()
         tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
                                         boot_state=message, dry_run=self.options.dry_run) \
@@ -1194,19 +1187,19 @@ class TestPlc:
             def actual_run(self):
                 return self.test_sliver.check_initscript_stamp(self.stamp)
             def message(self):
-                return "initscript checker for %s" % self.test_sliver.name()
+                return "initscript checker for {}".format(self.test_sliver.name())
             def failure_epilogue(self):
-                print "initscript stamp %s not found in sliver %s"%\
-                    (self.stamp, self.test_sliver.name())
+                print("initscript stamp {} not found in sliver {}"\
+                    .format(self.stamp, self.test_sliver.name()))
             
         tasks = []
         for slice_spec in self.plc_spec['slices']:
-            if not slice_spec.has_key('initscriptstamp'):
+            if 'initscriptstamp' not in slice_spec:
                 continue
             stamp = slice_spec['initscriptstamp']
             slicename = slice_spec['slice_fields']['name']
             for nodename in slice_spec['nodenames']:
-                print 'nodename', nodename, 'slicename', slicename, 'stamp', stamp
+                print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
                 site,node = self.locate_node(nodename)
                 # xxx - passing the wrong site - probably harmless
                 test_site = TestSite(self, site)
@@ -1216,7 +1209,7 @@ class TestPlc:
                 tasks.append(CompleterTaskInitscript(test_sliver, stamp))
         return Completer(tasks, message='check_initscripts').\
             run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
-           
+            
     def check_initscripts(self):
         "check that the initscripts have triggered"
         return self.do_check_initscripts()
@@ -1224,7 +1217,7 @@ class TestPlc:
     def initscripts(self):
         "create initscripts with PLCAPI"
         for initscript in self.plc_spec['initscripts']:
-            utils.pprint('Adding Initscript in plc %s' % self.plc_spec['name'], initscript)
+            utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
             self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
         return True
 
@@ -1232,12 +1225,12 @@ class TestPlc:
         "delete initscripts with PLCAPI"
         for initscript in self.plc_spec['initscripts']:
             initscript_name = initscript['initscript_fields']['name']
-            print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
+            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
             try:
                 self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
-                print initscript_name, 'deleted'
+                print(initscript_name, 'deleted')
             except:
-                print 'deletion went wrong - probably did not exist'
+                print('deletion went wrong - probably did not exist')
         return True
 
     ### manage slices
@@ -1306,11 +1299,11 @@ class TestPlc:
 
     def _speed_up_slices(self, p, r):
         # create the template on the server-side 
-        template = "%s.nodemanager" % self.name()
+        template = "{}.nodemanager".format(self.name())
         with open(template,"w") as template_file:
-            template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
+            template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
-        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
         self.test_ssh.copy_abs(template, remote)
         # Add a conf file
         if not self.apiserver.GetConfFiles(self.auth_root(),
@@ -1323,11 +1316,11 @@ class TestPlc:
 
     def debug_nodemanager(self):
         "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
-        template = "%s.nodemanager" % self.name()
+        template = "{}.nodemanager".format(self.name())
         with open(template,"w") as template_file:
             template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
         in_vm = "/var/www/html/PlanetLabConf/nodemanager"
-        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+        remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
         self.test_ssh.copy_abs(template, remote)
         return True
 
@@ -1344,7 +1337,7 @@ class TestPlc:
                 return plc.locate_sliver_obj(nodename, slicename)
             except:
                 pass
-        raise Exception, "Cannot locate sliver %s@%s among all PLCs" % (nodename, slicename)
+        raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
 
     # implement this one as a cross step so that we can take advantage of different nodes
     # in multi-plcs mode
@@ -1363,9 +1356,9 @@ class TestPlc:
             def actual_run(self):
                 return self.test_sliver.check_tcp_ready(port = 9999)
             def message(self):
-                return "network ready checker for %s" % self.test_sliver.name()
+                return "network ready checker for {}".format(self.test_sliver.name())
             def failure_epilogue(self):
-                print "could not bind port from sliver %s" % self.test_sliver.name()
+                print("could not bind port from sliver {}".format(self.test_sliver.name()))
 
         sliver_specs = {}
         tasks = []
@@ -1374,10 +1367,10 @@ class TestPlc:
             # locate the TestSliver instances involved, and cache them in the spec instance
             spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
             spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
-            message = "Will check TCP between s=%s and c=%s" % \
-                      (spec['s_sliver'].name(), spec['c_sliver'].name())
+            message = "Will check TCP between s={} and c={}"\
+                      .format(spec['s_sliver'].name(), spec['c_sliver'].name())
             if 'client_connect' in spec:
-                message += " (using %s)" % spec['client_connect']
+                message += " (using {})".format(spec['client_connect'])
             utils.header(message)
             # we need to check network presence in both slivers, but also
             # avoid to insert a sliver several times
@@ -1434,9 +1427,9 @@ class TestPlc:
             def actual_run(self): 
                 return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
             def message(self): 
-                return "System slice %s @ %s" % (slicename, self.test_node.name())
+                return "System slice {} @ {}".format(slicename, self.test_node.name())
             def failure_epilogue(self): 
-                print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
+                print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
         timeout = timedelta(minutes=timeout_minutes)
         silent  = timedelta(0)
         period  = timedelta(seconds=period_seconds)
@@ -1448,7 +1441,7 @@ class TestPlc:
         "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
         # install the stress-test in the plc image
         location = "/usr/share/plc_api/plcsh_stress_test.py"
-        remote = "%s/%s" % (self.vm_root_in_host(), location)
+        remote = "{}/{}".format(self.vm_root_in_host(), location)
         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
         command = location
         command += " -- --check"
@@ -1503,16 +1496,17 @@ class TestPlc:
         utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
         code, cached_rpm_path = \
                 utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
-        utils.header("rpm_path=<<%s>>" % rpm_path)
+        utils.header("rpm_path=<<{}>>".format(rpm_path))
         # just for checking 
-        self.run_in_guest("rpm -i %s" % cached_rpm_path)
+        self.run_in_guest("rpm -i {}".format(cached_rpm_path))
         return self.yum_check_installed("sfa-client")
 
     def sfa_dbclean(self):
         "thoroughly wipes off the SFA database"
         return self.run_in_guest("sfaadmin reg nuke") == 0 or \
             self.run_in_guest("sfa-nuke.py") == 0 or \
-            self.run_in_guest("sfa-nuke-plc.py") == 0
+            self.run_in_guest("sfa-nuke-plc.py") == 0 or \
+            self.run_in_guest("sfaadmin registry nuke") == 0             
 
     def sfa_fsclean(self):
         "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
@@ -1529,7 +1523,7 @@ class TestPlc:
             try:
                 self.apiserver.DeleteSite(self.auth_root(),login_base)
             except:
-                print "Site %s already absent from PLC db"%login_base
+                print("Site {} already absent from PLC db".format(login_base))
 
             for spec_name in ['pi_spec','user_spec']:
                 user_spec = auth_sfa_spec[spec_name]
@@ -1538,10 +1532,10 @@ class TestPlc:
                     self.apiserver.DeletePerson(self.auth_root(),username)
                 except: 
                     # this in fact is expected as sites delete their members
-                    #print "User %s already absent from PLC db"%username
+                    #print "User {} already absent from PLC db".format(username)
                     pass
 
-        print "REMEMBER TO RUN sfa_import AGAIN"
+        print("REMEMBER TO RUN sfa_import AGAIN")
         return True
 
     def sfa_uninstall(self):
@@ -1575,51 +1569,51 @@ class TestPlc:
 
     ###
     def confdir(self):
-        dirname = "conf.%s" % self.plc_spec['name']
+        dirname = "conf.{}".format(self.plc_spec['name'])
         if not os.path.isdir(dirname):
-            utils.system("mkdir -p %s" % dirname)
+            utils.system("mkdir -p {}".format(dirname))
         if not os.path.isdir(dirname):
-            raise Exception,"Cannot create config dir for plc %s" % self.name()
+            raise Exception("Cannot create config dir for plc {}".format(self.name()))
         return dirname
 
     def conffile(self, filename):
-        return "%s/%s" % (self.confdir(),filename)
+        return "{}/{}".format(self.confdir(), filename)
     def confsubdir(self, dirname, clean, dry_run=False):
-        subdirname = "%s/%s" % (self.confdir(),dirname)
+        subdirname = "{}/{}".format(self.confdir(), dirname)
         if clean:
-            utils.system("rm -rf %s" % subdirname)
+            utils.system("rm -rf {}".format(subdirname))
         if not os.path.isdir(subdirname): 
-            utils.system("mkdir -p %s" % subdirname)
+            utils.system("mkdir -p {}".format(subdirname))
         if not dry_run and not os.path.isdir(subdirname):
-            raise "Cannot create config subdir %s for plc %s" % (dirname,self.name())
+            raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
         return subdirname
         
     def conffile_clean(self, filename):
         filename=self.conffile(filename)
-        return utils.system("rm -rf %s" % filename)==0
+        return utils.system("rm -rf {}".format(filename))==0
     
     ###
     def sfa_configure(self):
         "run sfa-config-tty"
         tmpname = self.conffile("sfa-config-tty")
         with open(tmpname,'w') as fileconf:
-            for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
-                fileconf.write('e %s\n%s\n'%(var,value))
+            for (var,value) in self.plc_spec['sfa']['settings'].items():
+                fileconf.write('e {}\n{}\n'.format(var, value))
             fileconf.write('w\n')
             fileconf.write('R\n')
             fileconf.write('q\n')
-        utils.system('cat %s' % tmpname)
-        self.run_in_guest_piped('cat %s' % tmpname, 'sfa-config-tty')
+        utils.system('cat {}'.format(tmpname))
+        self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
         return True
 
     def aggregate_xml_line(self):
         port = self.plc_spec['sfa']['neighbours-port']
-        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
-            (self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
+        return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
+            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
 
     def registry_xml_line(self):
-        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
-            (self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
+        return '<registry addr="{}" hrn="{}" port="12345"/>'\
+            .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
 
 
     # a cross step that takes all other plcs in argument
@@ -1630,18 +1624,18 @@ class TestPlc:
             return True
         agg_fname = self.conffile("agg.xml")
         with open(agg_fname,"w") as out:
-            out.write("<aggregates>%s</aggregates>\n" % \
-                      " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
-        utils.header("(Over)wrote %s" % agg_fname)
+            out.write("<aggregates>{}</aggregates>\n"\
+                      .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
+        utils.header("(Over)wrote {}".format(agg_fname))
         reg_fname=self.conffile("reg.xml")
         with open(reg_fname,"w") as out:
-            out.write("<registries>%s</registries>\n" % \
-                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
-        utils.header("(Over)wrote %s" % reg_fname)
+            out.write("<registries>{}</registries>\n"\
+                      .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
+        utils.header("(Over)wrote {}".format(reg_fname))
         return self.test_ssh.copy_abs(agg_fname,
-                                      '/%s/etc/sfa/aggregates.xml' % self.vm_root_in_host()) == 0 \
+                                      '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
            and self.test_ssh.copy_abs(reg_fname,
-                                      '/%s/etc/sfa/registries.xml' % self.vm_root_in_host()) == 0
+                                      '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
 
     def sfa_import(self):
         "use sfaadmin to import from plc"
@@ -1663,12 +1657,12 @@ class TestPlc:
         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
             test_slice = TestAuthSfa(self, slice_spec)
             dir_basename = os.path.basename(test_slice.sfi_path())
-            dir_name = self.confsubdir("dot-sfi/%s" % dir_basename,
+            dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
                                        clean=True, dry_run=self.options.dry_run)
             test_slice.sfi_configure(dir_name)
             # push into the remote /root/sfi area
             location = test_slice.sfi_path()
-            remote = "%s/%s" % (self.vm_root_in_host(), location)
+            remote = "{}/{}".format(self.vm_root_in_host(), location)
             self.test_ssh.mkdir(remote, abs=True)
             # need to strip last level or remote otherwise we get an extra dir level
             self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
@@ -1687,7 +1681,7 @@ class TestPlc:
         for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
             test_slice = TestAuthSfa(self, slice_spec)
             in_vm = test_slice.sfi_path()
-            remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+            remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
             if self.test_ssh.copy_abs(filename, remote) !=0:
                 overall = False
         return overall
@@ -1751,7 +1745,7 @@ class TestPlc:
         "creates random entries in the PLCAPI"
         # install the stress-test in the plc image
         location = "/usr/share/plc_api/plcsh_stress_test.py"
-        remote = "%s/%s" % (self.vm_root_in_host(), location)
+        remote = "{}/{}".format(self.vm_root_in_host(), location)
         self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
         command = location
         command += " -- --preserve --short-names"
@@ -1783,55 +1777,55 @@ class TestPlc:
         # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
         # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
         # (1.a)
-        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+        print("-------------------- TestPlc.gather_logs : PLC's /var/log")
         self.gather_var_logs()
         # (1.b)
-        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+        print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
         self.gather_pgsql_logs()
         # (1.c)
-        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+        print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
         self.gather_root_sfi()
         # (2) 
-        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
+        print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite(self,site_spec)
             for node_spec in site_spec['nodes']:
                 test_node = TestNode(self, test_site, node_spec)
                 test_node.gather_qemu_logs()
         # (3)
-        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+        print("-------------------- TestPlc.gather_logs : nodes's /var/log")
         self.gather_nodes_var_logs()
         # (4)
-        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+        print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
         self.gather_slivers_var_logs()
         return True
 
     def gather_slivers_var_logs(self):
         for test_sliver in self.all_sliver_objs():
             remote = test_sliver.tar_var_logs()
-            utils.system("mkdir -p logs/sliver.var-log.%s" % test_sliver.name())
-            command = remote + " | tar -C logs/sliver.var-log.%s -xf -" % test_sliver.name()
+            utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
+            command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
             utils.system(command)
         return True
 
     def gather_var_logs(self):
-        utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
+        utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")        
-        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
+        command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
         utils.system(command)
-        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name()
+        command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
         utils.system(command)
 
     def gather_pgsql_logs(self):
-        utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
+        utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
         to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")        
-        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
+        command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
         utils.system(command)
 
     def gather_root_sfi(self):
-        utils.system("mkdir -p logs/sfi.%s"%self.name())
+        utils.system("mkdir -p logs/sfi.{}".format(self.name()))
         to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")        
-        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
+        command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
         utils.system(command)
 
     def gather_nodes_var_logs(self):
@@ -1841,8 +1835,8 @@ class TestPlc:
                 test_node = TestNode(self, test_site, node_spec)
                 test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                 command = test_ssh.actual_command("tar -C /var/log -cf - .")
-                command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
-                utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
+                command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
+                utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
                 utils.system(command)
 
 
@@ -1851,19 +1845,19 @@ class TestPlc:
         # uses options.dbname if it is found
         try:
             name = self.options.dbname
-            if not isinstance(name, StringTypes):
+            if not isinstance(name, str):
                 raise Exception
         except:
             t = datetime.now()
             d = t.date()
             name = str(d)
-        return "/root/%s-%s.sql" % (database, name)
+        return "/root/{}-{}.sql".format(database, name)
 
     def plc_db_dump(self):
         'dump the planetlab5 DB in /root in the PLC - filename has time'
         dump=self.dbfile("planetab5")
         self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
-        utils.header('Dumped planetlab5 database in %s' % dump)
+        utils.header('Dumped planetlab5 database in {}'.format(dump))
         return True
 
     def plc_db_restore(self):