X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=system%2FTestPlc.py;h=687e4d70ca0c7537171d7c6ba7e51caaeeddf4e3;hb=d9a55ada6d0eeb7732863fc50a7564294d57ac70;hp=61ae1df42e0c9990e2ae17801a4fce0b7b2b9716;hpb=ff1fa20416428477d21e053296517839081e316f;p=tests.git
diff --git a/system/TestPlc.py b/system/TestPlc.py
index 61ae1df..687e4d7 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -7,7 +7,6 @@ import os, os.path
import traceback
import socket
from datetime import datetime, timedelta
-from types import StringTypes
import utils
from Completer import Completer, CompleterTask
@@ -23,14 +22,16 @@ from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
+from TestBonding import TestBonding
+
has_sfa_cache_filename="sfa-cache"
# step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes, dry_run):
- utils.header('Entering StandBy for %d mn'%minutes)
+ utils.header('Entering StandBy for {:d} mn'.format(minutes))
if dry_run:
- print 'dry_run'
+ print('dry_run')
else:
time.sleep(60*minutes)
return True
@@ -72,6 +73,17 @@ def slice_mapper(method):
map_on_slices.__doc__ = TestSlice.__dict__[method.__name__].__doc__
return map_on_slices
+def bonding_redirector(method):
+ bonding_name = method.__name__.replace('bonding_', '')
+ def redirect(self):
+ bonding_method = TestBonding.__dict__[bonding_name]
+ return bonding_method(self.test_bonding)
+ # maintain __name__ for ignore_result
+ redirect.__name__ = method.__name__
+ # restore the doc text
+ redirect.__doc__ = TestBonding.__dict__[bonding_name].__doc__
+ return redirect
+
# run a step but return True so that we can go on
def ignore_result(method):
def ignoring(self):
@@ -79,7 +91,7 @@ def ignore_result(method):
ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
ref_method = TestPlc.__dict__[ref_name]
result = ref_method(self)
- print "Actual (but ignored) result for %(ref_name)s is %(result)s" % locals()
+ print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
return Ignored(result)
name = method.__name__.replace('_ignore', '').replace('force_', '')
ignoring.__name__ = name
@@ -149,15 +161,16 @@ class TestPlc:
# keep this our of the way for now
'check_vsys_defaults_ignore', SEP,
# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', SEP,
- 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
- 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+ 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
+ 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
- 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+ 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
+ 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
@@ -184,15 +197,23 @@ class TestPlc:
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
'populate', SEP,
- 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+ 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
+ 'nodeflavour_show','nodedistro_f14','nodedistro_f18', SEP,
+ 'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
- 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+ 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
'sfa_get_expires', SEPSFA,
'plc_db_dump' , 'plc_db_restore', SEP,
'check_netflow','check_drl', SEP,
'debug_nodemanager', 'slice_fs_present', SEP,
'standby_1_through_20','yes','no',SEP,
+ 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
+ ]
+ default_bonding_steps = [
+ 'bonding_init_partial',
+ 'bonding_add_yum',
+ 'bonding_install_rpms', SEP,
]
@staticmethod
@@ -209,16 +230,17 @@ class TestPlc:
@staticmethod
def _has_sfa_cached(rpms_url):
if os.path.isfile(has_sfa_cache_filename):
- cached = file(has_sfa_cache_filename).read() == "yes"
- utils.header("build provides SFA (cached):%s" % cached)
+ with open(has_sfa_cache_filename) as cache:
+ cached = cache.read() == "yes"
+ utils.header("build provides SFA (cached):{}".format(cached))
return cached
# warning, we're now building 'sface' so let's be a bit more picky
# full builds are expected to return with 0 here
utils.header("Checking if build provides SFA package...")
- retcod = os.system("curl --silent %s/ | grep -q sfa-"%rpms_url) == 0
+ retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
encoded = 'yes' if retcod else 'no'
- with open(has_sfa_cache_filename,'w')as out:
- out.write(encoded)
+ with open(has_sfa_cache_filename,'w') as cache:
+ cache.write(encoded)
return retcod
@staticmethod
@@ -236,13 +258,14 @@ class TestPlc:
TestPlc.default_steps.remove(step)
def __init__(self, plc_spec, options):
- self.plc_spec = plc_spec
+ self.plc_spec = plc_spec
self.options = options
- self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
+ self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
self.vserverip = plc_spec['vserverip']
self.vservername = plc_spec['vservername']
- self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
- self.apiserver = TestApiserver(self.url, options.dry_run)
+ self.vplchostname = self.vservername.split('-')[-1]
+ self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
+ self.apiserver = TestApiserver(self.url, options.dry_run)
(self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
(self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
@@ -251,7 +274,7 @@ class TestPlc:
def name(self):
name = self.plc_spec['name']
- return "%s.%s" % (name,self.vservername)
+ return "{}.{}".format(name,self.vservername)
def hostname(self):
return self.plc_spec['host_box']
@@ -262,7 +285,7 @@ class TestPlc:
# define the API methods on this object through xmlrpc
# would help, but not strictly necessary
def connect(self):
- pass
+ pass
def actual_command_in_guest(self,command, backslash=False):
raw1 = self.host_to_guest(command)
@@ -288,29 +311,22 @@ class TestPlc:
# see e.g. plc_start esp. the version for f14
#command gets run in the plc's vm
def host_to_guest(self, command):
- vservername = self.vservername
- personality = self.options.personality
- raw = "%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s" % locals()
- # f14 still needs some extra help
- if self.options.fcdistro == 'f14':
- raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" % locals()
- else:
- raw +=" -- /usr/bin/env %(command)s" % locals()
- return raw
+ ssh_leg = TestSsh(self.vplchostname)
+ return ssh_leg.actual_command(command, keep_stdin=True)
# this /vservers thing is legacy...
def vm_root_in_host(self):
- return "/vservers/%s/" % (self.vservername)
+ return "/vservers/{}/".format(self.vservername)
def vm_timestamp_path(self):
- return "/vservers/%s/%s.timestamp" % (self.vservername,self.vservername)
+ return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
#start/stop the vserver
def start_guest_in_host(self):
- return "virsh -c lxc:/// start %s" % (self.vservername)
+ return "virsh -c lxc:/// start {}".format(self.vservername)
def stop_guest_in_host(self):
- return "virsh -c lxc:/// destroy %s" % (self.vservername)
+ return "virsh -c lxc:/// destroy {}".format(self.vservername)
# xxx quick n dirty
def run_in_guest_piped(self,local,remote):
@@ -320,21 +336,21 @@ class TestPlc:
def yum_check_installed(self, rpms):
if isinstance(rpms, list):
rpms=" ".join(rpms)
- return self.run_in_guest("rpm -q %s"%rpms) == 0
+ return self.run_in_guest("rpm -q {}".format(rpms)) == 0
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install(self, rpms):
if isinstance(rpms, list):
rpms=" ".join(rpms)
- self.run_in_guest("yum -y install %s" % rpms)
+ self.run_in_guest("yum -y install {}".format(rpms))
# yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
self.run_in_guest("yum-complete-transaction -y")
return self.yum_check_installed(rpms)
def auth_root(self):
- return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
- 'AuthMethod' : 'password',
- 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
+ return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
+ 'AuthMethod' : 'password',
+ 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
'Role' : self.plc_spec['role'],
}
@@ -344,27 +360,27 @@ class TestPlc:
return site
if site['site_fields']['login_base'] == sitename:
return site
- raise Exception,"Cannot locate site %s" % sitename
+ raise Exception("Cannot locate site {}".format(sitename))
def locate_node(self, nodename):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['name'] == nodename:
return site, node
- raise Exception, "Cannot locate node %s" % nodename
+ raise Exception("Cannot locate node {}".format(nodename))
def locate_hostname(self, hostname):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['node_fields']['hostname'] == hostname:
return(site, node)
- raise Exception,"Cannot locate hostname %s" % hostname
+ raise Exception("Cannot locate hostname {}".format(hostname))
def locate_key(self, key_name):
for key in self.plc_spec['keys']:
if key['key_name'] == key_name:
return key
- raise Exception,"Cannot locate key %s" % key_name
+ raise Exception("Cannot locate key {}".format(key_name))
def locate_private_key_from_key_names(self, key_names):
# locate the first avail. key
@@ -385,7 +401,7 @@ class TestPlc:
for slice in self.plc_spec['slices']:
if slice['slice_fields']['name'] == slicename:
return slice
- raise Exception,"Cannot locate slice %s" % slicename
+ raise Exception("Cannot locate slice {}".format(slicename))
def all_sliver_objs(self):
result = []
@@ -431,7 +447,7 @@ class TestPlc:
# transform into a dict { 'host_box' -> [ test_node .. ] }
result = {}
for (box,node) in tuples:
- if not result.has_key(box):
+ if box not in result:
result[box] = [node]
else:
result[box].append(node)
@@ -440,15 +456,15 @@ class TestPlc:
# a step for checking this stuff
def show_boxes(self):
'print summary of nodes location'
- for box,nodes in self.get_BoxNodes().iteritems():
- print box,":"," + ".join( [ node.name() for node in nodes ] )
+ for box,nodes in self.get_BoxNodes().items():
+ print(box,":"," + ".join( [ node.name() for node in nodes ] ))
return True
# make this a valid step
def qemu_kill_all(self):
'kill all qemu instances on the qemu boxes involved by this setup'
# this is the brute force version, kill all qemus on that host box
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# pass the first nodename, as we don't push template-qemu on testboxes
nodedir = nodes[0].nodedir()
TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
@@ -457,7 +473,7 @@ class TestPlc:
# make this a valid step
def qemu_list_all(self):
'list all qemu instances on the qemu boxes involved by this setup'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# this is the brute force version, kill all qemus on that host box
TestBoxQemu(box, self.options.buildname).qemu_list_all()
return True
@@ -465,7 +481,7 @@ class TestPlc:
# kill only the qemus related to this test
def qemu_list_mine(self):
'list qemu instances for our nodes'
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.list_qemu()
@@ -474,7 +490,7 @@ class TestPlc:
# kill only the qemus related to this test
def qemu_clean_mine(self):
'cleanup (rm -rf) qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.qemu_clean()
@@ -483,7 +499,7 @@ class TestPlc:
# kill only the right qemus
def qemu_kill_mine(self):
'kill the qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.kill_qemu()
@@ -503,26 +519,25 @@ class TestPlc:
"print cut'n paste-able stuff to export env variables to your shell"
# guess local domain from hostname
if TestPlc.exported_id > 1:
- print "export GUESTHOSTNAME%d=%s" % (TestPlc.exported_id, self.plc_spec['vservername'])
+ print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
return True
TestPlc.exported_id += 1
domain = socket.gethostname().split('.',1)[1]
- fqdn = "%s.%s" % (self.plc_spec['host_box'],domain)
- print "export BUILD=%s" % self.options.buildname
- print "export PLCHOSTLXC=%s" % fqdn
- print "export GUESTNAME=%s" % self.plc_spec['vservername']
- vplcname = self.plc_spec['vservername'].split('-')[-1]
- print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
+ fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
+ print("export BUILD={}".format(self.options.buildname))
+ print("export PLCHOSTLXC={}".format(fqdn))
+ print("export GUESTNAME={}".format(self.vservername))
+ print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
# find hostname of first node
- hostname,qemubox = self.all_node_infos()[0]
- print "export KVMHOST=%s.%s" % (qemubox,domain)
- print "export NODE=%s" % (hostname)
+ hostname, qemubox = self.all_node_infos()[0]
+ print("export KVMHOST={}.{}".format(qemubox, domain))
+ print("export NODE={}".format(hostname))
return True
# entry point
- always_display_keys=['PLC_WWW_HOST','nodes','sites',]
+ always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
def show_pass(self, passno):
- for (key,val) in self.plc_spec.iteritems():
+ for (key,val) in self.plc_spec.items():
if not self.options.verbose and key not in TestPlc.always_display_keys:
continue
if passno == 2:
@@ -531,76 +546,76 @@ class TestPlc:
self.display_site_spec(site)
for node in site['nodes']:
self.display_node_spec(node)
- elif key=='initscripts':
+ elif key == 'initscripts':
for initscript in val:
self.display_initscript_spec(initscript)
- elif key=='slices':
+ elif key == 'slices':
for slice in val:
self.display_slice_spec(slice)
- elif key=='keys':
+ elif key == 'keys':
for key in val:
self.display_key_spec(key)
elif passno == 1:
if key not in ['sites', 'initscripts', 'slices', 'keys']:
- print '+ ',key,':',val
+ print('+ ', key, ':', val)
def display_site_spec(self, site):
- print '+ ======== site', site['site_fields']['name']
- for k,v in site.iteritems():
+ print('+ ======== site', site['site_fields']['name'])
+ for k,v in site.items():
if not self.options.verbose and k not in TestPlc.always_display_keys:
continue
if k == 'nodes':
if v:
- print '+ ','nodes : ',
+ print('+ ','nodes : ', end=' ')
for node in v:
- print node['node_fields']['hostname'],'',
- print ''
+ print(node['node_fields']['hostname'],'', end=' ')
+ print('')
elif k == 'users':
if v:
- print '+ users : ',
+ print('+ users : ', end=' ')
for user in v:
- print user['name'],'',
- print ''
+ print(user['name'],'', end=' ')
+ print('')
elif k == 'site_fields':
- print '+ login_base',':',v['login_base']
+ print('+ login_base', ':', v['login_base'])
elif k == 'address_fields':
pass
else:
- print '+ ',
+ print('+ ', end=' ')
utils.pprint(k, v)
def display_initscript_spec(self, initscript):
- print '+ ======== initscript', initscript['initscript_fields']['name']
+ print('+ ======== initscript', initscript['initscript_fields']['name'])
def display_key_spec(self, key):
- print '+ ======== key', key['key_name']
+ print('+ ======== key', key['key_name'])
def display_slice_spec(self, slice):
- print '+ ======== slice', slice['slice_fields']['name']
- for k,v in slice.iteritems():
+ print('+ ======== slice', slice['slice_fields']['name'])
+ for k,v in slice.items():
if k == 'nodenames':
if v:
- print '+ nodes : ',
+ print('+ nodes : ', end=' ')
for nodename in v:
- print nodename,'',
- print ''
+ print(nodename,'', end=' ')
+ print('')
elif k == 'usernames':
if v:
- print '+ users : ',
+ print('+ users : ', end=' ')
for username in v:
- print username,'',
- print ''
+ print(username,'', end=' ')
+ print('')
elif k == 'slice_fields':
- print '+ fields',':',
- print 'max_nodes=',v['max_nodes'],
- print ''
+ print('+ fields',':', end=' ')
+ print('max_nodes=',v['max_nodes'], end=' ')
+ print('')
else:
- print '+ ',k,v
+ print('+ ',k,v)
def display_node_spec(self, node):
- print "+ node=%s host_box=%s" % (node['name'],node['host_box']),
- print "hostname=", node['node_fields']['hostname'],
- print "ip=", node['interface_fields']['ip']
+ print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
+ print("hostname=", node['node_fields']['hostname'], end=' ')
+ print("ip=", node['interface_fields']['ip'])
if self.options.verbose:
utils.pprint("node details", node, depth=3)
@@ -611,19 +626,19 @@ class TestPlc:
@staticmethod
def display_mapping_plc(plc_spec):
- print '+ MyPLC',plc_spec['name']
+ print('+ MyPLC',plc_spec['name'])
# WARNING this would not be right for lxc-based PLC's - should be harmless though
- print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
- print '+\tIP = %s/%s' % (plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip'])
+ print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
+ print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
for site_spec in plc_spec['sites']:
for node_spec in site_spec['nodes']:
TestPlc.display_mapping_node(node_spec)
@staticmethod
def display_mapping_node(node_spec):
- print '+ NODE %s' % (node_spec['name'])
- print '+\tqemu box %s' % node_spec['host_box']
- print '+\thostname=%s' % node_spec['node_fields']['hostname']
+ print('+ NODE {}'.format(node_spec['name']))
+ print('+\tqemu box {}'.format(node_spec['host_box']))
+ print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
@@ -634,18 +649,18 @@ class TestPlc:
# a first approx. is to store the timestamp close to the VM root like vs does
stamp_path = self.vm_timestamp_path()
stamp_dir = os.path.dirname(stamp_path)
- utils.system(self.test_ssh.actual_command("mkdir -p %s" % stamp_dir))
- return utils.system(self.test_ssh.actual_command("echo %d > %s" % (now, stamp_path))) == 0
+ utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
+ return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
# this is called inconditionnally at the beginning of the test sequence
# just in case this is a rerun, so if the vm is not running it's fine
def plcvm_delete(self):
"vserver delete the test myplc"
stamp_path = self.vm_timestamp_path()
- self.run_in_host("rm -f %s" % stamp_path)
- self.run_in_host("virsh -c lxc:// destroy %s" % self.vservername)
- self.run_in_host("virsh -c lxc:// undefine %s" % self.vservername)
- self.run_in_host("rm -fr /vservers/%s" % self.vservername)
+ self.run_in_host("rm -f {}".format(stamp_path))
+ self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
+ self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
+ self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
return True
### install
@@ -672,34 +687,32 @@ class TestPlc:
# with the last step (i386) removed
repo_url = self.options.arch_rpms_url
for level in [ 'arch' ]:
- repo_url = os.path.dirname(repo_url)
+ repo_url = os.path.dirname(repo_url)
# invoke initvm (drop support for vs)
script = "lbuild-initvm.sh"
script_options = ""
# pass the vbuild-nightly options to [lv]test-initvm
- script_options += " -p %s" % self.options.personality
- script_options += " -d %s" % self.options.pldistro
- script_options += " -f %s" % self.options.fcdistro
- script_options += " -r %s" % repo_url
+ script_options += " -p {}".format(self.options.personality)
+ script_options += " -d {}".format(self.options.pldistro)
+ script_options += " -f {}".format(self.options.fcdistro)
+ script_options += " -r {}".format(repo_url)
vserver_name = self.vservername
try:
vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
- script_options += " -n %s" % vserver_hostname
+ script_options += " -n {}".format(vserver_hostname)
except:
- print "Cannot reverse lookup %s" % self.vserverip
- print "This is considered fatal, as this might pollute the test results"
+ print("Cannot reverse lookup {}".format(self.vserverip))
+ print("This is considered fatal, as this might pollute the test results")
return False
- create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s" % locals()
+ create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
return self.run_in_host(create_vserver) == 0
### install_rpm
def plc_install(self):
- "yum install myplc, noderepo, and the plain bootstrapfs"
-
- # workaround for getting pgsql8.2 on centos5
- if self.options.fcdistro == "centos5":
- self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+ """
+ yum install myplc, noderepo
+ """
# compute nodefamily
if self.options.personality == "linux32":
@@ -707,17 +720,49 @@ class TestPlc:
elif self.options.personality == "linux64":
arch = "x86_64"
else:
- raise Exception, "Unsupported personality %r"%self.options.personality
- nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)
+ raise Exception("Unsupported personality {}".format(self.options.personality))
+ nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
pkgs_list=[]
- pkgs_list.append("slicerepo-%s" % nodefamily)
+ pkgs_list.append("slicerepo-{}".format(nodefamily))
pkgs_list.append("myplc")
- pkgs_list.append("noderepo-%s" % nodefamily)
- pkgs_list.append("nodeimage-%s-plain" % nodefamily)
+ pkgs_list.append("noderepo-{}".format(nodefamily))
pkgs_string=" ".join(pkgs_list)
return self.yum_install(pkgs_list)
+ def install_syslinux6(self):
+ """
+ install syslinux6 from the fedora21 release
+ """
+ key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+ rpms = [
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+ ]
+ # this can be done several times
+ self.run_in_guest("rpm --import {key}".format(**locals()))
+ return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+ def bonding_builds(self):
+ """
+ list /etc/yum.repos.d on the myplc side
+ """
+ self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+ return True
+
+ def bonding_nodes(self):
+ """
+ List nodes known to the myplc together with their nodefamiliy
+ """
+ print("---------------------------------------- nodes")
+ for node in self.apiserver.GetNodes(self.auth_root()):
+ print("{} -> {}".format(node['hostname'],
+ self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
+ print("---------------------------------------- nodes")
+
+
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
@@ -726,15 +771,15 @@ class TestPlc:
###
def plc_configure(self):
"run plc-config-tty"
- tmpname = '%s.plc-config-tty' % self.name()
+ tmpname = '{}.plc-config-tty'.format(self.name())
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['settings'].iteritems():
- fileconf.write('e %s\n%s\n'%(var,value))
+ for (var,value) in self.plc_spec['settings'].items():
+ fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('q\n')
- utils.system('cat %s' % tmpname)
- self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
- utils.system('rm %s' % tmpname)
+ utils.system('cat {}'.format(tmpname))
+ self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
+ utils.system('rm {}'.format(tmpname))
return True
# f14 is a bit odd in this respect, although this worked fine in guests up to f18
@@ -752,13 +797,13 @@ class TestPlc:
def start_stop_service(self, service, start_or_stop):
"utility to start/stop a service with the special trick for f14"
if self.options.fcdistro != 'f14':
- return self.run_in_guest("service %s %s" % (service, start_or_stop)) == 0
+ return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
else:
# patch /sbin/service so it does not reset environment
self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
# this is because our own scripts in turn call service
- return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s" % \
- (service, start_or_stop)) == 0
+ return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
+ .format(service, start_or_stop)) == 0
def plc_start(self):
"service plc start"
@@ -782,7 +827,7 @@ class TestPlc:
def keys_store(self):
"stores test users ssh keys in keys/"
for key_spec in self.plc_spec['keys']:
- TestKey(self,key_spec).store_key()
+ TestKey(self,key_spec).store_key()
return True
def keys_clean(self):
@@ -802,8 +847,8 @@ class TestPlc:
overall = True
prefix = 'debug_ssh_key'
for ext in ['pub', 'rsa'] :
- src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
- dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
+ src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
+ dst = "keys/{vservername}-debug.{ext}".format(**locals())
if self.test_ssh.fetch(src, dst) != 0:
overall=False
return overall
@@ -820,27 +865,27 @@ class TestPlc:
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self,site_spec)
if (action != "add"):
- utils.header("Deleting site %s in %s" % (test_site.name(), self.name()))
+ utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
test_site.delete_site()
# deleted with the site
#test_site.delete_users()
continue
else:
- utils.header("Creating site %s & users in %s" % (test_site.name(), self.name()))
+ utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
test_site.create_site()
test_site.create_users()
return True
def delete_all_sites(self):
"Delete all sites in PLC, and related objects"
- print 'auth_root', self.auth_root()
+ print('auth_root', self.auth_root())
sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
for site in sites:
# keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
continue
site_id = site['site_id']
- print 'Deleting site_id', site_id
+ print('Deleting site_id', site_id)
self.apiserver.DeleteSite(self.auth_root(), site_id)
return True
@@ -855,15 +900,15 @@ class TestPlc:
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self, site_spec)
if action != "add":
- utils.header("Deleting nodes in site %s" % test_site.name())
+ utils.header("Deleting nodes in site {}".format(test_site.name()))
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
- utils.header("Deleting %s" % test_node.name())
+ utils.header("Deleting {}".format(test_node.name()))
test_node.delete_node()
else:
- utils.header("Creating nodes for site %s in %s" % (test_site.name(), self.name()))
+ utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
for node_spec in site_spec['nodes']:
- utils.pprint('Creating node %s' % node_spec, node_spec)
+ utils.pprint('Creating node {}'.format(node_spec), node_spec)
test_node = TestNode(self, test_site, node_spec)
test_node.create_node()
return True
@@ -879,7 +924,7 @@ class TestPlc:
@staticmethod
def translate_timestamp(start, grain, timestamp):
if timestamp < TestPlc.YEAR:
- return start+timestamp*grain
+ return start + timestamp*grain
else:
return timestamp
@@ -891,8 +936,8 @@ class TestPlc:
"create leases (on reservable nodes only, use e.g. run -c default -c resa)"
now = int(time.time())
grain = self.apiserver.GetLeaseGranularity(self.auth_root())
- print 'API answered grain=', grain
- start = (now/grain)*grain
+ print('API answered grain=', grain)
+ start = (now//grain)*grain
start += grain
# find out all nodes that are reservable
nodes = self.all_reservable_nodenames()
@@ -909,22 +954,22 @@ class TestPlc:
lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
- lease_spec['t_from'],lease_spec['t_until'])
+ lease_spec['t_from'], lease_spec['t_until'])
if lease_addition['errors']:
- utils.header("Cannot create leases, %s"%lease_addition['errors'])
+ utils.header("Cannot create leases, {}".format(lease_addition['errors']))
ok = False
else:
- utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)' % \
- (nodes, lease_spec['slice'],
- lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
- lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
+ utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
+ .format(nodes, lease_spec['slice'],
+ lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
+ lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
return ok
def delete_leases(self):
"remove all leases in the myplc side"
lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
- utils.header("Cleaning leases %r" % lease_ids)
+ utils.header("Cleaning leases {}".format(lease_ids))
self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
return True
@@ -935,10 +980,10 @@ class TestPlc:
for l in leases:
current = l['t_until'] >= now
if self.options.verbose or current:
- utils.header("%s %s from %s until %s" % \
- (l['hostname'], l['name'],
- TestPlc.timestamp_printable(l['t_from']),
- TestPlc.timestamp_printable(l['t_until'])))
+ utils.header("{} {} from {} until {}"\
+ .format(l['hostname'], l['name'],
+ TestPlc.timestamp_printable(l['t_from']),
+ TestPlc.timestamp_printable(l['t_until'])))
return True
# create nodegroups if needed, and populate
@@ -949,20 +994,20 @@ class TestPlc:
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
- if node_spec.has_key('nodegroups'):
+ if 'nodegroups' in node_spec:
nodegroupnames = node_spec['nodegroups']
- if isinstance(nodegroupnames, StringTypes):
+ if isinstance(nodegroupnames, str):
nodegroupnames = [ nodegroupnames ]
for nodegroupname in nodegroupnames:
- if not groups_dict.has_key(nodegroupname):
+ if nodegroupname not in groups_dict:
groups_dict[nodegroupname] = []
groups_dict[nodegroupname].append(test_node.name())
auth = self.auth_root()
overall = True
- for (nodegroupname,group_nodes) in groups_dict.iteritems():
+ for (nodegroupname,group_nodes) in groups_dict.items():
if action == "add":
- print 'nodegroups:', 'dealing with nodegroup',\
- nodegroupname, 'on nodes', group_nodes
+ print('nodegroups:', 'dealing with nodegroup',\
+ nodegroupname, 'on nodes', group_nodes)
# first, check if the nodetagtype is here
tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
if tag_types:
@@ -970,22 +1015,22 @@ class TestPlc:
else:
tag_type_id = self.apiserver.AddTagType(auth,
{'tagname' : nodegroupname,
- 'description' : 'for nodegroup %s' % nodegroupname,
+ 'description' : 'for nodegroup {}'.format(nodegroupname),
'category' : 'test'})
- print 'located tag (type)', nodegroupname, 'as', tag_type_id
+ print('located tag (type)', nodegroupname, 'as', tag_type_id)
# create nodegroup
nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
if not nodegroups:
self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
- print 'created nodegroup', nodegroupname, \
- 'from tagname', nodegroupname, 'and value', 'yes'
+ print('created nodegroup', nodegroupname, \
+ 'from tagname', nodegroupname, 'and value', 'yes')
# set node tag on all nodes, value='yes'
for nodename in group_nodes:
try:
self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
except:
traceback.print_exc()
- print 'node', nodename, 'seems to already have tag', nodegroupname
+ print('node', nodename, 'seems to already have tag', nodegroupname)
# check anyway
try:
expect_yes = self.apiserver.GetNodeTags(auth,
@@ -993,15 +1038,15 @@ class TestPlc:
'tagname' : nodegroupname},
['value'])[0]['value']
if expect_yes != "yes":
- print 'Mismatch node tag on node',nodename,'got',expect_yes
+ print('Mismatch node tag on node',nodename,'got',expect_yes)
overall = False
except:
if not self.options.dry_run:
- print 'Cannot find tag', nodegroupname, 'on node', nodename
+ print('Cannot find tag', nodegroupname, 'on node', nodename)
overall = False
else:
try:
- print 'cleaning nodegroup', nodegroupname
+ print('cleaning nodegroup', nodegroupname)
self.apiserver.DeleteNodeGroup(auth, nodegroupname)
except:
traceback.print_exc()
@@ -1040,7 +1085,7 @@ class TestPlc:
def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
silent_minutes, period_seconds = 15):
if self.options.dry_run:
- print 'dry_run'
+ print('dry_run')
return True
class CompleterTaskBootState(CompleterTask):
@@ -1058,16 +1103,16 @@ class TestPlc:
except:
return False
def message(self):
- return "CompleterTaskBootState with node %s" % self.hostname
+ return "CompleterTaskBootState with node {}".format(self.hostname)
def failure_epilogue(self):
- print "node %s in state %s - expected %s" %\
- (self.hostname, self.last_boot_state, target_boot_state)
+ print("node {} in state {} - expected {}"\
+ .format(self.hostname, self.last_boot_state, target_boot_state))
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
period = timedelta(seconds=period_seconds)
# the nodes that haven't checked yet - start with a full list and shrink over time
- utils.header("checking nodes boot state (expected %s)" % target_boot_state)
+ utils.header("checking nodes boot state (expected {})".format(target_boot_state))
tasks = [ CompleterTaskBootState(self,hostname) \
for (hostname,_) in self.all_node_infos() ]
message = 'check_boot_state={}'.format(target_boot_state)
@@ -1082,15 +1127,15 @@ class TestPlc:
return True
# probing nodes
- def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+ def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
class CompleterTaskPingNode(CompleterTask):
def __init__(self, hostname):
self.hostname = hostname
def run(self, silent):
- command="ping -c 1 -w 1 %s >& /dev/null" % self.hostname
+ command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
return utils.system(command, silent=silent) == 0
def failure_epilogue(self):
- print "Cannot ping node with name %s" % self.hostname
+ print("Cannot ping node with name {}".format(self.hostname))
timeout = timedelta(seconds = timeout_seconds)
graceout = timeout
period = timedelta(seconds = period_seconds)
@@ -1112,12 +1157,12 @@ class TestPlc:
if debug:
message = "debug"
completer_message = 'ssh_node_debug'
- local_key = "keys/%(vservername)s-debug.rsa" % locals()
+ local_key = "keys/{vservername}-debug.rsa".format(**locals())
else:
message = "boot"
completer_message = 'ssh_node_boot'
- local_key = "keys/key_admin.rsa"
- utils.header("checking ssh access to nodes (expected in %s mode)" % message)
+ local_key = "keys/key_admin.rsa"
+ utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
node_infos = self.all_node_infos()
tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
boot_state=message, dry_run=self.options.dry_run) \
@@ -1147,15 +1192,31 @@ class TestPlc:
# step entry points mapped onto every node; the bodies are empty because
# @node_mapper presumably forwards each call to the same-named TestNode
# method (mirroring slice_mapper / bonding_redirector) -- TODO confirm
# against node_mapper's definition earlier in this file
@node_mapper
def qemu_local_config(self): pass
@node_mapper
def qemu_export(self): pass
@node_mapper
def qemu_cleanlog(self): pass
@node_mapper
def nodestate_reinstall(self): pass
@node_mapper
def nodestate_upgrade(self): pass
@node_mapper
def nodestate_safeboot(self): pass
@node_mapper
def nodestate_boot(self): pass
@node_mapper
def nodestate_show(self): pass
# one step per supported fedora release for switching a node's distro
@node_mapper
def nodedistro_f14(self): pass
@node_mapper
def nodedistro_f18(self): pass
@node_mapper
def nodedistro_f20(self): pass
@node_mapper
def nodedistro_f21(self): pass
@node_mapper
def nodedistro_f22(self): pass
@node_mapper
def nodeflavour_show(self): pass
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node(self):
@@ -1176,19 +1237,19 @@ class TestPlc:
def actual_run(self):
return self.test_sliver.check_initscript_stamp(self.stamp)
def message(self):
- return "initscript checker for %s" % self.test_sliver.name()
+ return "initscript checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "initscript stamp %s not found in sliver %s"%\
- (self.stamp, self.test_sliver.name())
+ print("initscript stamp {} not found in sliver {}"\
+ .format(self.stamp, self.test_sliver.name()))
tasks = []
for slice_spec in self.plc_spec['slices']:
- if not slice_spec.has_key('initscriptstamp'):
+ if 'initscriptstamp' not in slice_spec:
continue
stamp = slice_spec['initscriptstamp']
slicename = slice_spec['slice_fields']['name']
for nodename in slice_spec['nodenames']:
- print 'nodename', nodename, 'slicename', slicename, 'stamp', stamp
+ print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
site,node = self.locate_node(nodename)
# xxx - passing the wrong site - probably harmless
test_site = TestSite(self, site)
@@ -1198,7 +1259,7 @@ class TestPlc:
tasks.append(CompleterTaskInitscript(test_sliver, stamp))
return Completer(tasks, message='check_initscripts').\
run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
-
+
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper; the actual logic lives in do_check_initscripts
    outcome = self.do_check_initscripts()
    return outcome
@@ -1206,7 +1267,7 @@ class TestPlc:
def initscripts(self):
    "create initscripts with PLCAPI"
    # push every initscript declared in the spec into the PLCAPI
    plc_name = self.plc_spec['name']
    for spec in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc {}'.format(plc_name), spec)
        self.apiserver.AddInitScript(self.auth_root(), spec['initscript_fields'])
    return True
@@ -1214,12 +1275,12 @@ class TestPlc:
"delete initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
initscript_name = initscript['initscript_fields']['name']
- print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
+ print(('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name'])))
try:
self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
- print initscript_name, 'deleted'
+ print(initscript_name, 'deleted')
except:
- print 'deletion went wrong - probably did not exist'
+ print('deletion went wrong - probably did not exist')
return True
### manage slices
@@ -1277,6 +1338,9 @@ class TestPlc:
def keys_clear_known_hosts(self): pass
def plcapi_urls(self):
    """
    attempts to reach the PLCAPI with various forms for the URL
    """
    # delegate the actual probing to the dedicated scanner helper
    scanner = PlcapiUrlScanner(self.auth_root(), ip=self.vserverip)
    return scanner.scan()
def speed_up_slices(self):
@@ -1288,11 +1352,11 @@ class TestPlc:
def _speed_up_slices(self, p, r):
# create the template on the server-side
- template = "%s.nodemanager" % self.name()
+ template = "{}.nodemanager".format(self.name())
with open(template,"w") as template_file:
- template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
+ template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
in_vm = "/var/www/html/PlanetLabConf/nodemanager"
- remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+ remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
self.test_ssh.copy_abs(template, remote)
# Add a conf file
if not self.apiserver.GetConfFiles(self.auth_root(),
@@ -1305,11 +1369,11 @@ class TestPlc:
def debug_nodemanager(self):
    "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
    # craft the nodemanager options template locally ...
    local_template = "{}.nodemanager".format(self.name())
    with open(local_template, "w") as output:
        output.write('OPTIONS="-p 10 -r 6 -v -d"\n')
    # ... then push it under the guest's PlanetLabConf area
    guest_path = "/var/www/html/PlanetLabConf/nodemanager"
    destination = "{}/{}".format(self.vm_root_in_host(), guest_path)
    self.test_ssh.copy_abs(local_template, destination)
    return True
@@ -1319,6 +1383,9 @@ class TestPlc:
# per-node steps; bodies are empty because @node_mapper presumably forwards
# to the same-named TestNode method -- TODO confirm against node_mapper
@node_mapper
def qemu_timestamp(self) : pass
@node_mapper
def qemu_nodefamily(self): pass
+
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
    """
    Locate the sliver object for (nodename, slicename), searching this plc
    first and then each plc in other_plcs.

    Returns the first match; raises Exception when no plc knows the sliver.
    """
    for plc in [self] + other_plcs:
        try:
            return plc.locate_sliver_obj(nodename, slicename)
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate while we scan the plcs
        except Exception:
            pass
    raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
# implement this one as a cross step so that we can take advantage of different nodes
# in multi-plcs mode
@@ -1345,9 +1412,9 @@ class TestPlc:
def actual_run(self):
return self.test_sliver.check_tcp_ready(port = 9999)
def message(self):
- return "network ready checker for %s" % self.test_sliver.name()
+ return "network ready checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "could not bind port from sliver %s" % self.test_sliver.name()
+ print("could not bind port from sliver {}".format(self.test_sliver.name()))
sliver_specs = {}
tasks = []
@@ -1356,10 +1423,10 @@ class TestPlc:
# locate the TestSliver instances involved, and cache them in the spec instance
spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
- message = "Will check TCP between s=%s and c=%s" % \
- (spec['s_sliver'].name(), spec['c_sliver'].name())
+ message = "Will check TCP between s={} and c={}"\
+ .format(spec['s_sliver'].name(), spec['c_sliver'].name())
if 'client_connect' in spec:
- message += " (using %s)" % spec['client_connect']
+ message += " (using {})".format(spec['client_connect'])
utils.header(message)
# we need to check network presence in both slivers, but also
# avoid to insert a sliver several times
@@ -1416,9 +1483,9 @@ class TestPlc:
def actual_run(self):
return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
def message(self):
- return "System slice %s @ %s" % (slicename, self.test_node.name())
+ return "System slice {} @ {}".format(slicename, self.test_node.name())
def failure_epilogue(self):
- print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
+ print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
timeout = timedelta(minutes=timeout_minutes)
silent = timedelta(0)
period = timedelta(seconds=period_seconds)
@@ -1430,7 +1497,7 @@ class TestPlc:
"runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
command = location
command += " -- --check"
@@ -1485,16 +1552,17 @@ class TestPlc:
utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
code, cached_rpm_path = \
utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
- utils.header("rpm_path=<<%s>>" % rpm_path)
+ utils.header("rpm_path=<<{}>>".format(rpm_path))
# just for checking
- self.run_in_guest("rpm -i %s" % cached_rpm_path)
+ self.run_in_guest("rpm -i {}".format(cached_rpm_path))
return self.yum_check_installed("sfa-client")
def sfa_dbclean(self):
"thoroughly wipes off the SFA database"
return self.run_in_guest("sfaadmin reg nuke") == 0 or \
self.run_in_guest("sfa-nuke.py") == 0 or \
- self.run_in_guest("sfa-nuke-plc.py") == 0
+ self.run_in_guest("sfa-nuke-plc.py") == 0 or \
+ self.run_in_guest("sfaadmin registry nuke") == 0
def sfa_fsclean(self):
"cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
@@ -1511,7 +1579,7 @@ class TestPlc:
try:
self.apiserver.DeleteSite(self.auth_root(),login_base)
except:
- print "Site %s already absent from PLC db"%login_base
+ print("Site {} already absent from PLC db".format(login_base))
for spec_name in ['pi_spec','user_spec']:
user_spec = auth_sfa_spec[spec_name]
@@ -1520,10 +1588,10 @@ class TestPlc:
self.apiserver.DeletePerson(self.auth_root(),username)
except:
# this in fact is expected as sites delete their members
- #print "User %s already absent from PLC db"%username
+ #print "User {} already absent from PLC db".format(username)
pass
- print "REMEMBER TO RUN sfa_import AGAIN"
+ print("REMEMBER TO RUN sfa_import AGAIN")
return True
def sfa_uninstall(self):
@@ -1557,51 +1625,51 @@ class TestPlc:
###
def confdir(self):
    # local directory holding this plc's generated config files;
    # created on the fly the first time it is needed
    path = "conf.{}".format(self.plc_spec['name'])
    if not os.path.isdir(path):
        utils.system("mkdir -p {}".format(path))
        # re-check: mkdir may have failed (permissions, dry-run shell, ...)
        if not os.path.isdir(path):
            raise Exception("Cannot create config dir for plc {}".format(self.name()))
    return path
def conffile(self, filename):
    # full path of <filename> inside this plc's config directory
    return self.confdir() + "/" + filename
def confsubdir(self, dirname, clean, dry_run=False):
    """
    Return (and create if needed) the config subdir <dirname> under confdir().

    clean=True wipes the subdir first; with dry_run=True a missing directory
    is tolerated.  Raises Exception when the directory cannot be created.
    """
    subdirname = "{}/{}".format(self.confdir(), dirname)
    if clean:
        utils.system("rm -rf {}".format(subdirname))
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p {}".format(subdirname))
    if not dry_run and not os.path.isdir(subdirname):
        # was 'raise "..."' - raising a plain str is a TypeError in python3;
        # raise a real Exception like confdir() does
        raise Exception("Cannot create config subdir {} for plc {}".format(dirname, self.name()))
    return subdirname
def conffile_clean(self, filename):
    # remove <filename> from the config dir; True when rm succeeded
    target = self.conffile(filename)
    return 0 == utils.system("rm -rf {}".format(target))
###
def sfa_configure(self):
    "run sfa-config-tty"
    tmpname = self.conffile("sfa-config-tty")
    # generate the answers sfa-config-tty expects on stdin:
    # one 'e <var>' + value per setting, then write / restart / quit
    with open(tmpname, 'w') as fileconf:
        for var, value in self.plc_spec['sfa']['settings'].items():
            fileconf.write('e {}\n{}\n'.format(var, value))
        for command in ('w', 'R', 'q'):
            fileconf.write(command + '\n')
    # show the script for the logs, then feed it to sfa-config-tty in the guest
    utils.system('cat {}'.format(tmpname))
    self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
    return True
def aggregate_xml_line(self):
    # one line describing this plc's aggregate, meant for a neighbour's
    # aggregates.xml
    # NOTE(review): the returned literal shows as '' here - the original
    # '<aggregate .../>' XML template was apparently stripped by an HTML
    # renderer (angle brackets eaten); as written, .format() has no
    # placeholders to fill - restore the markup before trusting this code
    port = self.plc_spec['sfa']['neighbours-port']
    return ''\
        .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
def registry_xml_line(self):
    # one line describing this plc's registry, meant for a neighbour's
    # registries.xml
    # NOTE(review): same stripped-markup issue as aggregate_xml_line above -
    # the '<registry .../>' XML template shows as '' and .format() has
    # nothing to substitute into; restore the markup before trusting this
    return ''\
        .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
# a cross step that takes all other plcs in argument
@@ -1612,18 +1680,18 @@ class TestPlc:
return True
agg_fname = self.conffile("agg.xml")
with open(agg_fname,"w") as out:
- out.write("%s\n" % \
- " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
- utils.header("(Over)wrote %s" % agg_fname)
+ out.write("{}\n"\
+ .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
+ utils.header("(Over)wrote {}".format(agg_fname))
reg_fname=self.conffile("reg.xml")
with open(reg_fname,"w") as out:
- out.write("%s\n" % \
- " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
- utils.header("(Over)wrote %s" % reg_fname)
+ out.write("{}\n"\
+ .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
+ utils.header("(Over)wrote {}".format(reg_fname))
return self.test_ssh.copy_abs(agg_fname,
- '/%s/etc/sfa/aggregates.xml' % self.vm_root_in_host()) == 0 \
+ '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
and self.test_ssh.copy_abs(reg_fname,
- '/%s/etc/sfa/registries.xml' % self.vm_root_in_host()) == 0
+ '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
def sfa_import(self):
"use sfaadmin to import from plc"
@@ -1645,12 +1713,12 @@ class TestPlc:
for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
test_slice = TestAuthSfa(self, slice_spec)
dir_basename = os.path.basename(test_slice.sfi_path())
- dir_name = self.confsubdir("dot-sfi/%s" % dir_basename,
+ dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
clean=True, dry_run=self.options.dry_run)
test_slice.sfi_configure(dir_name)
# push into the remote /root/sfi area
location = test_slice.sfi_path()
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.mkdir(remote, abs=True)
# need to strip last level or remote otherwise we get an extra dir level
self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
@@ -1669,7 +1737,7 @@ class TestPlc:
for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
test_slice = TestAuthSfa(self, slice_spec)
in_vm = test_slice.sfi_path()
- remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+ remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
if self.test_ssh.copy_abs(filename, remote) !=0:
overall = False
return overall
@@ -1701,6 +1769,8 @@ class TestPlc:
# SFA step entry points; bodies are empty because @auth_sfa_mapper
# presumably forwards each call to the same-named TestAuthSfa method
# (mirroring slice_mapper) -- TODO confirm against auth_sfa_mapper
@auth_sfa_mapper
def sfa_provision_empty(self): pass
@auth_sfa_mapper
def sfa_describe(self): pass
@auth_sfa_mapper
def sfa_check_slice_plc(self): pass
@auth_sfa_mapper
def sfa_check_slice_plc_empty(self): pass
@@ -1733,7 +1803,7 @@ class TestPlc:
"creates random entries in the PLCAPI"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
command = location
command += " -- --preserve --short-names"
@@ -1743,6 +1813,19 @@ class TestPlc:
remote = (self.run_in_guest(command) == 0);
return local and remote
+
+ ####################
# bonding step entry points; bodies are empty because @bonding_redirector
# strips the 'bonding_' prefix and invokes the matching TestBonding method
# on self.test_bonding (see bonding_redirector at the top of this file)
@bonding_redirector
def bonding_init_partial(self): pass

@bonding_redirector
def bonding_add_yum(self): pass

@bonding_redirector
def bonding_install_rpms(self): pass
+
+ ####################
+
def gather_logs(self):
"gets all possible logs from plc's/qemu node's/slice's for future reference"
# (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log./*
@@ -1752,55 +1835,55 @@ class TestPlc:
# (3) get the nodes /var/log and store is as logs/node.var-log./*
# (4) as far as possible get the slice's /var/log as logs/sliver.var-log./*
# (1.a)
- print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/log")
self.gather_var_logs()
# (1.b)
- print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
self.gather_pgsql_logs()
# (1.c)
- print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+ print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
self.gather_root_sfi()
# (2)
- print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
+ print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
test_node.gather_qemu_logs()
# (3)
- print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+ print("-------------------- TestPlc.gather_logs : nodes's /var/log")
self.gather_nodes_var_logs()
# (4)
- print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+ print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
self.gather_slivers_var_logs()
return True
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<name>/
    for sliver in self.all_sliver_objs():
        fetch = sliver.tar_var_logs()
        logdir = "logs/sliver.var-log.{}".format(sliver.name())
        utils.system("mkdir -p {}".format(logdir))
        # untar the remote stream locally under logs/
        utils.system(fetch + " | tar -C {} -xf -".format(logdir))
    return True
def gather_var_logs(self):
    # fetch the guest's /var/log into logs/myplc.var-log.<name>/
    logdir = "logs/myplc.var-log.{}".format(self.name())
    utils.system("mkdir -p " + logdir)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C " + logdir + " -xf -")
    # the httpd subdir comes out unreadable; open it up for inspection
    utils.system("chmod a+r,a+x " + logdir + "/httpd")
def gather_pgsql_logs(self):
    # fetch the guest's postgres logs into logs/myplc.pgsql-log.<name>/
    logdir = "logs/myplc.pgsql-log.{}".format(self.name())
    utils.system("mkdir -p " + logdir)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C " + logdir + " -xf -")
def gather_root_sfi(self):
    # fetch the guest's /root/sfi into logs/sfi.<name>/
    logdir = "logs/sfi.{}".format(self.name())
    utils.system("mkdir -p " + logdir)
    tar_out = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_out + "| tar -C " + logdir + " -xf -")
def gather_nodes_var_logs(self):
@@ -1810,8 +1893,8 @@ class TestPlc:
test_node = TestNode(self, test_site, node_spec)
test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
command = test_ssh.actual_command("tar -C /var/log -cf - .")
- command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
- utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
+ command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
+ utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
utils.system(command)
@@ -1820,19 +1903,19 @@ class TestPlc:
# uses options.dbname if it is found
try:
name = self.options.dbname
- if not isinstance(name, StringTypes):
+ if not isinstance(name, str):
raise Exception
except:
t = datetime.now()
d = t.date()
name = str(d)
- return "/root/%s-%s.sql" % (database, name)
+ return "/root/{}-{}.sql".format(database, name)
def plc_db_dump(self):
    # dump the guest's planetlab5 database under /root in the PLC;
    # dbfile() appends a date (or options.dbname) to the filename
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" looks like a typo for "planetlab5", but
    # plc_db_restore (below, not fully visible here) plausibly derives the
    # same filename - fix both together or neither, else restore breaks
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in {}'.format(dump))
    return True
def plc_db_restore(self):