# Thierry Parmentelat <thierry.parmentelat@inria.fr>
-# Copyright (C) 2010 INRIA
+# Copyright (C) 2010 INRIA
#
import sys
import time
import traceback
import socket
from datetime import datetime, timedelta
-from types import StringTypes
import utils
from Completer import Completer, CompleterTask
# step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes, dry_run):
- utils.header('Entering StandBy for %d mn'%minutes)
+ utils.header('Entering StandBy for {:d} mn'.format(minutes))
if dry_run:
- print 'dry_run'
+ print('dry_run')
else:
time.sleep(60*minutes)
return True
node_method = TestNode.__dict__[method.__name__]
for test_node in self.all_nodes():
if not node_method(test_node, *args, **kwds):
- overall=False
+ overall = False
return overall
# maintain __name__ for ignore_result
map_on_nodes.__name__ = method.__name__
ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
ref_method = TestPlc.__dict__[ref_name]
result = ref_method(self)
- print "Actual (but ignored) result for %(ref_name)s is %(result)s" % locals()
+ print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
return Ignored(result)
name = method.__name__.replace('_ignore', '').replace('force_', '')
ignoring.__name__ = name
default_steps = [
'show', SEP,
- 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
- 'plc_install', 'plc_configure', 'plc_start', SEP,
+ 'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
+ 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
- 'plcapi_urls','speed_up_slices', SEP,
+ 'plcapi_urls', 'speed_up_slices', SEP,
'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
# slices created under plcsh interactively seem to be fine but these ones don't have the tags
-# keep this our of the way for now
+# keep this out of the way for now
'check_vsys_defaults_ignore', SEP,
-# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', SEP,
- 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
- 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
- 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
- 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
- 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
- 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
- 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
- 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+# run this first off so it's easier to re-run on another qemu box
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
+ 'bootcd', 'qemu_local_config', SEP,
+ 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
+ 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
+ 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
+ 'sfa_start', 'sfa_import', SEPSFA,
+ 'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
+ 'sfa_register_user@1', 'sfa_update_user@1',
+ 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
+ 'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
+ 'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
+ 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
+ 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
# but as the stress test might take a while, we sometimes missed the debug mode..
'probe_kvm_iptables',
'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
- 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
- 'ssh_slice_sfa@1', SEPSFA,
- 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
+ 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
+ 'ssh_slice_sfa@1', SEPSFA,
+ 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
+ 'sfa_check_slice_plc_empty@1', SEPSFA,
'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
- 'cross_check_tcp@1', 'check_system_slice', SEP,
+ 'check_system_slice', SEP,
# for inspecting the slice while it runs the first time
#'fail',
# check slices are turned off properly
+ 'debug_nodemanager',
'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
# check they are properly re-created with the same name
'fill_slices', 'ssh_slice_again', SEP,
'gather_logs_force', SEP,
]
- other_steps = [
+ other_steps = [
'export', 'show_boxes', 'super_speed_up_slices', SEP,
'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
- 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
+ 'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
'populate', SEP,
- 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+ 'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
+ 'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
+ 'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
- 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
- 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+ 'sfa_install_core', 'sfa_install_sfatables',
+ 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+ 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
'sfa_get_expires', SEPSFA,
- 'plc_db_dump' , 'plc_db_restore', SEP,
- 'check_netflow','check_drl', SEP,
- 'debug_nodemanager', 'slice_fs_present', SEP,
- 'standby_1_through_20','yes','no',SEP,
+ 'plc_db_dump', 'plc_db_restore', SEP,
+ 'check_netflow', 'check_drl', SEP,
+ # used to be part of default steps but won't work since f27
+ 'cross_check_tcp@1',
+ 'slice_fs_present', 'check_initscripts', SEP,
+ 'standby_1_through_20', 'yes', 'no', SEP,
+ 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
]
- bonding_steps = [
+ default_bonding_steps = [
'bonding_init_partial',
'bonding_add_yum',
'bonding_install_rpms', SEP,
@staticmethod
def _has_sfa_cached(rpms_url):
if os.path.isfile(has_sfa_cache_filename):
- cached = file(has_sfa_cache_filename).read() == "yes"
- utils.header("build provides SFA (cached):%s" % cached)
+ with open(has_sfa_cache_filename) as cache:
+ cached = cache.read() == "yes"
+ utils.header("build provides SFA (cached):{}".format(cached))
return cached
# warning, we're now building 'sface' so let's be a bit more picky
# full builds are expected to return with 0 here
utils.header("Checking if build provides SFA package...")
- retcod = os.system("curl --silent %s/ | grep -q sfa-"%rpms_url) == 0
+ retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
encoded = 'yes' if retcod else 'no'
- with open(has_sfa_cache_filename,'w')as out:
- out.write(encoded)
+        with open(has_sfa_cache_filename, 'w') as cache:
+ cache.write(encoded)
return retcod
-
+
@staticmethod
def check_whether_build_has_sfa(rpms_url):
has_sfa = TestPlc._has_sfa_cached(rpms_url)
TestPlc.default_steps.remove(step)
def __init__(self, plc_spec, options):
- self.plc_spec = plc_spec
+ self.plc_spec = plc_spec
self.options = options
- self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
+ self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
self.vserverip = plc_spec['vserverip']
self.vservername = plc_spec['vservername']
- self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
- self.apiserver = TestApiserver(self.url, options.dry_run)
+ self.vplchostname = self.vservername.split('-')[-1]
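+        # short name of the guest: the last dash-separated piece of
+        # vservername - e.g. 2012.05.23--f14-32-sfastd1-1-vplc07 -> vplc07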
+ self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
+ self.apiserver = TestApiserver(self.url, options.dry_run)
(self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
(self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
-
+
def has_addresses_api(self):
return self.apiserver.has_method('AddIpAddress')
def name(self):
name = self.plc_spec['name']
- return "%s.%s" % (name,self.vservername)
+ return "{}.{}".format(name,self.vservername)
def hostname(self):
return self.plc_spec['host_box']
# define the API methods on this object through xmlrpc
# would help, but not strictly necessary
def connect(self):
- pass
+ pass
def actual_command_in_guest(self,command, backslash=False):
raw1 = self.host_to_guest(command)
raw2 = self.test_ssh.actual_command(raw1, dry_run=self.options.dry_run, backslash=backslash)
return raw2
-
+
def start_guest(self):
return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),
dry_run=self.options.dry_run))
-
+
def stop_guest(self):
return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),
dry_run=self.options.dry_run))
-
+
def run_in_guest(self, command, backslash=False):
raw = self.actual_command_in_guest(command, backslash)
return utils.system(raw)
-
+
def run_in_host(self,command):
return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
# see e.g. plc_start esp. the version for f14
#command gets run in the plc's vm
def host_to_guest(self, command):
- vservername = self.vservername
- personality = self.options.personality
- raw = "%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s" % locals()
- # f14 still needs some extra help
- if self.options.fcdistro == 'f14':
- raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" % locals()
- else:
- raw +=" -- /usr/bin/env %(command)s" % locals()
- return raw
-
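+        # the guest is assumed to be reachable directly over ssh under its
+        # short hostname, so we chain a second ssh leg instead of entering
+        # the container through virsh lxc-enter-namespace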
+ ssh_leg = TestSsh(self.vplchostname)
+ return ssh_leg.actual_command(command, keep_stdin=True)
+
# this /vservers thing is legacy...
def vm_root_in_host(self):
- return "/vservers/%s/" % (self.vservername)
+ return "/vservers/{}/".format(self.vservername)
def vm_timestamp_path(self):
- return "/vservers/%s/%s.timestamp" % (self.vservername,self.vservername)
+ return "/vservers/{}/{}.timestamp".format(self.vservername, self.vservername)
#start/stop the vserver
def start_guest_in_host(self):
- return "virsh -c lxc:/// start %s" % (self.vservername)
-
+ return "virsh -c lxc:/// start {}".format(self.vservername)
+
def stop_guest_in_host(self):
- return "virsh -c lxc:/// destroy %s" % (self.vservername)
-
+ return "virsh -c lxc:/// destroy {}".format(self.vservername)
+
# xxx quick n dirty
def run_in_guest_piped(self,local,remote):
return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
keep_stdin = True))
- def yum_check_installed(self, rpms):
- if isinstance(rpms, list):
+ def dnf_check_installed(self, rpms):
+ if isinstance(rpms, list):
rpms=" ".join(rpms)
- return self.run_in_guest("rpm -q %s"%rpms) == 0
-
+ return self.run_in_guest("rpm -q {}".format(rpms)) == 0
+
-# does a yum install in the vs, ignore yum retcod, check with rpm
+# does a dnf install in the vs, ignore dnf retcod, check with rpm
- def yum_install(self, rpms):
- if isinstance(rpms, list):
+ def dnf_install(self, rpms):
+ if isinstance(rpms, list):
rpms=" ".join(rpms)
- self.run_in_guest("yum -y install %s" % rpms)
+        retcod = self.run_in_guest("dnf -y install {}".format(rpms))
+        if retcod != 0:
+            self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
# yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
- self.run_in_guest("yum-complete-transaction -y")
- return self.yum_check_installed(rpms)
+ # nothing similar with dnf, forget about this for now
+ # self.run_in_guest("yum-complete-transaction -y")
+ return self.dnf_check_installed(rpms)
+
+ def pip_install(self, package):
+ return self.run_in_guest("pip3 install {}".format(package)) == 0
def auth_root(self):
- return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
- 'AuthMethod' : 'password',
- 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
+ return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
+ 'AuthMethod' : 'password',
+ 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
'Role' : self.plc_spec['role'],
}
-
+
def locate_site(self,sitename):
for site in self.plc_spec['sites']:
if site['site_fields']['name'] == sitename:
return site
if site['site_fields']['login_base'] == sitename:
return site
- raise Exception,"Cannot locate site %s" % sitename
-
+ raise Exception("Cannot locate site {}".format(sitename))
+
def locate_node(self, nodename):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['name'] == nodename:
return site, node
- raise Exception, "Cannot locate node %s" % nodename
-
+ raise Exception("Cannot locate node {}".format(nodename))
+
def locate_hostname(self, hostname):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['node_fields']['hostname'] == hostname:
return(site, node)
- raise Exception,"Cannot locate hostname %s" % hostname
-
+ raise Exception("Cannot locate hostname {}".format(hostname))
+
def locate_key(self, key_name):
for key in self.plc_spec['keys']:
if key['key_name'] == key_name:
return key
- raise Exception,"Cannot locate key %s" % key_name
+ raise Exception("Cannot locate key {}".format(key_name))
def locate_private_key_from_key_names(self, key_names):
# locate the first avail. key
for slice in self.plc_spec['slices']:
if slice['slice_fields']['name'] == slicename:
return slice
- raise Exception,"Cannot locate slice %s" % slicename
+ raise Exception("Cannot locate slice {}".format(slicename))
def all_sliver_objs(self):
result = []
# transform into a dict { 'host_box' -> [ test_node .. ] }
result = {}
for (box,node) in tuples:
- if not result.has_key(box):
+ if box not in result:
result[box] = [node]
else:
result[box].append(node)
return result
-
+
# a step for checking this stuff
def show_boxes(self):
'print summary of nodes location'
- for box,nodes in self.get_BoxNodes().iteritems():
- print box,":"," + ".join( [ node.name() for node in nodes ] )
+ for box,nodes in self.get_BoxNodes().items():
+ print(box,":"," + ".join( [ node.name() for node in nodes ] ))
return True
# make this a valid step
def qemu_kill_all(self):
'kill all qemu instances on the qemu boxes involved by this setup'
# this is the brute force version, kill all qemus on that host box
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# pass the first nodename, as we don't push template-qemu on testboxes
nodedir = nodes[0].nodedir()
TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
# make this a valid step
def qemu_list_all(self):
'list all qemu instances on the qemu boxes involved by this setup'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# this is the brute force version, kill all qemus on that host box
TestBoxQemu(box, self.options.buildname).qemu_list_all()
return True
# kill only the qemus related to this test
def qemu_list_mine(self):
'list qemu instances for our nodes'
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.list_qemu()
# kill only the qemus related to this test
def qemu_clean_mine(self):
'cleanup (rm -rf) qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.qemu_clean()
# kill only the right qemus
def qemu_kill_mine(self):
'kill the qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.kill_qemu()
self.show_pass(2)
return True
- # uggly hack to make sure 'run export' only reports about the 1st plc
+    # ugly hack to make sure 'run export' only reports about the 1st plc
# to avoid confusion - also we use 'inri_slice1' in various aliases..
exported_id = 1
def export(self):
"print cut'n paste-able stuff to export env variables to your shell"
# guess local domain from hostname
- if TestPlc.exported_id > 1:
- print "export GUESTHOSTNAME%d=%s" % (TestPlc.exported_id, self.plc_spec['vservername'])
+ if TestPlc.exported_id > 1:
+ print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
return True
TestPlc.exported_id += 1
domain = socket.gethostname().split('.',1)[1]
- fqdn = "%s.%s" % (self.plc_spec['host_box'], domain)
- print "export BUILD=%s" % self.options.buildname
- print "export PLCHOSTLXC=%s" % fqdn
- print "export GUESTNAME=%s" % self.plc_spec['vservername']
- vplcname = self.plc_spec['vservername'].split('-')[-1]
- print "export GUESTHOSTNAME=%s.%s"%(vplcname, domain)
+ fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
+ print("export BUILD={}".format(self.options.buildname))
+ print("export PLCHOSTLXC={}".format(fqdn))
+ print("export GUESTNAME={}".format(self.vservername))
+ print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
# find hostname of first node
hostname, qemubox = self.all_node_infos()[0]
- print "export KVMHOST=%s.%s" % (qemubox, domain)
- print "export NODE=%s" % (hostname)
+ print("export KVMHOST={}.{}".format(qemubox, domain))
+ print("export NODE={}".format(hostname))
return True
# entry point
always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
def show_pass(self, passno):
- for (key,val) in self.plc_spec.iteritems():
+ for (key,val) in self.plc_spec.items():
if not self.options.verbose and key not in TestPlc.always_display_keys:
continue
if passno == 2:
self.display_key_spec(key)
elif passno == 1:
if key not in ['sites', 'initscripts', 'slices', 'keys']:
- print '+ ', key, ':', val
+ print('+ ', key, ':', val)
def display_site_spec(self, site):
- print '+ ======== site', site['site_fields']['name']
- for k,v in site.iteritems():
+ print('+ ======== site', site['site_fields']['name'])
+ for k,v in site.items():
if not self.options.verbose and k not in TestPlc.always_display_keys:
continue
if k == 'nodes':
- if v:
- print '+ ','nodes : ',
- for node in v:
- print node['node_fields']['hostname'],'',
- print ''
+ if v:
+ print('+ ', 'nodes : ', end=' ')
+ for node in v:
+ print(node['node_fields']['hostname'],'', end=' ')
+ print('')
elif k == 'users':
- if v:
- print '+ users : ',
- for user in v:
- print user['name'],'',
- print ''
+ if v:
+ print('+ users : ', end=' ')
+ for user in v:
+ print(user['name'],'', end=' ')
+ print('')
elif k == 'site_fields':
- print '+ login_base', ':', v['login_base']
+ print('+ login_base', ':', v['login_base'])
elif k == 'address_fields':
pass
else:
- print '+ ',
+ print('+ ', end=' ')
utils.pprint(k, v)
-
+
def display_initscript_spec(self, initscript):
- print '+ ======== initscript', initscript['initscript_fields']['name']
+ print('+ ======== initscript', initscript['initscript_fields']['name'])
def display_key_spec(self, key):
- print '+ ======== key', key['key_name']
+ print('+ ======== key', key['key_name'])
def display_slice_spec(self, slice):
- print '+ ======== slice', slice['slice_fields']['name']
- for k,v in slice.iteritems():
+ print('+ ======== slice', slice['slice_fields']['name'])
+ for k,v in slice.items():
if k == 'nodenames':
- if v:
- print '+ nodes : ',
- for nodename in v:
- print nodename,'',
- print ''
+ if v:
+ print('+ nodes : ', end=' ')
+ for nodename in v:
+ print(nodename,'', end=' ')
+ print('')
elif k == 'usernames':
- if v:
- print '+ users : ',
- for username in v:
- print username,'',
- print ''
+ if v:
+ print('+ users : ', end=' ')
+ for username in v:
+ print(username,'', end=' ')
+ print('')
elif k == 'slice_fields':
- print '+ fields',':',
- print 'max_nodes=',v['max_nodes'],
- print ''
+ print('+ fields', ':', end=' ')
+ print('max_nodes=',v['max_nodes'], end=' ')
+ print('')
else:
- print '+ ',k,v
+ print('+ ',k,v)
def display_node_spec(self, node):
- print "+ node=%s host_box=%s" % (node['name'],node['host_box']),
- print "hostname=", node['node_fields']['hostname'],
- print "ip=", node['interface_fields']['ip']
+ print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
+ print("hostname=", node['node_fields']['hostname'], end=' ')
+ print("ip=", node['interface_fields']['ip'])
if self.options.verbose:
utils.pprint("node details", node, depth=3)
@staticmethod
def display_mapping_plc(plc_spec):
- print '+ MyPLC',plc_spec['name']
+ print('+ MyPLC',plc_spec['name'])
# WARNING this would not be right for lxc-based PLC's - should be harmless though
- print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
- print '+\tIP = %s/%s' % (plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip'])
+ print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
+ print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
for site_spec in plc_spec['sites']:
for node_spec in site_spec['nodes']:
TestPlc.display_mapping_node(node_spec)
@staticmethod
def display_mapping_node(node_spec):
- print '+ NODE %s' % (node_spec['name'])
- print '+\tqemu box %s' % node_spec['host_box']
- print '+\thostname=%s' % node_spec['node_fields']['hostname']
+ print('+ NODE {}'.format(node_spec['name']))
+ print('+\tqemu box {}'.format(node_spec['host_box']))
+ print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
# a first approx. is to store the timestamp close to the VM root like vs does
stamp_path = self.vm_timestamp_path()
stamp_dir = os.path.dirname(stamp_path)
- utils.system(self.test_ssh.actual_command("mkdir -p %s" % stamp_dir))
- return utils.system(self.test_ssh.actual_command("echo %d > %s" % (now, stamp_path))) == 0
-
- # this is called inconditionnally at the beginning of the test sequence
+ utils.system(self.test_ssh.actual_command("mkdir -p {}".format(stamp_dir)))
+ return utils.system(self.test_ssh.actual_command("echo {:d} > {}".format(now, stamp_path))) == 0
+
+    # this is called unconditionally at the beginning of the test sequence
# just in case this is a rerun, so if the vm is not running it's fine
def plcvm_delete(self):
"vserver delete the test myplc"
stamp_path = self.vm_timestamp_path()
- self.run_in_host("rm -f %s" % stamp_path)
- self.run_in_host("virsh -c lxc:// destroy %s" % self.vservername)
- self.run_in_host("virsh -c lxc:// undefine %s" % self.vservername)
- self.run_in_host("rm -fr /vservers/%s" % self.vservername)
+ self.run_in_host("rm -f {}".format(stamp_path))
+ self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
+ self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
+ self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
return True
### install
# so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def plcvm_create(self):
"vserver creation (no install done)"
- # push the local build/ dir to the testplc box
+ # push the local build/ dir to the testplc box
if self.is_local():
# a full path for the local calls
build_dir = os.path.dirname(sys.argv[0])
# remove for safety; do *not* mkdir first, otherwise we end up with build/build/
self.test_ssh.rmdir(build_dir)
self.test_ssh.copy(build_dir, recursive=True)
- # the repo url is taken from arch-rpms-url
+ # the repo url is taken from arch-rpms-url
# with the last step (i386) removed
repo_url = self.options.arch_rpms_url
for level in [ 'arch' ]:
- repo_url = os.path.dirname(repo_url)
+ repo_url = os.path.dirname(repo_url)
# invoke initvm (drop support for vs)
script = "lbuild-initvm.sh"
script_options = ""
# pass the vbuild-nightly options to [lv]test-initvm
- script_options += " -p %s" % self.options.personality
- script_options += " -d %s" % self.options.pldistro
- script_options += " -f %s" % self.options.fcdistro
- script_options += " -r %s" % repo_url
+ script_options += " -p {}".format(self.options.personality)
+ script_options += " -d {}".format(self.options.pldistro)
+ script_options += " -f {}".format(self.options.fcdistro)
+ script_options += " -r {}".format(repo_url)
vserver_name = self.vservername
try:
vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
- script_options += " -n %s" % vserver_hostname
+ script_options += " -n {}".format(vserver_hostname)
except:
- print "Cannot reverse lookup %s" % self.vserverip
- print "This is considered fatal, as this might pollute the test results"
+ print("Cannot reverse lookup {}".format(self.vserverip))
+ print("This is considered fatal, as this might pollute the test results")
return False
- create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s" % locals()
+ create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
return self.run_in_host(create_vserver) == 0
- ### install_rpm
- def plc_install(self):
- "yum install myplc, noderepo, and the plain bootstrapfs"
+ ### install django through pip
+ def django_install(self):
+        # plcapi requires Django, which is no longer provided by Fedora as an rpm
+ # so we use pip instead
+ """
+ pip install Django
+ """
+ return self.pip_install('Django')
- # workaround for getting pgsql8.2 on centos5
- if self.options.fcdistro == "centos5":
- self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+ ### install_rpm
+ def plc_install(self):
+ """
+        dnf install myplc, noderepo
+ """
# compute nodefamily
if self.options.personality == "linux32":
elif self.options.personality == "linux64":
arch = "x86_64"
else:
- raise Exception, "Unsupported personality %r"%self.options.personality
- nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)
+ raise Exception("Unsupported personality {}".format(self.options.personality))
+ nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
+
+ # check it's possible to install just 'myplc-core' first
+ if not self.dnf_install("myplc-core"):
+ return False
- pkgs_list=[]
- pkgs_list.append("slicerepo-%s" % nodefamily)
+ pkgs_list = []
pkgs_list.append("myplc")
- pkgs_list.append("noderepo-%s" % nodefamily)
- pkgs_list.append("nodeimage-%s-plain" % nodefamily)
+ pkgs_list.append("slicerepo-{}".format(nodefamily))
+ pkgs_list.append("noderepo-{}".format(nodefamily))
pkgs_string=" ".join(pkgs_list)
- return self.yum_install(pkgs_list)
+ return self.dnf_install(pkgs_list)
+
+ def install_syslinux6(self):
+ """
+ install syslinux6 from the fedora21 release
+ """
+ key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+ rpms = [
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+ ]
+ # this can be done several times
+ self.run_in_guest("rpm --import {key}".format(**locals()))
+ return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+ def bonding_builds(self):
+ """
+ list /etc/yum.repos.d on the myplc side
+ """
+ self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+ return True
+
+ def bonding_nodes(self):
+ """
+        List nodes known to the myplc together with their nodefamily
+ """
+ print("---------------------------------------- nodes")
+ for node in self.apiserver.GetNodes(self.auth_root()):
+ print("{} -> {}".format(node['hostname'],
+ self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
+ print("---------------------------------------- nodes")
+
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
- return self.yum_install( ['mod_python'] )
+ return self.dnf_install( ['mod_python'] )
- ###
+ ###
def plc_configure(self):
"run plc-config-tty"
- tmpname = '%s.plc-config-tty' % self.name()
+ tmpname = '{}.plc-config-tty'.format(self.name())
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['settings'].iteritems():
- fileconf.write('e %s\n%s\n'%(var,value))
+ for var, value in self.plc_spec['settings'].items():
+ fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('q\n')
- utils.system('cat %s' % tmpname)
- self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
- utils.system('rm %s' % tmpname)
+ utils.system('cat {}'.format(tmpname))
+ self.run_in_guest_piped('cat {}'.format(tmpname), 'plc-config-tty')
+ utils.system('rm {}'.format(tmpname))
return True
-# f14 is a bit odd in this respect, although this worked fine in guests up to f18
-# however using a vplc guest under f20 requires this trick
-# the symptom is this: service plc start
-# Starting plc (via systemctl): Failed to get D-Bus connection: \
-# Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
-# weird thing is the doc says f14 uses upstart by default and not systemd
-# so this sounds kind of harmless
- def start_service(self, service):
- return self.start_stop_service(service, 'start')
- def stop_service(self, service):
- return self.start_stop_service(service, 'stop')
-
- def start_stop_service(self, service, start_or_stop):
- "utility to start/stop a service with the special trick for f14"
- if self.options.fcdistro != 'f14':
- return self.run_in_guest("service %s %s" % (service, start_or_stop)) == 0
- else:
- # patch /sbin/service so it does not reset environment
- self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
- # this is because our own scripts in turn call service
- return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s" % \
- (service, start_or_stop)) == 0
+ # care only about f>=27
+ def start_stop_systemd(self, service, start_or_stop):
+ "utility to start/stop a systemd-defined service (sfa)"
+ return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
def plc_start(self):
- "service plc start"
- return self.start_service('plc')
+ "start plc through systemclt"
+ return self.start_stop_systemd('plc', 'start')
def plc_stop(self):
- "service plc stop"
- return self.stop_service('plc')
+ "stop plc through systemctl"
+ return self.start_stop_systemd('plc', 'stop')
def plcvm_start(self):
"start the PLC vserver"
def keys_store(self):
"stores test users ssh keys in keys/"
for key_spec in self.plc_spec['keys']:
- TestKey(self,key_spec).store_key()
+ TestKey(self,key_spec).store_key()
return True
def keys_clean(self):
overall = True
prefix = 'debug_ssh_key'
for ext in ['pub', 'rsa'] :
- src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
- dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
+ src = "{vm_root}/etc/planetlab/{prefix}.{ext}".format(**locals())
+ dst = "keys/{vservername}-debug.{ext}".format(**locals())
if self.test_ssh.fetch(src, dst) != 0:
overall=False
return overall
def sites(self):
"create sites with PLCAPI"
return self.do_sites()
-
+
def delete_sites(self):
"delete sites with PLCAPI"
return self.do_sites(action="delete")
-
+
def do_sites(self, action="add"):
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self,site_spec)
if (action != "add"):
- utils.header("Deleting site %s in %s" % (test_site.name(), self.name()))
+ utils.header("Deleting site {} in {}".format(test_site.name(), self.name()))
test_site.delete_site()
# deleted with the site
#test_site.delete_users()
continue
else:
- utils.header("Creating site %s & users in %s" % (test_site.name(), self.name()))
+ utils.header("Creating site {} & users in {}".format(test_site.name(), self.name()))
test_site.create_site()
test_site.create_users()
return True
def delete_all_sites(self):
"Delete all sites in PLC, and related objects"
- print 'auth_root', self.auth_root()
- sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
+ print('auth_root', self.auth_root())
+ sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
for site in sites:
# keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
continue
site_id = site['site_id']
- print 'Deleting site_id', site_id
+ print('Deleting site_id', site_id)
self.apiserver.DeleteSite(self.auth_root(), site_id)
return True
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self, site_spec)
if action != "add":
- utils.header("Deleting nodes in site %s" % test_site.name())
+ utils.header("Deleting nodes in site {}".format(test_site.name()))
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
- utils.header("Deleting %s" % test_node.name())
+ utils.header("Deleting {}".format(test_node.name()))
test_node.delete_node()
else:
- utils.header("Creating nodes for site %s in %s" % (test_site.name(), self.name()))
+ utils.header("Creating nodes for site {} in {}".format(test_site.name(), self.name()))
for node_spec in site_spec['nodes']:
- utils.pprint('Creating node %s' % node_spec, node_spec)
+ utils.pprint('Creating node {}'.format(node_spec), node_spec)
test_node = TestNode(self, test_site, node_spec)
test_node.create_node()
return True
@staticmethod
def translate_timestamp(start, grain, timestamp):
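+        # values smaller than one year are taken as offsets in grains
+        # relative to start, larger ones as absolute timestamps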
if timestamp < TestPlc.YEAR:
- return start+timestamp*grain
+ return start + timestamp*grain
else:
return timestamp
"create leases (on reservable nodes only, use e.g. run -c default -c resa)"
now = int(time.time())
grain = self.apiserver.GetLeaseGranularity(self.auth_root())
- print 'API answered grain=', grain
- start = (now/grain)*grain
+ print('API answered grain=', grain)
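+        # round start up to the next grain boundary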
+ start = (now//grain)*grain
start += grain
# find out all nodes that are reservable
nodes = self.all_reservable_nodenames()
- if not nodes:
+ if not nodes:
utils.header("No reservable node found - proceeding without leases")
return True
ok = True
lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
- lease_spec['t_from'],lease_spec['t_until'])
+ lease_spec['t_from'], lease_spec['t_until'])
if lease_addition['errors']:
- utils.header("Cannot create leases, %s"%lease_addition['errors'])
+ utils.header("Cannot create leases, {}".format(lease_addition['errors']))
ok = False
else:
- utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)' % \
- (nodes, lease_spec['slice'],
- lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
- lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
-
+ utils.header('Leases on nodes {} for {} from {:d} ({}) until {:d} ({})'\
+ .format(nodes, lease_spec['slice'],
+ lease_spec['t_from'], TestPlc.timestamp_printable(lease_spec['t_from']),
+ lease_spec['t_until'], TestPlc.timestamp_printable(lease_spec['t_until'])))
+
return ok
def delete_leases(self):
"remove all leases in the myplc side"
lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
- utils.header("Cleaning leases %r" % lease_ids)
+ utils.header("Cleaning leases {}".format(lease_ids))
self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
return True
for l in leases:
current = l['t_until'] >= now
if self.options.verbose or current:
- utils.header("%s %s from %s until %s" % \
- (l['hostname'], l['name'],
- TestPlc.timestamp_printable(l['t_from']),
- TestPlc.timestamp_printable(l['t_until'])))
+ utils.header("{} {} from {} until {}"\
+ .format(l['hostname'], l['name'],
+ TestPlc.timestamp_printable(l['t_from']),
+ TestPlc.timestamp_printable(l['t_until'])))
return True
# create nodegroups if needed, and populate
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
- if node_spec.has_key('nodegroups'):
+ if 'nodegroups' in node_spec:
nodegroupnames = node_spec['nodegroups']
- if isinstance(nodegroupnames, StringTypes):
+ if isinstance(nodegroupnames, str):
nodegroupnames = [ nodegroupnames ]
for nodegroupname in nodegroupnames:
- if not groups_dict.has_key(nodegroupname):
+ if nodegroupname not in groups_dict:
groups_dict[nodegroupname] = []
groups_dict[nodegroupname].append(test_node.name())
auth = self.auth_root()
overall = True
- for (nodegroupname,group_nodes) in groups_dict.iteritems():
+ for (nodegroupname,group_nodes) in groups_dict.items():
if action == "add":
- print 'nodegroups:', 'dealing with nodegroup',\
- nodegroupname, 'on nodes', group_nodes
+ print('nodegroups:', 'dealing with nodegroup',\
+ nodegroupname, 'on nodes', group_nodes)
# first, check if the nodetagtype is here
tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
if tag_types:
else:
tag_type_id = self.apiserver.AddTagType(auth,
{'tagname' : nodegroupname,
- 'description' : 'for nodegroup %s' % nodegroupname,
+ 'description' : 'for nodegroup {}'.format(nodegroupname),
'category' : 'test'})
- print 'located tag (type)', nodegroupname, 'as', tag_type_id
+ print('located tag (type)', nodegroupname, 'as', tag_type_id)
# create nodegroup
nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
if not nodegroups:
self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
- print 'created nodegroup', nodegroupname, \
- 'from tagname', nodegroupname, 'and value', 'yes'
+ print('created nodegroup', nodegroupname, \
+ 'from tagname', nodegroupname, 'and value', 'yes')
# set node tag on all nodes, value='yes'
for nodename in group_nodes:
try:
self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
except:
traceback.print_exc()
- print 'node', nodename, 'seems to already have tag', nodegroupname
+ print('node', nodename, 'seems to already have tag', nodegroupname)
# check anyway
try:
expect_yes = self.apiserver.GetNodeTags(auth,
'tagname' : nodegroupname},
['value'])[0]['value']
if expect_yes != "yes":
- print 'Mismatch node tag on node',nodename,'got',expect_yes
+ print('Mismatch node tag on node',nodename,'got',expect_yes)
overall = False
except:
if not self.options.dry_run:
- print 'Cannot find tag', nodegroupname, 'on node', nodename
+ print('Cannot find tag', nodegroupname, 'on node', nodename)
overall = False
else:
try:
- print 'cleaning nodegroup', nodegroupname
+ print('cleaning nodegroup', nodegroupname)
self.apiserver.DeleteNodeGroup(auth, nodegroupname)
except:
traceback.print_exc()
node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
for node_spec in site_spec['nodes'] ]
return node_infos
-
+
def all_nodenames(self):
return [ x[0] for x in self.all_node_infos() ]
- def all_reservable_nodenames(self):
+ def all_reservable_nodenames(self):
res = []
for site_spec in self.plc_spec['sites']:
for node_spec in site_spec['nodes']:
def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
silent_minutes, period_seconds = 15):
if self.options.dry_run:
- print 'dry_run'
+ print('dry_run')
return True
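+        # each CompleterTask is expected to provide actual_run() -> bool,
+        # plus message() and failure_epilogue() for reporting; Completer.run()
+        # then polls all tasks until success or timeout - see Completer.py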
class CompleterTaskBootState(CompleterTask):
node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(),
[ self.hostname ],
['boot_state'])[0]
- self.last_boot_state = node['boot_state']
+ self.last_boot_state = node['boot_state']
return self.last_boot_state == target_boot_state
except:
return False
def message(self):
- return "CompleterTaskBootState with node %s" % self.hostname
+ return "CompleterTaskBootState with node {}".format(self.hostname)
def failure_epilogue(self):
- print "node %s in state %s - expected %s" %\
- (self.hostname, self.last_boot_state, target_boot_state)
-
+ print("node {} in state {} - expected {}"\
+ .format(self.hostname, self.last_boot_state, target_boot_state))
+
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
period = timedelta(seconds=period_seconds)
# the nodes that haven't checked yet - start with a full list and shrink over time
- utils.header("checking nodes boot state (expected %s)" % target_boot_state)
+ utils.header("checking nodes boot state (expected {})".format(target_boot_state))
tasks = [ CompleterTaskBootState(self,hostname) \
for (hostname,_) in self.all_node_infos() ]
message = 'check_boot_state={}'.format(target_boot_state)
return True
# probing nodes
- def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+ def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
class CompleterTaskPingNode(CompleterTask):
def __init__(self, hostname):
self.hostname = hostname
def run(self, silent):
- command="ping -c 1 -w 1 %s >& /dev/null" % self.hostname
+ command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
return utils.system(command, silent=silent) == 0
def failure_epilogue(self):
- print "Cannot ping node with name %s" % self.hostname
+ print("Cannot ping node with name {}".format(self.hostname))
timeout = timedelta(seconds = timeout_seconds)
graceout = timeout
period = timedelta(seconds = period_seconds)
return self.check_nodes_ping()
def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
- # various delays
+ # various delays
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
period = timedelta(seconds=period_seconds)
vservername = self.vservername
- if debug:
+ if debug:
message = "debug"
completer_message = 'ssh_node_debug'
- local_key = "keys/%(vservername)s-debug.rsa" % locals()
- else:
+ local_key = "keys/{vservername}-debug.rsa".format(**locals())
+ else:
message = "boot"
completer_message = 'ssh_node_boot'
- local_key = "keys/key_admin.rsa"
- utils.header("checking ssh access to nodes (expected in %s mode)" % message)
+ local_key = "keys/key_admin.rsa"
+ utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
node_infos = self.all_node_infos()
tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
boot_state=message, dry_run=self.options.dry_run) \
for (nodename, qemuname) in node_infos ]
return Completer(tasks, message=completer_message).run(timeout, graceout, period)
-
+
def ssh_node_debug(self):
"Tries to ssh into nodes in debug mode with the debug ssh key"
return self.check_nodes_ssh(debug = True,
timeout_minutes = self.ssh_node_debug_timeout,
silent_minutes = self.ssh_node_debug_silent)
-
+
def ssh_node_boot(self):
"Tries to ssh into nodes in production mode with the root ssh key"
return self.check_nodes_ssh(debug = False,
def node_bmlogs(self):
"Checks that there's a non-empty dir. /var/log/bm/raw"
return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw")) == 0
-
+
@node_mapper
def qemu_local_init(self): pass
@node_mapper
@node_mapper
def qemu_local_config(self): pass
@node_mapper
+ def qemu_export(self): pass
+ @node_mapper
+ def qemu_cleanlog(self): pass
+ @node_mapper
def nodestate_reinstall(self): pass
@node_mapper
+ def nodestate_upgrade(self): pass
+ @node_mapper
def nodestate_safeboot(self): pass
@node_mapper
def nodestate_boot(self): pass
@node_mapper
def nodestate_show(self): pass
@node_mapper
- def qemu_export(self): pass
-
+ def nodedistro_f14(self): pass
+ @node_mapper
+ def nodedistro_f18(self): pass
+ @node_mapper
+ def nodedistro_f20(self): pass
+ @node_mapper
+ def nodedistro_f21(self): pass
+ @node_mapper
+ def nodedistro_f22(self): pass
+ @node_mapper
+ def nodedistro_show(self): pass
+
### check hooks : invoke scripts from hooks/{node,slice}
- def check_hooks_node(self):
+ def check_hooks_node(self):
return self.locate_first_node().check_hooks()
- def check_hooks_sliver(self) :
+ def check_hooks_sliver(self) :
return self.locate_first_sliver().check_hooks()
-
+
def check_hooks(self):
"runs unit tests in the node and slice contexts - see hooks/{node,slice}"
return self.check_hooks_node() and self.check_hooks_sliver()
def actual_run(self):
return self.test_sliver.check_initscript_stamp(self.stamp)
def message(self):
- return "initscript checker for %s" % self.test_sliver.name()
+ return "initscript checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "initscript stamp %s not found in sliver %s"%\
- (self.stamp, self.test_sliver.name())
-
+ print("initscript stamp {} not found in sliver {}"\
+ .format(self.stamp, self.test_sliver.name()))
+
tasks = []
for slice_spec in self.plc_spec['slices']:
- if not slice_spec.has_key('initscriptstamp'):
+ if 'initscriptstamp' not in slice_spec:
continue
stamp = slice_spec['initscriptstamp']
slicename = slice_spec['slice_fields']['name']
for nodename in slice_spec['nodenames']:
- print 'nodename', nodename, 'slicename', slicename, 'stamp', stamp
+ print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
site,node = self.locate_node(nodename)
# xxx - passing the wrong site - probably harmless
test_site = TestSite(self, site)
tasks.append(CompleterTaskInitscript(test_sliver, stamp))
return Completer(tasks, message='check_initscripts').\
run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
-
+
def check_initscripts(self):
"check that the initscripts have triggered"
return self.do_check_initscripts()
-
+
def initscripts(self):
"create initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
- utils.pprint('Adding Initscript in plc %s' % self.plc_spec['name'], initscript)
+ utils.pprint('Adding Initscript in plc {}'.format(self.plc_spec['name']), initscript)
self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
return True
"delete initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
initscript_name = initscript['initscript_fields']['name']
- print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
+            print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
try:
self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
- print initscript_name, 'deleted'
+ print(initscript_name, 'deleted')
except:
- print 'deletion went wrong - probably did not exist'
+ print('deletion went wrong - probably did not exist')
return True
### manage slices
else:
test_slice.create_slice()
return True
-
+
@slice_mapper__tasks(20, 10, 15)
def ssh_slice(self): pass
@slice_mapper__tasks(20, 19, 15)
@node_mapper
def keys_clear_known_hosts(self): pass
-
+
def plcapi_urls(self):
+ """
+ attempts to reach the PLCAPI with various forms for the URL
+ """
return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
def speed_up_slices(self):
return self._speed_up_slices(5, 1)
def _speed_up_slices(self, p, r):
- # create the template on the server-side
- template = "%s.nodemanager" % self.name()
+ # create the template on the server-side
+ template = "{}.nodemanager".format(self.name())
with open(template,"w") as template_file:
- template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
+ template_file.write('OPTIONS="-p {} -r {} -d"\n'.format(p, r))
in_vm = "/var/www/html/PlanetLabConf/nodemanager"
- remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+ remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
self.test_ssh.copy_abs(template, remote)
# Add a conf file
if not self.apiserver.GetConfFiles(self.auth_root(),
def debug_nodemanager(self):
"sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
- template = "%s.nodemanager" % self.name()
+ template = "{}.nodemanager".format(self.name())
with open(template,"w") as template_file:
template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
in_vm = "/var/www/html/PlanetLabConf/nodemanager"
- remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+ remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
self.test_ssh.copy_abs(template, remote)
return True
@node_mapper
def qemu_timestamp(self) : pass
+ @node_mapper
+ def qemu_nodefamily(self): pass
+
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
for plc in [ self ] + other_plcs:
return plc.locate_sliver_obj(nodename, slicename)
except:
pass
- raise Exception, "Cannot locate sliver %s@%s among all PLCs" % (nodename, slicename)
+ raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
# implement this one as a cross step so that we can take advantage of different nodes
# in multi-plcs mode
def cross_check_tcp(self, other_plcs):
"check TCP connectivity between 2 slices (or in loopback if only one is defined)"
- if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
+ if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
utils.header("check_tcp: no/empty config found")
return True
specs = self.plc_spec['tcp_specs']
def actual_run(self):
return self.test_sliver.check_tcp_ready(port = 9999)
def message(self):
- return "network ready checker for %s" % self.test_sliver.name()
+ return "network ready checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "could not bind port from sliver %s" % self.test_sliver.name()
+ print("could not bind port from sliver {}".format(self.test_sliver.name()))
sliver_specs = {}
tasks = []
# locate the TestSliver instances involved, and cache them in the spec instance
spec['s_sliver'] = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
spec['c_sliver'] = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
- message = "Will check TCP between s=%s and c=%s" % \
- (spec['s_sliver'].name(), spec['c_sliver'].name())
+ message = "Will check TCP between s={} and c={}"\
+ .format(spec['s_sliver'].name(), spec['c_sliver'].name())
if 'client_connect' in spec:
- message += " (using %s)" % spec['client_connect']
+ message += " (using {})".format(spec['client_connect'])
utils.header(message)
# we need to check network presence in both slivers, but also
# avoid to insert a sliver several times
if not Completer(tasks, message='check for network readiness in slivers').\
run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
return False
-
+
# run server and client
for spec in specs:
port = spec['port']
# the issue here is that we have the server run in background
# and so we have no clue if it took off properly or not
# looks like in some cases it does not
- if not spec['s_sliver'].run_tcp_server(port, timeout=20):
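+            # run_tcp_server now takes the address to bind to - here the
+            # server node's hostname - in addition to the port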
+ address = spec['s_sliver'].test_node.name()
+ if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
overall = False
break
return overall
# painfully enough, we need to allow for some time as netflow might show up last
- def check_system_slice(self):
+ def check_system_slice(self):
"all nodes: check that a system slice is alive"
# netflow currently not working in the lxc distro
# drl not built at all in the wtx distro
# if we find either of them we're happy
return self.check_netflow() or self.check_drl()
-
+
# expose these
def check_netflow(self): return self._check_system_slice('netflow')
def check_drl(self): return self._check_system_slice('drl')
# we have the slices up already here, so it should not take too long
def _check_system_slice(self, slicename, timeout_minutes=5, period_seconds=15):
class CompleterTaskSystemSlice(CompleterTask):
- def __init__(self, test_node, dry_run):
+ def __init__(self, test_node, dry_run):
self.test_node = test_node
self.dry_run = dry_run
- def actual_run(self):
+ def actual_run(self):
return self.test_node._check_system_slice(slicename, dry_run=self.dry_run)
- def message(self):
- return "System slice %s @ %s" % (slicename, self.test_node.name())
- def failure_epilogue(self):
- print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
+ def message(self):
+ return "System slice {} @ {}".format(slicename, self.test_node.name())
+ def failure_epilogue(self):
+ print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
timeout = timedelta(minutes=timeout_minutes)
silent = timedelta(0)
period = timedelta(seconds=period_seconds)
"runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
command = location
command += " -- --check"
def sfa_install_all(self):
"yum install sfa sfa-plc sfa-sfatables sfa-client"
- return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
+ return (self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client") and
+ self.run_in_guest("systemctl enable sfa-registry")==0 and
+ self.run_in_guest("systemctl enable sfa-aggregate")==0)
def sfa_install_core(self):
"yum install sfa"
- return self.yum_install("sfa")
-
+ return self.dnf_install("sfa")
+
def sfa_install_plc(self):
"yum install sfa-plc"
- return self.yum_install("sfa-plc")
-
+ return self.dnf_install("sfa-plc")
+
def sfa_install_sfatables(self):
"yum install sfa-sfatables"
- return self.yum_install("sfa-sfatables")
+ return self.dnf_install("sfa-sfatables")
# for some very odd reason, this sometimes fails with the following symptom
# # yum install sfa-client
# installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
# [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
# even though in the same context I have
- # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
+ # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
# Filesystem Size Used Avail Use% Mounted on
# /dev/hdv1 806G 264G 501G 35% /
# none 16M 36K 16M 1% /tmp
# so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
def sfa_install_client(self):
"yum install sfa-client"
- first_try = self.yum_install("sfa-client")
+ first_try = self.dnf_install("sfa-client")
if first_try:
return True
utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
code, cached_rpm_path = \
utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
- utils.header("rpm_path=<<%s>>" % rpm_path)
- # just for checking
- self.run_in_guest("rpm -i %s" % cached_rpm_path)
- return self.yum_check_installed("sfa-client")
+ utils.header("rpm_path=<<{}>>".format(rpm_path))
+ # just for checking
+ self.run_in_guest("rpm -i {}".format(cached_rpm_path))
+ return self.dnf_check_installed("sfa-client")
def sfa_dbclean(self):
"thoroughly wipes off the SFA database"
return self.run_in_guest("sfaadmin reg nuke") == 0 or \
self.run_in_guest("sfa-nuke.py") == 0 or \
- self.run_in_guest("sfa-nuke-plc.py") == 0
+ self.run_in_guest("sfa-nuke-plc.py") == 0 or \
+ self.run_in_guest("sfaadmin registry nuke") == 0
def sfa_fsclean(self):
"cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
def sfa_plcclean(self):
"cleans the PLC entries that were created as a side effect of running the script"
- # ignore result
+ # ignore result
sfa_spec = self.plc_spec['sfa']
for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
try:
self.apiserver.DeleteSite(self.auth_root(),login_base)
except:
- print "Site %s already absent from PLC db"%login_base
+ print("Site {} already absent from PLC db".format(login_base))
- for spec_name in ['pi_spec','user_spec']:
+ for spec_name in ['pi_spec', 'user_spec']:
user_spec = auth_sfa_spec[spec_name]
username = user_spec['email']
try:
self.apiserver.DeletePerson(self.auth_root(),username)
- except:
+ except:
# this in fact is expected as sites delete their members
- #print "User %s already absent from PLC db"%username
+ #print "User {} already absent from PLC db".format(username)
pass
- print "REMEMBER TO RUN sfa_import AGAIN"
+ print("REMEMBER TO RUN sfa_import AGAIN")
return True
def sfa_uninstall(self):
self.run_in_guest("rm -rf /var/lib/sfa")
self.run_in_guest("rm -rf /etc/sfa")
self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
- # xxx tmp
+ # xxx tmp
self.run_in_guest("rpm -e --noscripts sfa-plc")
return True
# if the yum install phase fails, consider the test is successful
# other combinations will eventually run it hopefully
def sfa_utest(self):
- "yum install sfa-tests and run SFA unittests"
- self.run_in_guest("yum -y install sfa-tests")
+ "dnf install sfa-tests and run SFA unittests"
+ self.run_in_guest("dnf -y install sfa-tests")
# failed to install - forget it
- if self.run_in_guest("rpm -q sfa-tests") != 0:
+ if self.run_in_guest("rpm -q sfa-tests") != 0:
utils.header("WARNING: SFA unit tests failed to install, ignoring")
return True
return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
###
def confdir(self):
- dirname = "conf.%s" % self.plc_spec['name']
+ dirname = "conf.{}".format(self.plc_spec['name'])
if not os.path.isdir(dirname):
- utils.system("mkdir -p %s" % dirname)
+ utils.system("mkdir -p {}".format(dirname))
if not os.path.isdir(dirname):
- raise Exception,"Cannot create config dir for plc %s" % self.name()
+ raise Exception("Cannot create config dir for plc {}".format(self.name()))
return dirname
def conffile(self, filename):
- return "%s/%s" % (self.confdir(),filename)
+ return "{}/{}".format(self.confdir(), filename)
def confsubdir(self, dirname, clean, dry_run=False):
- subdirname = "%s/%s" % (self.confdir(),dirname)
+ subdirname = "{}/{}".format(self.confdir(), dirname)
if clean:
- utils.system("rm -rf %s" % subdirname)
- if not os.path.isdir(subdirname):
- utils.system("mkdir -p %s" % subdirname)
+ utils.system("rm -rf {}".format(subdirname))
+ if not os.path.isdir(subdirname):
+ utils.system("mkdir -p {}".format(subdirname))
if not dry_run and not os.path.isdir(subdirname):
- raise "Cannot create config subdir %s for plc %s" % (dirname,self.name())
+ raise "Cannot create config subdir {} for plc {}".format(dirname, self.name())
return subdirname
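+ # aside: now that the code is Python-3 only, the mkdir-through-the-shell dance
+ # could arguably use pathlib instead; a sketch, not applied here so the port
+ # stays mechanical:
+ # from pathlib import Path
+ # def confsubdir(self, dirname, clean, dry_run=False):
+ #     subdir = Path(self.confdir()) / dirname
+ #     if clean:
+ #         utils.system("rm -rf {}".format(subdir))
+ #     if not dry_run:
+ #         subdir.mkdir(parents=True, exist_ok=True)
+ #     return str(subdir)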
-
+
def conffile_clean(self, filename):
filename=self.conffile(filename)
- return utils.system("rm -rf %s" % filename)==0
-
+ return utils.system("rm -rf {}".format(filename)) == 0
+
###
def sfa_configure(self):
"run sfa-config-tty"
tmpname = self.conffile("sfa-config-tty")
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
- fileconf.write('e %s\n%s\n'%(var,value))
+ for var, value in self.plc_spec['sfa']['settings'].items():
+ fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('R\n')
fileconf.write('q\n')
- utils.system('cat %s' % tmpname)
- self.run_in_guest_piped('cat %s' % tmpname, 'sfa-config-tty')
+ utils.system('cat {}'.format(tmpname))
+ self.run_in_guest_piped('cat {}'.format(tmpname), 'sfa-config-tty')
return True
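+ # the temporary file built above scripts an sfa-config-tty session; with one
+ # setting it would read something like this (value made up for illustration,
+ # w/R/q presumably meaning write, restart and quit in sfa-config-tty's language):
+ #   e SFA_REGISTRY_ROOT_AUTH
+ #   plc
+ #   w
+ #   R
+ #   q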
def aggregate_xml_line(self):
port = self.plc_spec['sfa']['neighbours-port']
- return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
- (self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
+ return '<aggregate addr="{}" hrn="{}" port="{}"/>'\
+ .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'], port)
def registry_xml_line(self):
- return '<registry addr="%s" hrn="%s" port="12345"/>' % \
- (self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
+ return '<registry addr="{}" hrn="{}" port="12345"/>'\
+ .format(self.vserverip, self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
- # a cross step that takes all other plcs in argument
+ # a cross step that takes all other plcs as arguments
return True
agg_fname = self.conffile("agg.xml")
with open(agg_fname,"w") as out:
- out.write("<aggregates>%s</aggregates>\n" % \
- " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
- utils.header("(Over)wrote %s" % agg_fname)
+ out.write("<aggregates>{}</aggregates>\n"\
+ .format(" ".join([ plc.aggregate_xml_line() for plc in other_plcs ])))
+ utils.header("(Over)wrote {}".format(agg_fname))
reg_fname=self.conffile("reg.xml")
with open(reg_fname,"w") as out:
- out.write("<registries>%s</registries>\n" % \
- " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
- utils.header("(Over)wrote %s" % reg_fname)
+ out.write("<registries>{}</registries>\n"\
+ .format(" ".join([ plc.registry_xml_line() for plc in other_plcs ])))
+ utils.header("(Over)wrote {}".format(reg_fname))
return self.test_ssh.copy_abs(agg_fname,
- '/%s/etc/sfa/aggregates.xml' % self.vm_root_in_host()) == 0 \
+ '/{}/etc/sfa/aggregates.xml'.format(self.vm_root_in_host())) == 0 \
and self.test_ssh.copy_abs(reg_fname,
- '/%s/etc/sfa/registries.xml' % self.vm_root_in_host()) == 0
+ '/{}/etc/sfa/registries.xml'.format(self.vm_root_in_host())) == 0
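+ # with two peer plcs the pushed files would look something like this, each on
+ # a single line (addresses, hrns and ports made up for illustration):
+ #   <aggregates><aggregate addr="10.0.0.1" hrn="pla" port="12346"/>
+ #               <aggregate addr="10.0.0.2" hrn="plb" port="12346"/></aggregates>
+ #   <registries><registry addr="10.0.0.1" hrn="pla" port="12345"/>
+ #               <registry addr="10.0.0.2" hrn="plb" port="12345"/></registries>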
def sfa_import(self):
"use sfaadmin to import from plc"
auth = self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
- return self.run_in_guest('sfaadmin reg import_registry') == 0
+ return self.run_in_guest('sfaadmin reg import_registry') == 0
def sfa_start(self):
- "service sfa start"
- return self.start_service('sfa')
+ "start SFA through systemctl"
+ return (self.start_stop_systemd('sfa-registry', 'start') and
+ self.start_stop_systemd('sfa-aggregate', 'start'))
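+ # start_stop_systemd is assumed to be defined elsewhere in this class; a
+ # minimal sketch of what the two calls above rely on:
+ # def start_stop_systemd(self, service, start_or_stop):
+ #     "systemctl start or stop a service inside the guest"
+ #     return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0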
def sfi_configure(self):
"Create /root/sfi on the plc side for sfi client configuration"
- if self.options.dry_run:
+ if self.options.dry_run:
utils.header("DRY RUN - skipping step")
return True
sfa_spec = self.plc_spec['sfa']
for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
test_slice = TestAuthSfa(self, slice_spec)
dir_basename = os.path.basename(test_slice.sfi_path())
- dir_name = self.confsubdir("dot-sfi/%s" % dir_basename,
+ dir_name = self.confsubdir("dot-sfi/{}".format(dir_basename),
clean=True, dry_run=self.options.dry_run)
test_slice.sfi_configure(dir_name)
# push into the remote /root/sfi area
location = test_slice.sfi_path()
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.mkdir(remote, abs=True)
- # need to strip last level or remote otherwise we get an extra dir level
+ # need to strip the last level of remote, otherwise we get an extra dir level
self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
test_slice = TestAuthSfa(self, slice_spec)
in_vm = test_slice.sfi_path()
- remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
+ remote = "{}/{}".format(self.vm_root_in_host(), in_vm)
if self.test_ssh.copy_abs(filename, remote) !=0:
overall = False
return overall
@auth_sfa_mapper
def sfa_provision_empty(self): pass
@auth_sfa_mapper
+ def sfa_describe(self): pass
+ @auth_sfa_mapper
def sfa_check_slice_plc(self): pass
@auth_sfa_mapper
def sfa_check_slice_plc_empty(self): pass
def sfa_delete_slice(self): pass
def sfa_stop(self):
- "service sfa stop"
- return self.stop_service('sfa')
+ "stop sfa through systemclt"
+ return (self.start_stop_systemd('sfa-aggregate', 'stop') and
+ self.start_stop_systemd('sfa-registry', 'stop'))
def populate(self):
"creates random entries in the PLCAPI"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote = "%s/%s" % (self.vm_root_in_host(), location)
+ remote = "{}/{}".format(self.vm_root_in_host(), location)
self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
command = location
command += " -- --preserve --short-names"
# (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
# (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
# (1.a)
- print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/log")
self.gather_var_logs()
# (1.b)
- print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
self.gather_pgsql_logs()
# (1.c)
- print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+ print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
self.gather_root_sfi()
- # (2)
- print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
+ # (2)
+ print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
test_node.gather_qemu_logs()
# (3)
- print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+ print("-------------------- TestPlc.gather_logs : nodes's /var/log")
self.gather_nodes_var_logs()
# (4)
- print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+ print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
self.gather_slivers_var_logs()
return True
def gather_slivers_var_logs(self):
for test_sliver in self.all_sliver_objs():
remote = test_sliver.tar_var_logs()
- utils.system("mkdir -p logs/sliver.var-log.%s" % test_sliver.name())
- command = remote + " | tar -C logs/sliver.var-log.%s -xf -" % test_sliver.name()
+ utils.system("mkdir -p logs/sliver.var-log.{}".format(test_sliver.name()))
+ command = remote + " | tar -C logs/sliver.var-log.{} -xf -".format(test_sliver.name())
utils.system(command)
return True
def gather_var_logs(self):
- utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
- to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
- command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
+ utils.system("mkdir -p logs/myplc.var-log.{}".format(self.name()))
+ to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
+ command = to_plc + "| tar -C logs/myplc.var-log.{} -xf -".format(self.name())
utils.system(command)
- command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name()
+ command = "chmod a+r,a+x logs/myplc.var-log.{}/httpd".format(self.name())
utils.system(command)
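+ # the recurring idiom in these gather_* methods streams a tarball out of the
+ # guest and unpacks it locally in one pipeline; with the ssh wrapping supplied
+ # by actual_command_in_guest, the net effect is roughly:
+ #   ssh root@<plc> 'tar -C /var/log/ -cf - .' | tar -C logs/myplc.var-log.<name> -xf -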
def gather_pgsql_logs(self):
- utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
- to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
- command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
+ utils.system("mkdir -p logs/myplc.pgsql-log.{}".format(self.name()))
+ to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
+ command = to_plc + "| tar -C logs/myplc.pgsql-log.{} -xf -".format(self.name())
utils.system(command)
def gather_root_sfi(self):
- utils.system("mkdir -p logs/sfi.%s"%self.name())
- to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
- command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
+ utils.system("mkdir -p logs/sfi.{}".format(self.name()))
+ to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
+ command = to_plc + "| tar -C logs/sfi.{} -xf -".format(self.name())
utils.system(command)
def gather_nodes_var_logs(self):
test_node = TestNode(self, test_site, node_spec)
test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
command = test_ssh.actual_command("tar -C /var/log -cf - .")
- command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
- utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
+ command = command + "| tar -C logs/node.var-log.{} -xf -".format(test_node.name())
+ utils.system("mkdir -p logs/node.var-log.{}".format(test_node.name()))
utils.system(command)
# uses options.dbname if it is found
try:
name = self.options.dbname
- if not isinstance(name, StringTypes):
+ if not isinstance(name, str):
raise Exception
except:
t = datetime.now()
d = t.date()
name = str(d)
- return "/root/%s-%s.sql" % (database, name)
+ return "/root/{}-{}.sql".format(database, name)
def plc_db_dump(self):
'dump the planetlab5 DB in /root in the PLC - filename has time'
dump=self.dbfile("planetab5")
self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
- utils.header('Dumped planetlab5 database in %s' % dump)
+ utils.header('Dumped planetlab5 database in {}'.format(dump))
return True
def plc_db_restore(self):
'restore the planetlab5 DB - looks broken, but run -n might help'
dump = self.dbfile("planetab5")
- ##stop httpd service
- self.run_in_guest('service httpd stop')
+ self.run_in_guest('systemctl stop httpd')
# xxx - need another wrapper
self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
- ##starting httpd service
- self.run_in_guest('service httpd start')
+ self.run_in_guest('systemctl start httpd')
utils.header('Database restored from ' + dump)
if '@' in step:
step, qualifier = step.split('@')
# or be defined as forced or ignored by default
- for keyword in ['_ignore','_force']:
+ for keyword in ['_ignore', '_force']:
if step.endswith(keyword):
step=step.replace(keyword,'')
if step == SEP or step == SEPSFA :
wrapped = ignore_result(method)
# wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
setattr(TestPlc, name, wrapped)
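+ # examples of the step-name decorations handled here:
+ #   "somestep@2"             -> run somestep on plc number 2 only (hypothetical name)
+ #   "ssh_slice_again_ignore" -> run ssh_slice_again but discard its result
+ #   "somestep_force"         -> presumably run somestep even after earlier failures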
-
+
# @ignore_result
# def ssh_slice_again_ignore (self): pass
# @ignore_result
# def check_initscripts_ignore (self): pass
-
+
def standby_1_through_20(self):
"""convenience function to wait for a specified number of minutes"""
pass
- @standby_generic
+ @standby_generic
def standby_1(): pass
- @standby_generic
+ @standby_generic
def standby_2(): pass
- @standby_generic
+ @standby_generic
def standby_3(): pass
- @standby_generic
+ @standby_generic
def standby_4(): pass
- @standby_generic
+ @standby_generic
def standby_5(): pass
- @standby_generic
+ @standby_generic
def standby_6(): pass
- @standby_generic
+ @standby_generic
def standby_7(): pass
- @standby_generic
+ @standby_generic
def standby_8(): pass
- @standby_generic
+ @standby_generic
def standby_9(): pass
- @standby_generic
+ @standby_generic
def standby_10(): pass
- @standby_generic
+ @standby_generic
def standby_11(): pass
- @standby_generic
+ @standby_generic
def standby_12(): pass
- @standby_generic
+ @standby_generic
def standby_13(): pass
- @standby_generic
+ @standby_generic
def standby_14(): pass
- @standby_generic
+ @standby_generic
def standby_15(): pass
- @standby_generic
+ @standby_generic
def standby_16(): pass
- @standby_generic
+ @standby_generic
def standby_17(): pass
- @standby_generic
+ @standby_generic
def standby_18(): pass
- @standby_generic
+ @standby_generic
def standby_19(): pass
- @standby_generic
+ @standby_generic
def standby_20(): pass
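+ # for reference, standby_generic presumably derives the duration from the
+ # method name and delegates to the module-level standby() helper; a sketch:
+ # def standby_generic(func):
+ #     def actual(self):
+ #         minutes = int(func.__name__.split('_')[1])
+ #         return standby(minutes, self.options.dry_run)
+ #     actual.__name__ = func.__name__
+ #     return actual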
# convenience for debugging the test logic