node_method = TestNode.__dict__[method.__name__]
for test_node in self.all_nodes():
if not node_method(test_node, *args, **kwds):
- overall=False
+ overall = False
return overall
# maintain __name__ for ignore_result
map_on_nodes.__name__ = method.__name__
default_steps = [
'show', SEP,
- 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
- 'plc_install', 'plc_configure', 'plc_start', SEP,
+ 'plcvm_delete', 'plcvm_timestamp', 'plcvm_create', SEP,
+ 'django_install', 'plc_install', 'plc_configure', 'plc_start', SEP,
'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
- 'plcapi_urls','speed_up_slices', SEP,
+ 'plcapi_urls', 'speed_up_slices', SEP,
'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
# slices created under plcsh interactively seem to be fine but these ones don't have the tags
-# keep this our of the way for now
+# keep this out of the way for now
'check_vsys_defaults_ignore', SEP,
# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init',
+ 'bootcd', 'qemu_local_config', SEP,
'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
- 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
- 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
- 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
- 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
- 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
+ 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure',
+ 'sfa_start', 'sfa_import', SEPSFA,
+ 'sfi_configure@1', 'sfa_register_site@1', 'sfa_register_pi@1', SEPSFA,
+ 'sfa_register_user@1', 'sfa_update_user@1',
+ 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
+ 'sfa_remove_user_from_slice@1', 'sfi_show_slice_researchers@1',
+ 'sfa_insert_user_in_slice@1', 'sfi_show_slice_researchers@1', SEPSFA,
'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
# but as the stress test might take a while, we sometimes missed the debug mode..
'probe_kvm_iptables',
'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
- 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
+ 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', SEP,
'ssh_slice_sfa@1', SEPSFA,
- 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
+ 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1',
+ 'sfa_check_slice_plc_empty@1', SEPSFA,
'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
- 'cross_check_tcp@1', 'check_system_slice', SEP,
+ 'check_system_slice', SEP,
# for inspecting the slice while it runs the first time
#'fail',
# check slices are turned off properly
other_steps = [
'export', 'show_boxes', 'super_speed_up_slices', SEP,
'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
- 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
+ 'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
'populate', SEP,
- 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
- 'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
- 'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
+ 'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', 'nodestate_upgrade', SEP,
+ 'nodedistro_show', 'nodedistro_f14', 'nodedistro_f18', SEP,
+ 'nodedistro_f20', 'nodedistro_f21', 'nodedistro_f22', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
- 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
- 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+ 'sfa_install_core', 'sfa_install_sfatables',
+ 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+ 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
'sfa_get_expires', SEPSFA,
- 'plc_db_dump' , 'plc_db_restore', SEP,
- 'check_netflow','check_drl', SEP,
- 'slice_fs_present', SEP,
- 'standby_1_through_20','yes','no',SEP,
+ 'plc_db_dump', 'plc_db_restore', SEP,
+ 'check_netflow', 'check_drl', SEP,
+ # used to be part of default steps but won't work since f27
+ 'cross_check_tcp@1',
+ 'slice_fs_present', 'check_initscripts', SEP,
+ 'standby_1_through_20', 'yes', 'no', SEP,
'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
]
default_bonding_steps = [
return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),
keep_stdin = True))
- def yum_check_installed(self, rpms):
+ def dnf_check_installed(self, rpms):
if isinstance(rpms, list):
rpms=" ".join(rpms)
return self.run_in_guest("rpm -q {}".format(rpms)) == 0
# does a yum install in the vs, ignore yum retcod, check with rpm
- def yum_install(self, rpms):
+ def dnf_install(self, rpms):
if isinstance(rpms, list):
rpms=" ".join(rpms)
- yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
+ yum_mode = self.run_in_guest("dnf -y install {}".format(rpms))
if yum_mode != 0:
self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
# yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
- self.run_in_guest("yum-complete-transaction -y")
- return self.yum_check_installed(rpms)
+ # nothing similar with dnf, forget about this for now
+ # self.run_in_guest("yum-complete-transaction -y")
+ return self.dnf_check_installed(rpms)
+
+ def pip_install(self, package):
+ return self.run_in_guest("pip3 install {}".format(package)) == 0
def auth_root(self):
return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
continue
if k == 'nodes':
if v:
- print('+ ','nodes : ', end=' ')
+ print('+ ', 'nodes : ', end=' ')
for node in v:
print(node['node_fields']['hostname'],'', end=' ')
print('')
print(username,'', end=' ')
print('')
elif k == 'slice_fields':
- print('+ fields',':', end=' ')
+ print('+ fields', ':', end=' ')
print('max_nodes=',v['max_nodes'], end=' ')
print('')
else:
"vserver delete the test myplc"
stamp_path = self.vm_timestamp_path()
self.run_in_host("rm -f {}".format(stamp_path))
- self.run_in_host("virsh -c lxc:// destroy {}".format(self.vservername))
- self.run_in_host("virsh -c lxc:// undefine {}".format(self.vservername))
+ self.run_in_host("virsh -c lxc:/// destroy {}".format(self.vservername))
+ self.run_in_host("virsh -c lxc:/// undefine {}".format(self.vservername))
self.run_in_host("rm -fr /vservers/{}".format(self.vservername))
return True
create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
return self.run_in_host(create_vserver) == 0
+ ### install django through pip
+ def django_install(self):
+ # plcapi requires Django, which is no longer provided by fedora as an rpm
+ # so we use pip instead
+ """
+ pip install Django
+ """
+ return self.pip_install('Django')
+
### install_rpm
def plc_install(self):
"""
raise Exception("Unsupported personality {}".format(self.options.personality))
nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
- pkgs_list=[]
- pkgs_list.append("slicerepo-{}".format(nodefamily))
+ # check it's possible to install just 'myplc-core' first
+ if not self.dnf_install("myplc-core"):
+ return False
+
+ pkgs_list = []
pkgs_list.append("myplc")
+ pkgs_list.append("slicerepo-{}".format(nodefamily))
pkgs_list.append("noderepo-{}".format(nodefamily))
pkgs_string=" ".join(pkgs_list)
- return self.yum_install(pkgs_list)
+ return self.dnf_install(pkgs_list)
def install_syslinux6(self):
"""
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
- return self.yum_install( ['mod_python'] )
+ return self.dnf_install( ['mod_python'] )
###
def plc_configure(self):
utils.system('rm {}'.format(tmpname))
return True
-# f14 is a bit odd in this respect, although this worked fine in guests up to f18
-# however using a vplc guest under f20 requires this trick
-# the symptom is this: service plc start
-# Starting plc (via systemctl): Failed to get D-Bus connection: \
-# Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
-# weird thing is the doc says f14 uses upstart by default and not systemd
-# so this sounds kind of harmless
- def start_service(self, service):
- return self.start_stop_service(service, 'start')
- def stop_service(self, service):
- return self.start_stop_service(service, 'stop')
-
- def start_stop_service(self, service, start_or_stop):
- "utility to start/stop a service with the special trick starting with f14"
- has_systemctl = False
- if self.options.fcdistro[0] == 'f':
- number = int(self.options.fcdistro[1:])
- if number >= 14:
- has_systemctl = True
- if not has_systemctl:
- return self.run_in_guest("service {} {}".format(service, start_or_stop)) == 0
- else:
- # patch /sbin/service so it does not reset environment
- self.run_in_guest('sed -i -e \\"s,env -i,env,\\" /sbin/service')
- # this is because our own scripts in turn call service
- return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service {} {}"\
- .format(service, start_or_stop)) == 0
+ # care only about f>=27
+ def start_stop_systemd(self, service, start_or_stop):
+ "utility to start/stop a systemd-defined service (sfa)"
+ return self.run_in_guest("systemctl {} {}".format(start_or_stop, service)) == 0
def plc_start(self):
- "service plc start"
- return self.start_service('plc')
+ "start plc through systemctl"
+ return self.start_stop_systemd('plc', 'start')
def plc_stop(self):
- "service plc stop"
- return self.stop_service('plc')
+ "stop plc through systemctl"
+ return self.start_stop_systemd('plc', 'stop')
def plcvm_start(self):
"start the PLC vserver"
def delete_all_sites(self):
"Delete all sites in PLC, and related objects"
print('auth_root', self.auth_root())
- sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
+ sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
for site in sites:
# keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
# the issue here is that we have the server run in background
# and so we have no clue if it took off properly or not
# looks like in some cases it does not
- if not spec['s_sliver'].run_tcp_server(port, timeout=20):
+ address = spec['s_sliver'].test_node.name()
+ if not spec['s_sliver'].run_tcp_server(address, port, timeout=20):
overall = False
break
def sfa_install_all(self):
"yum install sfa sfa-plc sfa-sfatables sfa-client"
- return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
+ return (self.dnf_install("sfa sfa-plc sfa-sfatables sfa-client") and
+ self.run_in_guest("systemctl enable sfa-registry")==0 and
+ self.run_in_guest("systemctl enable sfa-aggregate")==0)
def sfa_install_core(self):
"yum install sfa"
- return self.yum_install("sfa")
+ return self.dnf_install("sfa")
def sfa_install_plc(self):
"yum install sfa-plc"
- return self.yum_install("sfa-plc")
+ return self.dnf_install("sfa-plc")
def sfa_install_sfatables(self):
"yum install sfa-sfatables"
- return self.yum_install("sfa-sfatables")
+ return self.dnf_install("sfa-sfatables")
# for some very odd reason, this sometimes fails with the following symptom
# # yum install sfa-client
# so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
def sfa_install_client(self):
"yum install sfa-client"
- first_try = self.yum_install("sfa-client")
+ first_try = self.dnf_install("sfa-client")
if first_try:
return True
utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
utils.header("rpm_path=<<{}>>".format(rpm_path))
# just for checking
self.run_in_guest("rpm -i {}".format(cached_rpm_path))
- return self.yum_check_installed("sfa-client")
+ return self.dnf_check_installed("sfa-client")
def sfa_dbclean(self):
"thoroughly wipes off the SFA database"
except:
print("Site {} already absent from PLC db".format(login_base))
- for spec_name in ['pi_spec','user_spec']:
+ for spec_name in ['pi_spec', 'user_spec']:
user_spec = auth_sfa_spec[spec_name]
username = user_spec['email']
try:
# if the yum install phase fails, consider the test is successful
# other combinations will eventually run it hopefully
def sfa_utest(self):
- "yum install sfa-tests and run SFA unittests"
- self.run_in_guest("yum -y install sfa-tests")
+ "dnf install sfa-tests and run SFA unittests"
+ self.run_in_guest("dnf -y install sfa-tests")
# failed to install - forget it
if self.run_in_guest("rpm -q sfa-tests") != 0:
utils.header("WARNING: SFA unit tests failed to install, ignoring")
return self.run_in_guest('sfaadmin reg import_registry') == 0
def sfa_start(self):
- "service sfa start"
- return self.start_service('sfa')
+ "start SFA through systemctl"
+ return (self.start_stop_systemd('sfa-registry', 'start') and
+ self.start_stop_systemd('sfa-aggregate', 'start'))
def sfi_configure(self):
def sfa_delete_slice(self): pass
def sfa_stop(self):
- "service sfa stop"
- return self.stop_service('sfa')
+ "stop sfa through systemctl"
+ return (self.start_stop_systemd('sfa-aggregate', 'stop') and
+ self.start_stop_systemd('sfa-registry', 'stop'))
def populate(self):
"creates random entries in the PLCAPI"
def plc_db_restore(self):
'restore the planetlab5 DB - looks broken, but run -n might help'
dump = self.dbfile("planetab5")
+ # NOTE(review): "planetab5" above looks like a typo for "planetlab5",
+ # the name used by every other line in this method - confirm against
+ # plc_db_dump / dbfile before relying on this step
- ##stop httpd service
- self.run_in_guest('service httpd stop')
+ # httpd must be down while the database is dropped and re-created
+ self.run_in_guest('systemctl stop httpd')
# xxx - need another wrapper
self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
##starting httpd service
- self.run_in_guest('service httpd start')
+ self.run_in_guest('systemctl start httpd')
utils.header('Database restored from ' + dump)
if '@' in step:
step, qualifier = step.split('@')
# or be defined as forced or ignored by default
- for keyword in ['_ignore','_force']:
+ for keyword in ['_ignore', '_force']:
if step.endswith(keyword):
step=step.replace(keyword,'')
if step == SEP or step == SEPSFA :