# keep this out of the way for now
'check_vsys_defaults_ignore', SEP,
# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', SEP,
- 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
- 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+ 'qemu_clean_mine', 'qemu_export', 'qemu_cleanlog', SEP,
+ 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
- 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+ 'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
+ 'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
# for inspecting the slice while it runs the first time
#'fail',
# check slices are turned off properly
+ 'debug_nodemanager',
'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
# check they are properly re-created with the same name
'fill_slices', 'ssh_slice_again', SEP,
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
'populate', SEP,
- 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+ 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
+ 'nodedistro_show','nodedistro_f14','nodedistro_f18', SEP,
+ 'nodedistro_f20', 'nodedistro_f21','nodedistro_f22', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
'sfa_get_expires', SEPSFA,
'plc_db_dump' , 'plc_db_restore', SEP,
'check_netflow','check_drl', SEP,
- 'debug_nodemanager', 'slice_fs_present', SEP,
+ 'slice_fs_present', SEP,
'standby_1_through_20','yes','no',SEP,
+ 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
]
- bonding_steps = [
+ default_bonding_steps = [
'bonding_init_partial',
'bonding_add_yum',
'bonding_install_rpms', SEP,
def yum_install(self, rpms):
if isinstance(rpms, list):
rpms=" ".join(rpms)
- self.run_in_guest("yum -y install {}".format(rpms))
+ yum_mode = self.run_in_guest("yum -y install {}".format(rpms))
+ if yum_mode != 0:
+ self.run_in_guest("dnf -y install --allowerasing {}".format(rpms))
# yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
self.run_in_guest("yum-complete-transaction -y")
return self.yum_check_installed(rpms)
### install_rpm
def plc_install(self):
- "yum install myplc, noderepo, and the plain bootstrapfs"
-
- # workaround for getting pgsql8.2 on centos5
- if self.options.fcdistro == "centos5":
- self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+ """
+ yum install myplc, noderepo
+ """
# compute nodefamily
if self.options.personality == "linux32":
pkgs_list.append("slicerepo-{}".format(nodefamily))
pkgs_list.append("myplc")
pkgs_list.append("noderepo-{}".format(nodefamily))
- pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
pkgs_string=" ".join(pkgs_list)
return self.yum_install(pkgs_list)
+ def install_syslinux6(self):
+ """
+ install syslinux6 from the fedora21 release
+ """
+ key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+ rpms = [
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+ ]
+ # this can be done several times
+ self.run_in_guest("rpm --import {key}".format(**locals()))
+ return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+ def bonding_builds(self):
+ """
+ list /etc/yum.repos.d on the myplc side
+ """
+ self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+ return True
+
+ def bonding_nodes(self):
+ """
+ List nodes known to the myplc together with their nodefamily
+ """
+ print("---------------------------------------- nodes")
+ for node in self.apiserver.GetNodes(self.auth_root()):
+ print("{} -> {}".format(node['hostname'],
+ self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
+ print("---------------------------------------- nodes")
+
+
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
"run plc-config-tty"
tmpname = '{}.plc-config-tty'.format(self.name())
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['settings'].items():
+ for var, value in self.plc_spec['settings'].items():
fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('q\n')
return True
# probing nodes
- def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+ def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
class CompleterTaskPingNode(CompleterTask):
def __init__(self, hostname):
self.hostname = hostname
@node_mapper
def qemu_local_config(self): pass
@node_mapper
+ def qemu_export(self): pass
+ @node_mapper
+ def qemu_cleanlog(self): pass
+ @node_mapper
def nodestate_reinstall(self): pass
@node_mapper
+ def nodestate_upgrade(self): pass
+ @node_mapper
def nodestate_safeboot(self): pass
@node_mapper
def nodestate_boot(self): pass
@node_mapper
def nodestate_show(self): pass
@node_mapper
- def qemu_export(self): pass
+ def nodedistro_f14(self): pass
+ @node_mapper
+ def nodedistro_f18(self): pass
+ @node_mapper
+ def nodedistro_f20(self): pass
+ @node_mapper
+ def nodedistro_f21(self): pass
+ @node_mapper
+ def nodedistro_f22(self): pass
+ @node_mapper
+ def nodedistro_show(self): pass
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node(self):
def keys_clear_known_hosts(self): pass
def plcapi_urls(self):
+ """
+ attempts to reach the PLCAPI with various forms for the URL
+ """
return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
def speed_up_slices(self):
@node_mapper
def qemu_timestamp(self) : pass
+ @node_mapper
+ def qemu_nodefamily(self): pass
+
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
for plc in [ self ] + other_plcs:
"run sfa-config-tty"
tmpname = self.conffile("sfa-config-tty")
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['sfa']['settings'].items():
+ for var, value in self.plc_spec['sfa']['settings'].items():
fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('R\n')
@auth_sfa_mapper
def sfa_provision_empty(self): pass
@auth_sfa_mapper
+ def sfa_describe(self): pass
+ @auth_sfa_mapper
def sfa_check_slice_plc(self): pass
@auth_sfa_mapper
def sfa_check_slice_plc_empty(self): pass