# keep this out of the way for now
'check_vsys_defaults_ignore', SEP,
# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', SEP,
- 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
- 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+ 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
'populate', SEP,
- 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+ 'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
+ 'nodeflavour_show','nodedistro_f14','nodedistro_f18', 'nodedistro_f20', 'nodedistro_f21', SEP,
+ 'nodeplain_on','nodeplain_off','nodeplain_show', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
'check_netflow','check_drl', SEP,
'debug_nodemanager', 'slice_fs_present', SEP,
'standby_1_through_20','yes','no',SEP,
+ 'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
]
- bonding_steps = [
+ default_bonding_steps = [
'bonding_init_partial',
'bonding_add_yum',
'bonding_install_rpms', SEP,
self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
self.vserverip = plc_spec['vserverip']
self.vservername = plc_spec['vservername']
+ self.vplchostname = self.vservername.split('-')[-1]
self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
self.apiserver = TestApiserver(self.url, options.dry_run)
(self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
# see e.g. plc_start esp. the version for f14
#command gets run in the plc's vm
def host_to_guest(self, command):
+ """
+ Return the full command line that runs `command` inside the plc's vm,
+ now built as a plain ssh invocation towards the vm's hostname.
+ """
- vservername = self.vservername
- personality = self.options.personality
- raw = "{personality} virsh -c lxc:/// lxc-enter-namespace --noseclabel {vservername}".format(**locals())
- # f14 still needs some extra help
- if self.options.fcdistro == 'f14':
- raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin {command}".format(**locals())
- else:
- raw +=" -- /usr/bin/env {command}".format(**locals())
- return raw
+ # the virsh lxc-enter-namespace trick (and its f14 PATH workaround) is
+ # replaced with a straight ssh leg into the vm, keeping stdin attached
+ ssh_leg = TestSsh(self.vplchostname)
+ return ssh_leg.actual_command(command, keep_stdin=True)
# this /vservers thing is legacy...
def vm_root_in_host(self):
fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
print("export BUILD={}".format(self.options.buildname))
print("export PLCHOSTLXC={}".format(fqdn))
- print("export GUESTNAME={}".format(self.plc_spec['vservername']))
- vplcname = self.plc_spec['vservername'].split('-')[-1]
- print("export GUESTHOSTNAME={}.{}".format(vplcname, domain))
+ print("export GUESTNAME={}".format(self.vservername))
+ print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
# find hostname of first node
hostname, qemubox = self.all_node_infos()[0]
print("export KVMHOST={}.{}".format(qemubox, domain))
### install_rpm
def plc_install(self):
- "yum install myplc, noderepo, and the plain bootstrapfs"
-
- # workaround for getting pgsql8.2 on centos5
- if self.options.fcdistro == "centos5":
- self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+ """
+ yum install myplc, noderepo + plain bootstrapfs as well
+ """
# compute nodefamily
if self.options.personality == "linux32":
pkgs_string=" ".join(pkgs_list)
return self.yum_install(pkgs_list)
+ def install_syslinux6(self):
+ """
+ install syslinux6 from the fedora21 release
+ """
+ key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+ rpms = [
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+ ]
+ # this can be done several times
+ self.run_in_guest("rpm --import {key}".format(**locals()))
+ # success iff the yum localinstall exits 0
+ # (assumes run_in_guest returns the command's exit status - TODO confirm)
+ return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+ def bonding_builds(self):
+ """
+ list /etc/yum.repos.d on the myplc side
+ """
+ self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+ # informational step only: report success regardless of the ls outcome
+ return True
+
+ def bonding_nodes(self):
+ """
+ List nodes known to the myplc together with their nodefamily
+ """
+ print("---------------------------------------- nodes")
+ for node in self.apiserver.GetNodes(self.auth_root()):
+ # one extra GetNodeFlavour API call per node - acceptable for test-sized deployments
+ print("{} -> {}".format(node['hostname'],
+ self.apiserver.GetNodeFlavour(self.auth_root(),node['hostname'])['nodefamily']))
+ print("---------------------------------------- nodes")
+
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
@staticmethod
def translate_timestamp(start, grain, timestamp):
+ """
+ Timestamps smaller than TestPlc.YEAR are treated as relative:
+ they are scaled by `grain` and anchored at `start`; anything
+ larger is assumed absolute and returned unchanged.
+ """
if timestamp < TestPlc.YEAR:
- return start+timestamp*grain
+ return start + timestamp*grain
else:
return timestamp
now = int(time.time())
grain = self.apiserver.GetLeaseGranularity(self.auth_root())
print('API answered grain=', grain)
- start = (now/grain)*grain
+ start = (now//grain)*grain
start += grain
# find out all nodes that are reservable
nodes = self.all_reservable_nodenames()
lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
- lease_spec['t_from'],lease_spec['t_until'])
+ lease_spec['t_from'], lease_spec['t_until'])
if lease_addition['errors']:
utils.header("Cannot create leases, {}".format(lease_addition['errors']))
ok = False
+ # each step below is a placeholder body; @node_mapper presumably fans the
+ # same-named step out to every node in the spec - see node_mapper for the
+ # actual dispatch (TODO confirm; the decorator is defined elsewhere)
@node_mapper
def qemu_local_config(self): pass
@node_mapper
+ def qemu_export(self): pass
+ @node_mapper
def nodestate_reinstall(self): pass
@node_mapper
+ def nodestate_upgrade(self): pass
+ @node_mapper
def nodestate_safeboot(self): pass
@node_mapper
def nodestate_boot(self): pass
@node_mapper
def nodestate_show(self): pass
@node_mapper
- def qemu_export(self): pass
+ def nodedistro_f14(self): pass
+ @node_mapper
+ def nodedistro_f18(self): pass
+ @node_mapper
+ def nodedistro_f20(self): pass
+ @node_mapper
+ def nodedistro_f21(self): pass
+ @node_mapper
+ def nodeflavour_show(self): pass
+ @node_mapper
+ def nodeplain_on(self): pass
+ @node_mapper
+ def nodeplain_off(self): pass
+ @node_mapper
+ def nodeplain_show(self): pass
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node(self):
def keys_clear_known_hosts(self): pass
def plcapi_urls(self):
+ """
+ attempts to reach the PLCAPI with various forms for the URL
+ (delegates the scan to PlcapiUrlScanner on self.vserverip)
+ """
return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
def speed_up_slices(self):
@node_mapper
def qemu_timestamp(self) : pass
+ @node_mapper
+ def qemu_nodefamily(self): pass
+
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
for plc in [ self ] + other_plcs: