# keep this out of the way for now
'check_vsys_defaults_ignore', SEP,
# run this first off so it's easier to re-run on another qemu box
- 'qemu_kill_mine', SEP,
- 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
- 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
+ 'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+ 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodeflavour', SEP,
'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
'check_netflow','check_drl', SEP,
'debug_nodemanager', 'slice_fs_present', SEP,
'standby_1_through_20','yes','no',SEP,
+ 'install_syslinux6', 'installed_bonds', SEP,
]
- bonding_steps = [
+ default_bonding_steps = [
'bonding_init_partial',
'bonding_add_yum',
'bonding_install_rpms', SEP,
# warning, we're now building 'sface' so let's be a bit more picky
# full builds are expected to return with 0 here
utils.header("Checking if build provides SFA package...")
- retcod = os.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
+ retcod = utils.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
encoded = 'yes' if retcod else 'no'
with open(has_sfa_cache_filename,'w') as cache:
cache.write(encoded)
self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
self.vserverip = plc_spec['vserverip']
self.vservername = plc_spec['vservername']
+ self.vplchostname = self.vservername.split('-')[-1]
self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
self.apiserver = TestApiserver(self.url, options.dry_run)
(self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
# see e.g. plc_start esp. the version for f14
    # command gets run in the plc's vm
def host_to_guest(self, command):
- vservername = self.vservername
- personality = self.options.personality
- raw = "{personality} virsh -c lxc:/// lxc-enter-namespace {vservername}".format(**locals())
- # f14 still needs some extra help
- if self.options.fcdistro == 'f14':
- raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin {command}".format(**locals())
- else:
- raw +=" -- /usr/bin/env {command}".format(**locals())
- return raw
+ ssh_leg = TestSsh(self.vplchostname)
+ return ssh_leg.actual_command(command, keep_stdin=True)
# this /vservers thing is legacy...
def vm_root_in_host(self):
fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
print("export BUILD={}".format(self.options.buildname))
print("export PLCHOSTLXC={}".format(fqdn))
- print("export GUESTNAME={}".format(self.plc_spec['vservername']))
- vplcname = self.plc_spec['vservername'].split('-')[-1]
- print("export GUESTHOSTNAME={}.{}".format(vplcname, domain))
+ print("export GUESTNAME={}".format(self.vservername))
+ print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
# find hostname of first node
hostname, qemubox = self.all_node_infos()[0]
print("export KVMHOST={}.{}".format(qemubox, domain))
### install_rpm
def plc_install(self):
- "yum install myplc, noderepo, and the plain bootstrapfs"
-
- # workaround for getting pgsql8.2 on centos5
- if self.options.fcdistro == "centos5":
- self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+ """
+ yum install myplc, noderepo
+ plain bootstrapfs is not installed anymore
+ """
# compute nodefamily
if self.options.personality == "linux32":
pkgs_list.append("slicerepo-{}".format(nodefamily))
pkgs_list.append("myplc")
pkgs_list.append("noderepo-{}".format(nodefamily))
- pkgs_list.append("nodeimage-{}-plain".format(nodefamily))
pkgs_string=" ".join(pkgs_list)
return self.yum_install(pkgs_list)
+ def install_syslinux6(self):
+ """
+ install syslinux6 from the fedora21 release
+ """
+ key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+ rpms = [
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+ 'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+ ]
+ # this can be done several times
+ self.run_in_guest("rpm --import {key}".format(**locals()))
+ return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+ def installed_bonds(self):
+ """
+ list /etc/yum.repos.d on the myplc side
+ """
+ self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+ return True
+
###
def mod_python(self):
"""yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
@staticmethod
def translate_timestamp(start, grain, timestamp):
if timestamp < TestPlc.YEAR:
- return start+timestamp*grain
+ return start + timestamp*grain
else:
return timestamp
now = int(time.time())
grain = self.apiserver.GetLeaseGranularity(self.auth_root())
print('API answered grain=', grain)
- start = (now/grain)*grain
+ start = (now//grain)*grain
start += grain
# find out all nodes that are reservable
nodes = self.all_reservable_nodenames()
lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
- lease_spec['t_from'],lease_spec['t_until'])
+ lease_spec['t_from'], lease_spec['t_until'])
if lease_addition['errors']:
utils.header("Cannot create leases, {}".format(lease_addition['errors']))
ok = False
def keys_clear_known_hosts(self): pass
def plcapi_urls(self):
+ """
+ attempts to reach the PLCAPI with various forms for the URL
+ """
return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()
def speed_up_slices(self):
@node_mapper
def qemu_timestamp(self) : pass
+ @node_mapper
+ def qemu_nodeflavour(self): pass
+
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
for plc in [ self ] + other_plcs: