X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=system%2FTestPlc.py;h=a9ebd96b261551d1ffcabf577d90628b0b035d5a;hb=d5381e90de0d772a5b8867f03f26d40db8f33fba;hp=1d1ea45309979ea8290b600a50ffb2918616f958;hpb=eeba8e303edbfc5e926767ba22294beda642ecba;p=tests.git

diff --git a/system/TestPlc.py b/system/TestPlc.py
index 1d1ea45..a9ebd96 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -161,15 +161,15 @@ class TestPlc:
         # keep this out of the way for now
         'check_vsys_defaults_ignore', SEP,
         # run this first off so it's easier to re-run on another qemu box
-        'qemu_kill_mine', SEP,
-        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
-        'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
+        'qemu_kill_mine', 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+        'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', 'qemu_nodefamily', SEP,
         'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
         'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
         'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
         'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
         'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
-        'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+        'sfa_discover@1', 'sfa_rspec@1', SEPSFA,
+        'sfa_allocate@1', 'sfa_provision@1', 'sfa_describe@1', SEPSFA,
         'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
         'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
@@ -196,7 +196,9 @@ class TestPlc:
         'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
         'delete_leases', 'list_leases', SEP,
         'populate', SEP,
-        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+        'nodestate_show','nodestate_safeboot','nodestate_boot', 'nodestate_upgrade', SEP,
+        'nodeflavour_show','nodedistro_f14','nodedistro_f18', 'nodedistro_f20', 'nodedistro_f21', SEP,
+        'nodeplain_on','nodeplain_off','nodeplain_show', SEP,
         'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
         'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
         'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
@@ -205,8 +207,9 @@ class TestPlc:
         'check_netflow','check_drl', SEP,
         'debug_nodemanager', 'slice_fs_present', SEP,
         'standby_1_through_20','yes','no',SEP,
+        'install_syslinux6', 'bonding_builds', 'bonding_nodes', SEP,
     ]
-    bonding_steps = [
+    default_bonding_steps = [
         'bonding_init_partial',
         'bonding_add_yum',
         'bonding_install_rpms', SEP,
@@ -259,6 +262,7 @@ class TestPlc:
         self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
         self.vserverip = plc_spec['vserverip']
         self.vservername = plc_spec['vservername']
+        self.vplchostname = self.vservername.split('-')[-1]
         self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
         self.apiserver = TestApiserver(self.url, options.dry_run)
         (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
@@ -306,15 +310,8 @@ class TestPlc:
     # see e.g. plc_start esp. the version for f14
     # command gets run in the plc's vm
     def host_to_guest(self, command):
-        vservername = self.vservername
-        personality = self.options.personality
-        raw = "{personality} virsh -c lxc:/// lxc-enter-namespace --noseclabel {vservername}".format(**locals())
-        # f14 still needs some extra help
-        if self.options.fcdistro == 'f14':
-            raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin {command}".format(**locals())
-        else:
-            raw +=" -- /usr/bin/env {command}".format(**locals())
-        return raw
+        ssh_leg = TestSsh(self.vplchostname)
+        return ssh_leg.actual_command(command, keep_stdin=True)

     # this /vservers thing is legacy...
     def vm_root_in_host(self):
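The host_to_guest() rewrite above drops the virsh lxc-enter-namespace plumbing and instead wraps the command for ssh into the plc VM, keyed on the vplchostname attribute that the constructor now derives from vservername. A minimal sketch of the shape of the returned command, using a simplified stand-in for TestSsh (the real class lives in system/TestSsh.py; 'vplc01' and the root login are made up for the example):

    # Simplified stand-in for TestSsh -- illustration only, not the real
    # system/TestSsh.py implementation.
    import shlex

    class SketchSsh:
        def __init__(self, hostname, username='root'):   # root login is assumed
            self.hostname = hostname
            self.username = username

        def actual_command(self, command, keep_stdin=True):
            parts = ['ssh']
            if not keep_stdin:
                parts.append('-n')        # close stdin unless input is piped in
            parts.append('{}@{}'.format(self.username, self.hostname))
            parts.append(shlex.quote(command))
            return ' '.join(parts)

    # host_to_guest('plc-config-tty') then comes out as something like:
    print(SketchSsh('vplc01').actual_command('plc-config-tty'))
    # -> ssh root@vplc01 plc-config-tty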
@@ -528,9 +525,8 @@ class TestPlc:
         fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
         print("export BUILD={}".format(self.options.buildname))
         print("export PLCHOSTLXC={}".format(fqdn))
-        print("export GUESTNAME={}".format(self.plc_spec['vservername']))
-        vplcname = self.plc_spec['vservername'].split('-')[-1]
-        print("export GUESTHOSTNAME={}.{}".format(vplcname, domain))
+        print("export GUESTNAME={}".format(self.vservername))
+        print("export GUESTHOSTNAME={}.{}".format(self.vplchostname, domain))
         # find hostname of first node
         hostname, qemubox = self.all_node_infos()[0]
         print("export KVMHOST={}.{}".format(qemubox, domain))
@@ -713,11 +709,9 @@ class TestPlc:

     ### install_rpm
     def plc_install(self):
-        "yum install myplc, noderepo, and the plain bootstrapfs"
-
-        # workaround for getting pgsql8.2 on centos5
-        if self.options.fcdistro == "centos5":
-            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+        """
+        yum install myplc, noderepo + plain bootstrapfs as well
+        """

         # compute nodefamily
         if self.options.personality == "linux32":
@@ -736,6 +730,40 @@ class TestPlc:
         pkgs_string=" ".join(pkgs_list)
         return self.yum_install(pkgs_list)

+    def install_syslinux6(self):
+        """
+        install syslinux6 from the fedora21 release
+        """
+        key = 'http://mirror.onelab.eu/keys/RPM-GPG-KEY-fedora-21-primary'
+
+        rpms = [
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-6.03-1.fc21.x86_64.rpm',
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-nonlinux-6.03-1.fc21.noarch.rpm',
+            'http://mirror.onelab.eu/fedora/releases/21/Everything/x86_64/os/Packages/s/syslinux-perl-6.03-1.fc21.x86_64.rpm',
+        ]
+        # this can be done several times
+        self.run_in_guest("rpm --import {key}".format(**locals()))
+        return self.run_in_guest("yum -y localinstall {}".format(" ".join(rpms))) == 0
+
+    def bonding_builds(self):
+        """
+        list /etc/yum.repos.d on the myplc side
+        """
+        self.run_in_guest("ls /etc/yum.repos.d/*partial.repo")
+        return True
+
+    def bonding_nodes(self):
+        """
+        List nodes known to the myplc together with their nodefamily
+        """
+        print("---------------------------------------- nodes")
+        for node in self.apiserver.GetNodes(self.auth_root()):
+            print("{} -> {}".format(node['hostname'],
+                                    self.apiserver.GetNodeFlavour(self.auth_root(), node['hostname'])['nodefamily']))
+        print("---------------------------------------- nodes")
+        return True
+
+    ###
     def mod_python(self):
         """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
@@ -897,7 +924,7 @@ class TestPlc:
     @staticmethod
     def translate_timestamp(start, grain, timestamp):
         if timestamp < TestPlc.YEAR:
-            return start+timestamp*grain
+            return start + timestamp*grain
         else:
             return timestamp
@@ -910,7 +937,7 @@ class TestPlc:
         now = int(time.time())
         grain = self.apiserver.GetLeaseGranularity(self.auth_root())
         print('API answered grain=', grain)
-        start = (now/grain)*grain
+        start = (now//grain)*grain
         start += grain
         # find out all nodes that are reservable
         nodes = self.all_reservable_nodenames()
@@ -927,7 +954,7 @@ class TestPlc:
             lease_spec['t_from'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_from'])
             lease_spec['t_until'] = TestPlc.translate_timestamp(start, grain, lease_spec['t_until'])
             lease_addition = self.apiserver.AddLeases(self.auth_root(), nodes, lease_spec['slice'],
-                                                      lease_spec['t_from'],lease_spec['t_until'])
+                                                      lease_spec['t_from'], lease_spec['t_until'])
             if lease_addition['errors']:
                 utils.header("Cannot create leases, {}".format(lease_addition['errors']))
                 ok = False
@@ -1100,7 +1127,7 @@ class TestPlc:
         return True

     # probing nodes
-    def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+    def check_nodes_ping(self, timeout_seconds=60, period_seconds=10):
         class CompleterTaskPingNode(CompleterTask):
             def __init__(self, hostname):
                 self.hostname = hostname
@@ -1165,15 +1192,33 @@ class TestPlc:
     @node_mapper
     def qemu_local_config(self): pass
     @node_mapper
+    def qemu_export(self): pass
+    @node_mapper
     def nodestate_reinstall(self): pass
     @node_mapper
+    def nodestate_upgrade(self): pass
+    @node_mapper
     def nodestate_safeboot(self): pass
     @node_mapper
     def nodestate_boot(self): pass
     @node_mapper
     def nodestate_show(self): pass
     @node_mapper
-    def qemu_export(self): pass
+    def nodedistro_f14(self): pass
+    @node_mapper
+    def nodedistro_f18(self): pass
+    @node_mapper
+    def nodedistro_f20(self): pass
+    @node_mapper
+    def nodedistro_f21(self): pass
+    @node_mapper
+    def nodeflavour_show(self): pass
+    @node_mapper
+    def nodeplain_on(self): pass
+    @node_mapper
+    def nodeplain_off(self): pass
+    @node_mapper
+    def nodeplain_show(self): pass

     ### check hooks : invoke scripts from hooks/{node,slice}
     def check_hooks_node(self):
@@ -1295,6 +1340,9 @@ class TestPlc:
     def keys_clear_known_hosts(self): pass

     def plcapi_urls(self):
+        """
+        attempts to reach the PLCAPI with various forms for the URL
+        """
         return PlcapiUrlScanner(self.auth_root(), ip=self.vserverip).scan()

     def speed_up_slices(self):
@@ -1337,6 +1385,9 @@ class TestPlc:
     @node_mapper
     def qemu_timestamp(self) : pass

+    @node_mapper
+    def qemu_nodefamily(self): pass
+
     # when a spec refers to a node possibly on another plc
     def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
         for plc in [ self ] + other_plcs:
@@ -1720,6 +1771,8 @@ class TestPlc:
     @auth_sfa_mapper
     def sfa_provision_empty(self): pass
     @auth_sfa_mapper
+    def sfa_describe(self): pass
+    @auth_sfa_mapper
     def sfa_check_slice_plc(self): pass
     @auth_sfa_mapper
     def sfa_check_slice_plc_empty(self): pass
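A side note on the (now//grain)*grain change in the leases hunk: under Python 3, / is true division and yields a float, so the old (now/grain)*grain no longer rounded now down to a whole grain; // restores the floor, and the following start += grain then picks the first boundary in the future. A quick illustration with made-up numbers:

    # Made-up numbers; 'grain' is the lease granularity in seconds.
    now, grain = 10007, 100

    old_start = (now / grain) * grain     # ~10007.0 : true division, nothing floored
    new_start = (now // grain) * grain    # 10000    : floored to a grain boundary
    new_start += grain                    # 10100    : first boundary in the future

    assert new_start % grain == 0 and isinstance(new_start, int)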