diff --git a/system/TestPlc.py b/system/TestPlc.py
index 9f4799e..c62d5c4 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -84,10 +84,10 @@ SEP=''
 class TestPlc:
 
     default_steps = [
-        'display', 'local_pre', SEP,
+        'display', 'resources_pre', SEP,
         'delete_vs','create_vs','install', 'configure', 'start', SEP,
         'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
-        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
+        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
         'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
         'kill_all_qemus', 'start_node', SEP,
         # better use of time: do this now that the nodes are taking off
@@ -100,13 +100,14 @@ class TestPlc:
 #        'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
         'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
         'check_tcp', 'check_hooks', SEP,
-        'force_gather_logs', 'force_local_post',
+        'force_gather_logs', 'force_resources_post',
         ]
     other_steps = [
-        'show_boxes', 'local_list','local_cleanup',SEP,
+        'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
         'stop', 'vs_start', SEP,
         'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
         'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
+        'clean_leases', 'list_leases', SEP,
         'populate' , SEP,
         'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
         'db_dump' , 'db_restore', SEP,
@@ -120,6 +121,16 @@ class TestPlc:
     def valid_step (step):
         return step != SEP
 
+    # turn off the sfa-related steps when the build has skipped SFA
+    # this is originally for centos5, as recent SFAs won't build on this platform
+    @staticmethod
+    def check_whether_build_has_sfa (rpms_url):
+        retcod=os.system ("curl --silent %s/ | grep -q sfa"%rpms_url)
+        # full builds are expected to return with 0 here
+        if retcod!=0:
+            TestPlc.default_steps = [ step for step in TestPlc.default_steps
+                                      if step.find('sfa') < 0 ]
+
     def __init__ (self,plc_spec,options):
         self.plc_spec=plc_spec
         self.options=options
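The step filter in check_whether_build_has_sfa boils down to one list
comprehension over step names. A standalone sketch of its effect (the step
list below is made up for illustration):

    steps = ['display', 'configure', 'start_sfa', 'check_slice_sfa', 'check_tcp']
    steps = [ step for step in steps if step.find('sfa') < 0 ]
    # every step whose name contains 'sfa' is dropped:
    # -> ['display', 'configure', 'check_tcp']

Since os.system returns the shell exit status, a non-zero retcod means the
curl | grep pipeline found no sfa package in the build's rpms listing, which
is exactly when the sfa steps must be skipped.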
@@ -313,8 +324,10 @@ class TestPlc:
         return True
 
     # entry point
+    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
     def display_pass (self,passno):
         for (key,val) in self.plc_spec.iteritems():
+            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
             if passno == 2:
                 if key == 'sites':
                     for site in val:
@@ -337,6 +350,7 @@ class TestPlc:
     def display_site_spec (self,site):
         print '+ ======== site',site['site_fields']['name']
         for (k,v) in site.iteritems():
+            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
             if k=='nodes':
                 if v:
                     print '+ ','nodes : ',
@@ -354,8 +368,8 @@ class TestPlc:
             elif k == 'address_fields':
                 pass
             else:
-                print '+ ',k,
-                PrettyPrinter(indent=8,depth=2).pprint(v)
+                print '+ ',
+                utils.pprint(k,v)
 
     def display_initscript_spec (self,initscript):
         print '+ ======== initscript',initscript['initscript_fields']['name']
@@ -386,10 +400,11 @@ class TestPlc:
             print '+ ',k,v
 
     def display_node_spec (self,node):
-        print "+ node",node['name'],"host_box=",node['host_box'],
+        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
         print "hostname=",node['node_fields']['hostname'],
         print "ip=",node['interface_fields']['ip']
-
+        if self.options.verbose:
+            utils.pprint("node details",node,depth=3)
     # another entry point for just showing the boxes involved
     def display_mapping (self):
@@ -411,25 +426,35 @@ class TestPlc:
             print '+\tqemu box %s'%node_spec['host_box']
             print '+\thostname=%s'%node_spec['node_fields']['hostname']
 
-    def local_pre (self):
+    def resources_pre (self):
         "run site-dependent pre-test script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_pre(self)
 
-    def local_post (self):
+    def resources_post (self):
         "run site-dependent post-test script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_post(self)
 
-    def local_list (self):
+    def resources_list (self):
         "run site-dependent list script as defined in LocalTestResources"
         from LocalTestResources import local_resources
         return local_resources.step_list(self)
 
-    def local_cleanup (self):
-        "run site-dependent cleanup script as defined in LocalTestResources"
+    def resources_release (self):
+        "run site-dependent release script as defined in LocalTestResources"
+        from LocalTestResources import local_resources
+        return local_resources.step_release(self)
+
+    def resources_release_plc (self):
+        "run site-dependent release script as defined in LocalTestResources"
         from LocalTestResources import local_resources
-        return local_resources.step_cleanup(self)
+        return local_resources.step_release_plc(self)
+
+    def resources_release_qemu (self):
+        "run site-dependent release script as defined in LocalTestResources"
+        from LocalTestResources import local_resources
+        return local_resources.step_release_qemu(self)
 
     def delete_vs(self):
         "vserver delete the test myplc"
@@ -437,8 +462,12 @@ class TestPlc:
         return True
 
     ### install
+    # historically the build was being fetched by the tests
+    # now the build pushes itself as a subdir of the tests workdir
+    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
     def create_vs (self):
         "vserver creation (no install done)"
+        # push the local build/ dir to the testplc box
         if self.is_local():
             # a full path for the local calls
             build_dir=os.path.dirname(sys.argv[0])
@@ -448,10 +477,9 @@ class TestPlc:
         else:
             # use a standard name - will be relative to remote buildname
             build_dir="build"
-        # run checkout in any case - would do an update if already exists
-        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
-        if self.run_in_host(build_checkout) != 0:
-            return False
+        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
+        self.test_ssh.rmdir(build_dir)
+        self.test_ssh.copy(build_dir,recursive=True)
         # the repo url is taken from arch-rpms-url
         # with the last step (i386) removed
         repo_url = self.options.arch_rpms_url
@@ -481,7 +509,6 @@ class TestPlc:
 
         # workaround for getting pgsql8.2 on centos5
         if self.options.fcdistro == "centos5":
-            self.run_in_guest("rpm -Uvh http://yum.pgsqlrpms.org/8.2/pgdg-centos-8.2-5.noarch.rpm")
             self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
 
         if self.options.personality == "linux32":
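In create_vs above, the svn checkout is replaced by a push of the local build/
tree onto the plc box. The TestSsh helper is not shown in this diff; assuming
its rmdir/copy methods wrap plain ssh and scp (a guess based on the names, not
the actual implementation), the push amounts to roughly this sketch, where
'root@testplc.example.org' is a made-up target:

    import subprocess
    remote = 'root@testplc.example.org'   # hypothetical; the real box comes from the plc spec
    # remove any stale remote copy first - do *not* mkdir beforehand,
    # or the recursive copy would nest the tree as build/build/
    subprocess.call(['ssh', remote, 'rm', '-rf', 'build'])
    subprocess.call(['scp', '-r', 'build', '%s:build' % remote])

This keeps the remote layout flat: the tests find the build as a plain build/
subdir of their workdir, whatever tool (svn, git, ...) produced it locally.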
@@ -632,6 +659,64 @@ class TestPlc:
         "delete nodegroups with PLCAPI"
         return self.do_nodegroups("delete")
 
+    YEAR = 365*24*3600
+    @staticmethod
+    def translate_timestamp (start,timestamp):
+        if timestamp < TestPlc.YEAR: return start+timestamp
+        else: return timestamp
+
+    @staticmethod
+    def timestamp_printable (timestamp):
+        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
+
+    def leases(self):
+        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
+        now=int(time.time())
+        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
+        round_time=(now/grain)*grain
+        start=round_time+grain
+        # find out all nodes that are reservable
+        nodes=self.all_reservable_nodenames()
+        if not nodes:
+            utils.header ("No reservable node found - proceeding without leases")
+            return True
+        ok=True
+        # attach them to the leases as specified in plc_specs
+        # this is where the 'leases' field gets interpreted as relative or absolute
+        for lease_spec in self.plc_spec['leases']:
+            # skip the ones that come with a null slice id
+            if not lease_spec['slice']: continue
+            lease_spec['t_from']=TestPlc.translate_timestamp(start,lease_spec['t_from'])
+            lease_spec['t_until']=TestPlc.translate_timestamp(start,lease_spec['t_until'])
+            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
+                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
+            if lease_addition['errors']:
+                utils.header("Cannot create leases, %s"%lease_addition['errors'])
+                ok=False
+            else:
+                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
+                              (nodes,lease_spec['slice'],
+                               lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
+                               lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
+
+        return ok
+
+    def clean_leases (self):
+        "remove all leases on the myplc side"
+        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
+        utils.header("Cleaning leases %r"%lease_ids)
+        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
+        return True
+
+    def list_leases (self):
+        "list all leases known to the myplc"
+        leases = self.apiserver.GetLeases(self.auth_root())
+        for l in leases:
+            utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
+                                                   TestPlc.timestamp_printable(l['t_from']),
+                                                   TestPlc.timestamp_printable(l['t_until'])))
+        return True
+
     # create nodegroups if needed, and populate
     def do_nodegroups (self, action="add"):
         # 1st pass to scan contents
@@ -703,10 +788,18 @@ class TestPlc:
         node_infos = []
         for site_spec in self.plc_spec['sites']:
             node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
-                               for node_spec in site_spec['nodes'] ]
+                                for node_spec in site_spec['nodes'] ]
         return node_infos
 
     def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
+    def all_reservable_nodenames (self):
+        res=[]
+        for site_spec in self.plc_spec['sites']:
+            for node_spec in site_spec['nodes']:
+                node_fields=node_spec['node_fields']
+                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
+                    res.append(node_fields['hostname'])
+        return res
 
     # silent_minutes : during the first minutes nothing gets printed
     def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
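The lease timestamps deserve a worked example: translate_timestamp treats any
value smaller than one year as an offset from the first granularity boundary
after now, and anything larger as an absolute epoch timestamp. A minimal
sketch of that convention, with an assumed 30-minute grain (the real value
comes from GetLeaseGranularity):

    import time

    YEAR = 365*24*3600

    def translate_timestamp (start, timestamp):
        # small values are relative to 'start', large ones are absolute
        if timestamp < YEAR: return start+timestamp
        else: return timestamp

    now = int(time.time())
    grain = 30*60                        # assumed granularity, for illustration only
    start = (now/grain)*grain + grain    # next grain boundary, as computed in leases()
    print translate_timestamp(start, 3600)         # relative: one hour after 'start'
    print translate_timestamp(start, now+2*YEAR)   # absolute: returned unchanged

This is why a plc spec can express its 'leases' field as small relative
offsets (e.g. t_from=0, t_until=3600) while full absolute timestamps keep
working unchanged.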