From e40ef24f58e1a8190339fa7e43beb05f1034deeb Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Thu, 14 May 2009 17:09:48 +0000 Subject: [PATCH] first very rough draft of the LocalTestResources config mechanism --- system/TestMain.py | 65 +++++-- system/TestMapper.py | 8 +- system/TestPlc.py | 168 +++++++++------- system/TestResources.py | 194 +++++++++++++++++++ system/TestResources.readme | 37 ++++ system/Trackers.py | 12 +- system/config_1default.py | 38 ---- system/config_1testqemus.py | 37 ---- system/config_1vnodes.py | 46 ----- system/config_1vplcs.py | 53 ----- system/{config_main.py => config_default.py} | 0 system/config_pdefault.py | 46 ----- system/config_ptestqemus.py | 37 ---- system/config_pvnodes.py | 46 ----- system/config_pvplcs.py | 56 ------ system/{runtest => run_log} | 0 16 files changed, 384 insertions(+), 459 deletions(-) create mode 100644 system/TestResources.py create mode 100644 system/TestResources.readme delete mode 100644 system/config_1default.py delete mode 100644 system/config_1testqemus.py delete mode 100644 system/config_1vnodes.py delete mode 100644 system/config_1vplcs.py rename system/{config_main.py => config_default.py} (100%) delete mode 100644 system/config_pdefault.py delete mode 100644 system/config_ptestqemus.py delete mode 100644 system/config_pvnodes.py delete mode 100644 system/config_pvplcs.py rename system/{runtest => run_log} (100%) diff --git a/system/TestMain.py b/system/TestMain.py index ba2e9a9..74dd39e 100755 --- a/system/TestMain.py +++ b/system/TestMain.py @@ -12,6 +12,10 @@ from TestPlc import TestPlc from TestSite import TestSite from TestNode import TestNode +# add $HOME in PYTHONPATH so we can import LocalTestResources.py +sys.path.append(os.environ['HOME']) +import LocalTestResources + class TestMain: subversion_id = "$Id$" @@ -24,10 +28,35 @@ class TestMain: self.path=os.path.dirname(sys.argv[0]) or "." 
os.chdir(self.path) - @staticmethod - def show_env (options, message): - utils.header (message) - utils.show_options("main options",options) + def show_env (self,options, message): + if self.options.verbose: + utils.header (message) + utils.show_options("main options",options) + + def init_steps(self): + self.steps_message=20*'x'+" Defaut steps are\n"+TestPlc.printable_steps(TestPlc.default_steps) + self.steps_message += "\n"+20*'x'+" Other useful steps are\n"+TestPlc.printable_steps(TestPlc.other_steps) + + def list_steps(self): + if not self.options.verbose: + print self.steps_message + else: + testplc_method_dict = __import__("TestPlc").__dict__['TestPlc'].__dict__ + scopes = [("Default steps",TestPlc.default_steps)] + if self.options.all_steps: + scopes.append ( ("Other steps",TestPlc.other_steps) ) + for (scope,steps) in scopes: + print '--------------------',scope + for step in [step for step in steps if TestPlc.valid_step(step)]: + stepname=step + if step.find("force_") == 0: + stepname=step.replace("force_","") + force=True + print '*',step,"\r",4*"\t", + try: + print testplc_method_dict[stepname].__doc__ + except: + print "*** no doc found" @staticmethod def optparse_list (option, opt, value, parser): @@ -37,8 +66,7 @@ class TestMain: setattr(parser.values,option.dest,value.split()) def run (self): - steps_message=20*'x'+" Defaut steps are\n"+TestPlc.printable_steps(TestPlc.default_steps) - steps_message += "\n"+20*'x'+" Other useful steps are\n"+TestPlc.printable_steps(TestPlc.other_steps) + self.init_steps() usage = """usage: %%prog [options] steps arch-rpms-url defaults to the last value used, as stored in arg-arch-rpms-url, no default @@ -51,7 +79,7 @@ ips_node, ips_plc and ips_qemu defaults to the last value used, as stored in arg steps refer to a method in TestPlc or to a step_* module === """%(TestMain.default_build_url,TestMain.default_config) - usage += steps_message + usage += self.steps_message parser=OptionParser(usage=usage,version=self.subversion_id) parser.add_option("-u","--url",action="store", dest="arch_rpms_url", help="URL of the arch-dependent RPMS area - for locating what to test") @@ -103,21 +131,16 @@ steps refer to a method in TestPlc or to a step_* module if self.options.quiet: self.options.verbose=False + # no step specified if len(self.args) == 0: - if self.options.all_steps: - self.options.steps=TestPlc.default_steps - elif self.options.dry_run: - self.options.steps=TestPlc.default_steps - elif self.options.list_steps: - print steps_message - sys.exit(1) - else: - print 'No step found (do you mean -a ? 
)' - print "Run %s --help for help"%sys.argv[0] - sys.exit(1) + self.options.steps=TestPlc.default_steps else: self.options.steps = self.args + if self.options.list_steps: + self.list_steps() + sys.exit(1) + # handle defaults and option persistence for (recname,filename,default) in ( ('build_url','arg-build-url',TestMain.default_build_url) , @@ -166,7 +189,7 @@ steps refer to a method in TestPlc or to a step_* module if isinstance(getattr(self.options,recname),list): getattr(self.options,recname).reverse() - if not self.options.quiet: + if self.options.verbose: utils.header('* Using %s = %s'%(recname,getattr(self.options,recname))) @@ -207,6 +230,10 @@ steps refer to a method in TestPlc or to a step_* module traceback.print_exc() print 'Cannot load config %s -- ignored'%modulename raise + + # run localize as defined by local_resources + all_plc_specs = LocalTestResources.local_resources.localize(all_plc_specs,self.options) + # remember plc IP address(es) if not specified ips_plc_file=open('arg-ips-plc','w') for plc_spec in all_plc_specs: diff --git a/system/TestMapper.py b/system/TestMapper.py index 8b9e915..faaeb00 100644 --- a/system/TestMapper.py +++ b/system/TestMapper.py @@ -35,7 +35,7 @@ class TestMapper: def apply_first_map (self, type, name, obj, maplist): for (map_pattern,rename_dict) in maplist: if utils.match (name,map_pattern): - if not self.options.quiet: + if self.options.verbose: utils.header("TestMapper/%s : applying rules '%s' on %s"%(type,map_pattern,name)) for (k,v) in rename_dict.iteritems(): # apply : separator @@ -45,20 +45,20 @@ class TestMapper: for step in path[:-1]: if not o.has_key(step): o[step]={} - if not self.options.quiet: + if self.options.verbose: utils.header ("WARNING : created step %s in path %s on %s %s"%( step,path,type,name)) o=o[step] # last step is the one for side-effect step=path[-1] - if not self.options.quiet: + if self.options.verbose: if not o.has_key(step): utils.header ("WARNING : inserting key %s for path %s on %s %s"%( step,path,type,name)) # apply formatting if '%s' found in the value if v.find('%s')>=0: v=v%obj[k] - if not self.options.quiet: + if self.options.verbose: print("TestMapper, rewriting %s: %s into %s"%(name,k,v)) o[step]=v # only apply first rule diff --git a/system/TestPlc.py b/system/TestPlc.py index 3c406aa..bd6b9ae 100644 --- a/system/TestPlc.py +++ b/system/TestPlc.py @@ -17,7 +17,6 @@ from TestSliver import TestSliver from TestBox import TestBox from TestSsh import TestSsh from TestApiserver import TestApiserver -from Trackers import TrackerPlc, TrackerQemu # step methods must take (self) and return a boolean (options is a member of the class) @@ -45,6 +44,8 @@ def node_mapper (method): test_node = TestNode (self,test_site,node_spec) if not node_method(test_node): overall=False return overall + # restore the doc text + actual.__doc__=method.__doc__ return actual def slice_mapper_options (method): @@ -57,6 +58,8 @@ def slice_mapper_options (method): test_slice=TestSlice(self,test_site,slice_spec) if not slice_method(test_slice,self.options): overall=False return overall + # restore the doc text + actual.__doc__=method.__doc__ return actual SEP='' @@ -64,27 +67,26 @@ SEP='' class TestPlc: default_steps = [ - 'display','trqemu_record','trqemu_free','uninstall','install','install_rpm', - 'configure', 'start', 'fetch_keys', SEP, - 'store_keys', 'clear_known_hosts', 'initscripts', SEP, - 'sites', 'nodes', 'slices', 'nodegroups', SEP, - 'init_node','bootcd', 'configure_qemu', 'export_qemu', - 'kill_all_qemus', 
'reinstall_node','start_node', SEP, + 'display', 'local_pre', SEP, + 'delete','create','install', 'configure', 'start', SEP, + 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP, + 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP, + 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu', + 'kill_all_qemus', 'start_node', SEP, # better use of time: do this now that the nodes are taking off 'plcsh_stress_test', SEP, 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP, - 'check_tcp', SEP, - 'check_sanity', SEP, - 'force_gather_logs', 'force_trplc_record','force_trplc_free', + 'check_tcp', 'check_sanity', SEP, + 'force_gather_logs', 'force_local_post', ] other_steps = [ - 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP, + 'fresh_install', 'stop', 'vs_start', SEP, 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP, - 'clean_sites', 'clean_nodes', - 'clean_slices', 'clean_keys', SEP, + 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP, 'populate' , SEP, 'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP, - 'db_dump' , 'db_restore', 'trplc_cleanup','trqemu_cleanup','trackers_cleanup', SEP, + 'db_dump' , 'db_restore', SEP, + 'local_list','local_cleanup',SEP, 'standby_1 through 20', ] @@ -245,6 +247,7 @@ class TestPlc: # make this a valid step def kill_all_qemus(self): + "all qemu boxes: kill all running qemus (even of former runs)" # this is the brute force version, kill all qemus on that host box for (box,nodes) in self.gather_hostBoxes().iteritems(): # pass the first nodename, as we don't push template-qemu on testboxes @@ -277,6 +280,7 @@ class TestPlc: #################### display config def display (self): + "show test configuration after localization" self.display_pass (1) self.display_pass (2) return True @@ -380,60 +384,34 @@ class TestPlc: print '*\tqemu box %s'%node_spec['host_box'] print '*\thostname=%s'%node_spec['node_fields']['hostname'] - ### tracking - def trplc_record (self): - tracker = TrackerPlc(self.options) - tracker.record(self.test_ssh.hostname,self.vservername) - tracker.store() - return True - - def trplc_free (self): - tracker = TrackerPlc(self.options) - tracker.free() - tracker.store() - return True - - def trplc_cleanup (self): - tracker = TrackerPlc(self.options) - tracker.cleanup() - tracker.store() - return True - - def trqemu_record (self): - tracker=TrackerQemu(self.options) - for site_spec in self.plc_spec['sites']: - for node_spec in site_spec['nodes']: - tracker.record(node_spec['host_box'],self.options.buildname,node_spec['node_fields']['hostname']) - tracker.store() - return True - - def trqemu_free (self): - tracker=TrackerQemu(self.options) - for site_spec in self.plc_spec['sites']: - for node_spec in site_spec['nodes']: - tracker.free() - tracker.store() - return True - - def trqemu_cleanup (self): - tracker=TrackerQemu(self.options) - for site_spec in self.plc_spec['sites']: - for node_spec in site_spec['nodes']: - tracker.cleanup() - tracker.store() - return True - - def trackers_cleanup (self): - self.trqemu_cleanup() - self.trplc_cleanup() - return True - - def uninstall(self): + def local_pre (self): + "run site-dependant pre-test script as defined in LocalTestResources" + from LocalTestResources import local_resources + return local_resources.step_pre(self) + + def local_post (self): + "run site-dependant post-test script as defined in LocalTestResources" + from LocalTestResources import local_resources + return 
local_resources.step_post(self) + + def local_list (self): + "run site-dependant list script as defined in LocalTestResources" + from LocalTestResources import local_resources + return local_resources.step_list(self) + + def local_cleanup (self): + "run site-dependant cleanup script as defined in LocalTestResources" + from LocalTestResources import local_resources + return local_resources.step_cleanup(self) + + def delete(self): + "vserver delete the test myplc" self.run_in_host("vserver --silent %s delete"%self.vservername) return True ### install - def install(self): + def create (self): + "vserver creation (no install done)" if self.is_local(): # a full path for the local calls build_dir=os.path.dirname(sys.argv[0]) @@ -469,7 +447,8 @@ class TestPlc: return self.run_in_host(create_vserver) == 0 ### install_rpm - def install_rpm(self): + def install(self): + "yum install myplc, noderepo, and the plain bootstrapfs" if self.options.personality == "linux32": arch = "i386" elif self.options.personality == "linux64": @@ -483,6 +462,7 @@ class TestPlc: ### def configure(self): + "run plc-config-tty" tmpname='%s.plc-config-tty'%(self.name()) fileconf=open(tmpname,'w') for var in [ 'PLC_NAME', @@ -506,10 +486,12 @@ class TestPlc: return True def start(self): + "service plc start" self.run_in_guest('service plc start') return True def stop(self): + "service plc stop" self.run_in_guest('service plc stop') return True @@ -519,6 +501,7 @@ class TestPlc: # stores the keys from the config for further use def store_keys(self): + "stores test users ssh keys in keys/" for key_spec in self.plc_spec['keys']: TestKey(self,key_spec).store_key() return True @@ -529,6 +512,7 @@ class TestPlc: # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/ # for later direct access to the nodes def fetch_keys(self): + "gets ssh keys in /etc/planetlab/ and stores them locally in keys/" dir="./keys" if not os.path.isdir(dir): os.mkdir(dir) @@ -547,9 +531,11 @@ class TestPlc: return overall def sites (self): + "create sites with PLCAPI" return self.do_sites() def clean_sites (self): + "delete sites with PLCAPI" return self.do_sites(action="delete") def do_sites (self,action="add"): @@ -575,8 +561,10 @@ class TestPlc: self.apiserver.DeleteSite(self.auth_root(),site_id) def nodes (self): + "create nodes with PLCAPI" return self.do_nodes() def clean_nodes (self): + "delete nodes with PLCAPI" return self.do_nodes(action="delete") def do_nodes (self,action="add"): @@ -597,8 +585,10 @@ class TestPlc: return True def nodegroups (self): + "create nodegroups with PLCAPI" return self.do_nodegroups("add") def clean_nodegroups (self): + "delete nodegroups with PLCAPI" return self.do_nodegroups("delete") # create nodegroups if needed, and populate @@ -777,21 +767,33 @@ class TestPlc: return True def nodes_ssh_debug(self): + "Tries to ssh into nodes in debug mode with the debug ssh key" return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10) def nodes_ssh_boot(self): + "Tries to ssh into nodes in production mode with the root ssh key" return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10) @node_mapper - def init_node (self): pass + def init_node (self): + "all nodes : init a clean local directory for holding node-dep stuff like iso image..." 
+ pass @node_mapper - def bootcd (self): pass + def bootcd (self): + "all nodes: invoke GetBootMedium and store result locally" + pass @node_mapper - def configure_qemu (self): pass + def configure_qemu (self): + "all nodes: compute qemu config qemu.conf and store it locally" + pass @node_mapper - def reinstall_node (self): pass + def reinstall_node (self): + "all nodes: mark PLCAPI boot_state as reinstall" + pass @node_mapper - def export_qemu (self): pass + def export_qemu (self): + "all nodes: push local node-dep directory on the qemu box" + pass ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice} def check_sanity_node (self): @@ -800,6 +802,7 @@ class TestPlc: return self.locate_first_sliver().check_sanity() def check_sanity (self): + "runs unit tests in the node and slice contexts - see tests/qaapi/qa/tests/{node,slice}" return self.check_sanity_node() and self.check_sanity_sliver() ### initscripts @@ -821,15 +824,18 @@ class TestPlc: return overall def check_initscripts(self): - return self.do_check_initscripts() - + "check that the initscripts have triggered" + return self.do_check_initscripts() + def initscripts (self): + "create initscripts with PLCAPI" for initscript in self.plc_spec['initscripts']: utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript) self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields']) return True def clean_initscripts (self): + "delete initscripts with PLCAPI" for initscript in self.plc_spec['initscripts']: initscript_name = initscript['initscript_fields']['name'] print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name'])) @@ -842,9 +848,11 @@ class TestPlc: ### manage slices def slices (self): + "create slices with PLCAPI" return self.do_slices() def clean_slices (self): + "delete slices with PLCAPI" return self.do_slices("delete") def do_slices (self, action="add"): @@ -862,15 +870,22 @@ class TestPlc: return True @slice_mapper_options - def check_slice(self): pass + def check_slice(self): + "tries to ssh-enter the slice with the user key, to ensure slice creation" + pass @node_mapper - def clear_known_hosts (self): pass + def clear_known_hosts (self): + "remove test nodes entries from the local known_hosts file" + pass @node_mapper - def start_node (self) : pass + def start_node (self) : + "all nodes: start the qemu instance (also runs qemu-bridge-init start)" + pass def check_tcp (self): + "check TCP connectivity between 2 slices (or in loopback if only one is defined)" specs = self.plc_spec['tcp_test'] overall=True for spec in specs: @@ -888,6 +903,7 @@ class TestPlc: return overall def plcsh_stress_test (self): + "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents" # install the stress-test in the plc image location = "/usr/share/plc_api/plcsh_stress_test.py" remote="/vservers/%s/%s"%(self.vservername,location) @@ -902,6 +918,7 @@ class TestPlc: # in particular runs with --preserve (dont cleanup) and without --check # also it gets run twice, once with the --foreign option for creating fake foreign entries def populate (self): + "creates random entries in the PLCAPI" # install the stress-test in the plc image location = "/usr/share/plc_api/plcsh_stress_test.py" remote="/vservers/%s/%s"%(self.vservername,location) @@ -915,6 +932,7 @@ class TestPlc: return ( local and remote) def gather_logs (self): + "gets all possible logs from plc's/qemu node's/slice's for future reference" # (1.a) get the plc's /var/log/ and store it locally in 
logs/myplc.var-log./* # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log./* # (2) get all the nodes qemu log and store it as logs/node.qemu..log diff --git a/system/TestResources.py b/system/TestResources.py new file mode 100644 index 0000000..c7b343c --- /dev/null +++ b/system/TestResources.py @@ -0,0 +1,194 @@ +# +# $Id$ +# + +import sys + +import utils +from TestMapper import TestMapper +from TestPool import TestPoolQemu, TestPoolIP +from Trackers import TrackerPlc, TrackerQemu + +class TestResources (): + + # need more specialization, see an example in OnelabTestResources + + ########## + def localize (self,plcs,options): + try: + plcs = self.localize_qemus(plcs,options) + except: + print 'Could not localize qemus - exiting' + sys.exit(1) + try: + plcs = self.localize_nodes(plcs,options) + except: + print 'Could not localize nodes - exiting' + sys.exit(1) + try: + plcs = self.localize_plcs(plcs,options) + except: + print 'Could not localize plcs - exiting' + sys.exit(1) + return plcs + + def localize_qemus (self,plcs,options): + + # all plcs on the same vserver box + plc_box = self.plc_boxes()[0] + + # informative + label=options.personality.replace("linux","") + + node_map = [] + qemu_pool = TestPoolQemu (self.qemus_ip_pool(), options) + + for index in range(options.size): + index += 1 + if options.ips_qemu: + ip_or_hostname=options.ips_qemu.pop() + (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname) + else: + (hostname,ip,unused) = qemu_pool.next_free() + + node_map += [ ('node%d'%index, {'host_box':hostname},) ] + + mapper = {'plc': [ ('*' , {'hostname':plc_box, + 'PLC_DB_HOST':plc_box, + 'PLC_API_HOST':plc_box, + 'PLC_BOOT_HOST':plc_box, + 'PLC_WWW_HOST':plc_box, + 'name':'%s-'+label } ) + ], + 'node': node_map, + } + + return TestMapper(plcs,options).map(mapper) + + + def localize_nodes (self, plcs, options): + + ip_pool = TestPoolIP (self.nodes_ip_pool(),options) + network_dict = self.network_dict() + + test_mapper = TestMapper (plcs, options) + + all_nodenames = test_mapper.node_names() + maps = [] + for nodename in all_nodenames: + if options.ips_node: + ip_or_hostname=options.ips_node.pop() + print 'debug','in',ip_or_hostname,'out',ip_pool.locate_entry(ip_or_hostname) + (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) + else: + (hostname,ip,mac) = ip_pool.next_free() + utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip)) + node_dict= {'node_fields:hostname':hostname, + 'interface_fields:ip':ip, + 'interface_fields:mac':mac, + } + + node_dict.update(network_dict) + maps.append ( ( nodename, node_dict) ) + + plc_map = [ ( '*' , { 'PLC_NET_DNS1' : network_dict [ 'interface_fields:dns1' ], + 'PLC_NET_DNS2' : network_dict [ 'interface_fields:dns2' ], } ) ] + + return test_mapper.map ({'node': maps, 'plc' : plc_map } ) + + + def localize_plcs (self,plcs,options): + + utils.header ("Turning configuration into a vserver-based one for onelab") + + ip_pool = TestPoolIP (self.plcs_ip_pool(),options) + + plc_counter=0 + for plc in plcs: + if options.ips_plc : + ip_or_hostname=options.ips_plc.pop() + (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) + if options.verbose: + utils.header("Using user-provided %s %s for plc %s"%( + hostname,ip_or_hostname,plc['name'])) + else: + (hostname,ip,mac)=ip_pool.next_free() + if options.verbose: + utils.header("Using auto-allocated %s %s for plc %s"%( + hostname,ip,plc['name'])) + + ### rewrite fields in plc + # compute a helpful vserver name - remove domain in hostname + 
simplehostname=hostname.split('.')[0] + vservername = options.buildname + if len(plcs) == 1 : + vservername = "%s-%s" % (vservername,simplehostname) + else: + plc_counter += 1 + vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname) + # apply + plc['vservername']=vservername + plc['vserverip']=ip + plc['name'] = "%s_%s"%(plc['name'],simplehostname) + utils.header("Attaching plc %s to vserver %s (%s)"%( + plc['name'],plc['vservername'],plc['vserverip'])) + for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]: + plc[key] = hostname + + return plcs + + # as a plc step this should return a boolean + def step_pre (self,plc): + return self.trqemu_record (plc) and self.trqemu_free(plc) + + def step_post (self,plc): + return self.trplc_record (plc) and self.trplc_free(plc) + + def step_cleanup (self,plc): + return self.trqemu_cleanup(plc) and self.trplc_cleanup(plc) + + #################### + def trqemu_record (self,plc): + tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1) + for site_spec in plc.plc_spec['sites']: + for node_spec in site_spec['nodes']: + tracker.record(node_spec['host_box'],plc.options.buildname,node_spec['node_fields']['hostname']) + tracker.store() + return True + + def trqemu_free (self,plc): + tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1) + for site_spec in plc.plc_spec['sites']: + for node_spec in site_spec['nodes']: + tracker.free() + tracker.store() + return True + + ### + def trplc_record (self,plc): + tracker = TrackerPlc(plc.options,instances=self.max_plcs()) + tracker.record(plc.test_ssh.hostname,plc.vservername) + tracker.store() + return True + + def trplc_free (self,plc): + tracker = TrackerPlc(plc.options,instances=self.max_plcs()) + tracker.free() + tracker.store() + return True + + ### + def trqemu_cleanup (self,plc): + tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1) + for site_spec in plc.plc_spec['sites']: + for node_spec in site_spec['nodes']: + tracker.cleanup() + tracker.store() + return True + + def trplc_cleanup (self,plc): + tracker = TrackerPlc(plc.options,instances=self.max_plcs()) + tracker.cleanup() + tracker.store() + return True + diff --git a/system/TestResources.readme b/system/TestResources.readme new file mode 100644 index 0000000..61bf652 --- /dev/null +++ b/system/TestResources.readme @@ -0,0 +1,37 @@ +Historically, all the site-dependent configuration was part of the svn +tree. + +However this turned out to be a poor choice, as the actual test h/w +configuration changes over time: when you try to test e.g. a 4.2 +build in 2009, you get a very outdated description of the test hardware. 
+ +So, here is the way out: + +==================== loading local resources + +- the test code expects to find a module named LocalTestResources in $HOME +- this module is expected to define the following interface + +* one object named local_resources that implements the following methods + +* local_resources.localize (plcs,options) -> plcs +--> this is called on the plcs obtained after the configs are loaded, + and should return the altered plcs + +* local_resources.step_pre (plc) -> boolean +--> this is the implementation of the local_pre step on TestPlc; +it does any required housekeeping, such as shutting down leftover qemu instances + +* local_resources.step_post (plc) -> boolean +--> this is the implementation of the local_post step on TestPlc + +local_pre and local_post are part of the default set of steps + +* local_resources.step_list (plc) -> boolean +* local_resources.step_cleanup (plc) -> boolean +--> these implement the local_list and local_cleanup steps + + +===================== template + +The TestResources class can be used as a template for implementing local_resources. +See OnelabTestResources for an example of how to use it; a minimal illustrative +sketch is also appended after this patch. diff --git a/system/Trackers.py b/system/Trackers.py index 6a15aa5..257c033 100644 --- a/system/Trackers.py +++ b/system/Trackers.py @@ -28,6 +28,14 @@ class Tracker: tracks=[] self.tracks = [track for track in tracks if track] + def list (self): + try: + contents=file(self.filename).read() + print "==>",self.filename,"<==" + print contents + except: + print "xxxxxxxxxxxx",self.filename,"not found" + def store (self): out = file(self.filename,'w') for track in self.tracks: @@ -45,7 +53,7 @@ class Tracker: self.tracks.append( track ) print "Recorded %s in tracker %s"%(track,self.filename) - # this actually stops the old instances to fit the number of instances + # this actually stops the old instances, so that the total fits in the number of instances def free (self): # number of instances to stop how_many=len(self.tracks)-self.instances @@ -91,7 +99,7 @@ class TrackerQemu (Tracker): DEFAULT_FILENAME=os.environ['HOME']+"/tracker-qemus" # how many concurrent plcs are we keeping alive - adjust with the IP pool size - DEFAULT_MAX_INSTANCES = 2 + DEFAULT_MAX_INSTANCES = 3 def __init__ (self,options,filename=None,instances=0): if not filename: filename=TrackerQemu.DEFAULT_FILENAME diff --git a/system/config_1default.py b/system/config_1default.py deleted file mode 100644 index e6d73dd..0000000 --- a/system/config_1default.py +++ /dev/null @@ -1,38 +0,0 @@ -# the defaults -import utils -import TestPlc - -# this default is for the OneLab test infrastructure - -def config (plc_specs, options): - - import config_main - plcs = config_main.config([],options) - if options.verbose: - print '======================================== AFTER main' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - import config_1testqemus - plcs = config_1testqemus.config (plcs,options) - if options.verbose: - print '======================================== AFTER testqemus' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - import config_1vnodes - plcs = config_1vnodes.config(plcs,options) - if options.verbose: - print '======================================== AFTER vnodes' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - import config_1vplcs - plcs = config_1vplcs.config (plcs,options) - if options.verbose: - print '======================================== AFTER vservers' - for plc in plcs: 
TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - return plcs - diff --git a/system/config_1testqemus.py b/system/config_1testqemus.py deleted file mode 100644 index 9ec075d..0000000 --- a/system/config_1testqemus.py +++ /dev/null @@ -1,37 +0,0 @@ -import sys - -from TestMapper import TestMapper -from TestPool import TestPoolQemu - -onelab_qemus_pool = [ ( 'testqemu%d.onelab.eu'%i, None, None) for i in range(1,4) ] - -def config (plcs, options): - - # all plcs on the same vserver box - plc_box ='testbox-plc.onelab.eu' - # informative - label=options.personality.replace("linux","") - - # all qemus on a unique pool of 64bits boxes - node_map = [] - qemu_pool = TestPoolQemu (onelab_qemus_pool,options) - for index in range(options.size): - index += 1 - if options.ips_qemu: - ip_or_hostname=options.ips_qemu.pop() - (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname) - else: - (hostname,ip,unused) = qemu_pool.next_free() - node_map += [ ('node%d'%index, {'host_box':hostname},) ] - - mapper = {'plc': [ ('*' , {'hostname':plc_box, - 'PLC_DB_HOST':plc_box, - 'PLC_API_HOST':plc_box, - 'PLC_BOOT_HOST':plc_box, - 'PLC_WWW_HOST':plc_box, - 'name':'%s-'+label } ) - ], - 'node': node_map, - } - - return TestMapper(plcs,options).map(mapper) diff --git a/system/config_1vnodes.py b/system/config_1vnodes.py deleted file mode 100644 index a3764a7..0000000 --- a/system/config_1vnodes.py +++ /dev/null @@ -1,46 +0,0 @@ -# map all nodes onto the avail. pool - -import utils -from TestMapper import TestMapper -from TestPool import TestPoolIP - -onelab_nodes_ip_pool = [ - ( 'vnode%02d.inria.fr'%i, - '138.96.255.%d'%(230+i), - '02:34:56:00:00:%02d'%i) for i in range(1,10) ] - -site_dict = { - 'interface_fields:gateway':'138.96.248.250', - 'interface_fields:network':'138.96.0.0', - 'interface_fields:broadcast':'138.96.255.255', - 'interface_fields:netmask':'255.255.0.0', - 'interface_fields:dns1': '138.96.0.10', - 'interface_fields:dns2': '138.96.0.11', -} - -def config (plcs, options): - - ip_pool = TestPoolIP (onelab_nodes_ip_pool,options) - test_mapper = TestMapper (plcs, options) - - all_nodenames = test_mapper.node_names() - maps = [] - for nodename in all_nodenames: - if options.ips_node: - ip_or_hostname=options.ips_node.pop() - (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) - else: - (hostname,ip,mac) = ip_pool.next_free() - utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip)) - node_dict= {'node_fields:hostname':hostname, - 'interface_fields:ip':ip, - 'interface_fields:mac':mac, - } - - node_dict.update(site_dict) - maps.append ( ( nodename, node_dict) ) - - plc_map = [ ( '*' , { 'PLC_NET_DNS1' : site_dict [ 'interface_fields:dns1' ], - 'PLC_NET_DNS2' : site_dict [ 'interface_fields:dns2' ], } ) ] - - return test_mapper.map ({'node': maps, 'plc' : plc_map } ) diff --git a/system/config_1vplcs.py b/system/config_1vplcs.py deleted file mode 100644 index 4f68f59..0000000 --- a/system/config_1vplcs.py +++ /dev/null @@ -1,53 +0,0 @@ -import utils -import os.path -from TestPool import TestPoolIP - -# using vplc01 .. 
vplc15 - keep [16,17,18] for 4.2 and 19 and 20 for long-haul tests -onelab_plcs_ip_pool = [ - ( 'vplc%02d.inria.fr'%i, - '138.96.255.%d'%(200+i), - '02:34:56:00:ee:%02d'%i) for i in range(1,16) ] - -def config (plcs,options): - - utils.header ("Turning configuration into a vserver-based one for onelab") - - ip_pool = TestPoolIP (onelab_plcs_ip_pool,options) - - plc_counter=0 - for plc in plcs: - try: - if options.ips_plc : - ip_or_hostname=options.ips_plc.pop() - (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) - if not options.quiet: - utils.header("Using user-provided %s %s for plc %s"%( - hostname,ip_or_hostname,plc['name'])) - else: - (hostname,ip,mac)=ip_pool.next_free() - if not options.quiet: - utils.header("Using auto-allocated %s %s for plc %s"%( - hostname,ip,plc['name'])) - - ### rewrite fields in plc - # compute a helpful vserver name - remove domain in hostname - simplehostname=hostname.split('.')[0] - vservername = options.buildname - if len(plcs) == 1 : - vservername = "%s-%s" % (vservername,simplehostname) - else: - plc_counter += 1 - vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname) - # apply - plc['vservername']=vservername - plc['vserverip']=ip - plc['name'] = "%s_%s"%(plc['name'],simplehostname) - utils.header("Attaching plc %s to vserver %s (%s)"%( - plc['name'],plc['vservername'],plc['vserverip'])) - for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]: - plc[key] = hostname - - except: - raise Exception('Cannot find an available IP for %s - exiting'%plc['name']) - - return plcs diff --git a/system/config_main.py b/system/config_default.py similarity index 100% rename from system/config_main.py rename to system/config_default.py diff --git a/system/config_pdefault.py b/system/config_pdefault.py deleted file mode 100644 index b85df36..0000000 --- a/system/config_pdefault.py +++ /dev/null @@ -1,46 +0,0 @@ -# the defaults -import utils -import TestPlc - -# this default is for the Princeton test infrastructure - -def config (plc_specs, options): - - import config_main - plcs = config_main.config([],options) - if options.verbose: - print '======================================== AFTER main' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - ### side-effects on global config (was for onelab.eu initially) - from TestMapper import TestMapper - main_mapper = TestMapper (plcs,options) - plc_map = [ ( '*', {'PLC_ROOT_USER' : 'root@test.planet-lab.org', - 'PLC_MAIL_ENABLED' : 'false', - }) ] - plcs = main_mapper.map ( { 'plc' : plc_map } ) - - import config_ptestqemus - plcs = config_ptestqemus.config (plcs,options) - if options.verbose: - print '======================================== AFTER testqemus' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - import config_pvnodes - plcs = config_pvnodes.config(plcs,options) - if options.verbose: - print '======================================== AFTER vnodes' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - import config_pvplcs - plcs = config_pvplcs.config (plcs,options) - if options.verbose: - print '======================================== AFTER vservers' - for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc) - print '========================================' - - return plcs - diff --git a/system/config_ptestqemus.py b/system/config_ptestqemus.py deleted file mode 100644 index 
cde7790..0000000 --- a/system/config_ptestqemus.py +++ /dev/null @@ -1,37 +0,0 @@ -import sys - -from TestMapper import TestMapper -from TestPool import TestPoolQemu - -princeton_qemus_pool = [ ( 'testqemu1.test.planet-lab.org', None, None ) ] - -def config (plcs, options): - - # all plcs on the same vserver box - plc_box ='testbox.test.planet-lab.org' - # informative - label=options.personality.replace("linux","") - - # all qemus on a unique pool of 64bits boxes - node_map = [] - qemu_pool = TestPoolQemu (princeton_qemus_pool,options) - for index in range(options.size): - index += 1 - if options.ips_qemu: - ip_or_hostname=options.ips_qemu.pop() - (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname) - else: - (hostname,ip,unused) = qemu_pool.next_free() - node_map += [ ('node%d'%index, {'host_box':hostname},) ] - - mapper = {'plc': [ ('*' , {'hostname':plc_box, - 'PLC_DB_HOST':plc_box, - 'PLC_API_HOST':plc_box, - 'PLC_BOOT_HOST':plc_box, - 'PLC_WWW_HOST':plc_box, - 'name':'%s-'+label } ) - ], - 'node': node_map, - } - - return TestMapper(plcs,options).map(mapper) diff --git a/system/config_pvnodes.py b/system/config_pvnodes.py deleted file mode 100644 index 98ab1f9..0000000 --- a/system/config_pvnodes.py +++ /dev/null @@ -1,46 +0,0 @@ -# map all nodes onto the avail. pool - -import utils -from TestMapper import TestMapper -from TestPool import TestPoolIP - -onelab_nodes_ip_pool = [ - ("node-01.test.planet-lab.org","128.112.139.44", "de:ad:be:ef:00:10"), - ("node-02.test.planet-lab.org","128.112.139.66", "de:ad:be:ef:00:20"), -] - -site_dict = { - 'interface_fields:gateway':'128.112.139.1', - 'interface_fields:network':'128.112.139.0', - 'interface_fields:broadcast':'128.112.139.127', - 'interface_fields:netmask':'255.255.255.128', - 'interface_fields:dns1': '128.112.136.10', - 'interface_fields:dns2': '128.112.136.12', -} - -def config (plcs, options): - - ip_pool = TestPoolIP (onelab_nodes_ip_pool,options) - test_mapper = TestMapper (plcs, options) - - all_nodenames = test_mapper.node_names() - maps = [] - for nodename in all_nodenames: - if options.ips_node: - ip_or_hostname=options.ips_node.pop() - (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) - else: - (hostname,ip,mac) = ip_pool.next_free() - utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip)) - node_dict= {'node_fields:hostname':hostname, - 'interface_fields:ip':ip, - 'interface_fields:mac':mac, - } - - node_dict.update(site_dict) - maps.append ( ( nodename, node_dict) ) - - plc_map = [ ( '*' , { 'PLC_NET_DNS1' : site_dict [ 'interface_fields:dns1' ], - 'PLC_NET_DNS2' : site_dict [ 'interface_fields:dns2' ], } ) ] - - return test_mapper.map ({'node': maps, 'plc' : plc_map } ) diff --git a/system/config_pvplcs.py b/system/config_pvplcs.py deleted file mode 100644 index 318110a..0000000 --- a/system/config_pvplcs.py +++ /dev/null @@ -1,56 +0,0 @@ -import utils -import os.path -from TestPool import TestPoolIP - -# using vplc01 .. 
vplc15 - keep [16,17,18] for 4.2 and 19 and 20 for long-haul tests -princeton_plcs_ip_pool = [ - ("plc-01.test.planet-lab.org","128.112.139.34", "de:ad:be:ef:ff:01"), - ("plc-02.test.planet-lab.org","128.112.139.35", "de:ad:be:ef:ff:02"), - ("plc-03.test.planet-lab.org","128.112.139.36", "de:ad:be:ef:ff:03"), - ("plc-04.test.planet-lab.org","128.112.139.37", "de:ad:be:ef:ff:04"), - ("plc-05.test.planet-lab.org","128.112.139.41", "de:ad:be:ef:ff:05"), -] - -def config (plcs,options): - - utils.header ("Turning configuration into a vserver-based one for princeton") - - ip_pool = TestPoolIP (princeton_plcs_ip_pool,options) - - plc_counter=0 - for plc in plcs: - try: - if options.ips_plc : - ip_or_hostname=options.ips_plc.pop() - (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname) - if not options.quiet: - utils.header("Using user-provided %s %s for plc %s"%( - hostname,ip_or_hostname,plc['name'])) - else: - (hostname,ip,mac)=ip_pool.next_free() - if not options.quiet: - utils.header("Using auto-allocated %s %s for plc %s"%( - hostname,ip,plc['name'])) - - ### rewrite fields in plc - # compute a helpful vserver name - remove domain in hostname - simplehostname=hostname.split('.')[0] - vservername = options.buildname - if len(plcs) == 1 : - vservername = "%s-%s" % (vservername,simplehostname) - else: - plc_counter += 1 - vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname) - # apply - plc['vservername']=vservername - plc['vserverip']=ip - plc['name'] = "%s_%s"%(plc['name'],simplehostname) - utils.header("Attaching plc %s to vserver %s (%s)"%( - plc['name'],plc['vservername'],plc['vserverip'])) - for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]: - plc[key] = hostname - - except: - raise Exception('Cannot find an available IP for %s - exiting'%plc['name']) - - return plcs diff --git a/system/runtest b/system/run_log similarity index 100% rename from system/runtest rename to system/run_log -- 2.43.0
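
==================== a minimal LocalTestResources sketch (illustrative, not part of the patch)

The readme above describes the interface that TestMain and TestPlc expect from a $HOME/LocalTestResources.py module. The sketch below shows one plausible way to specialize the TestResources class from this patch; it is an assumption-laden example, not the OnelabTestResources implementation. The class name MyTestResources, the example.org hostnames, the 192.168.x.x addresses, the MAC addresses, the pool sizes and the max_plcs/max_qemus values are all placeholders to be replaced with real site values; step_list is also an assumption, since this draft of TestResources does not define it even though TestPlc.local_list calls it.

# $HOME/LocalTestResources.py -- hypothetical example, adjust pools to your site
from TestResources import TestResources
from Trackers import TrackerPlc, TrackerQemu

class MyTestResources (TestResources):

    # box(es) that host the myplc vservers -- placeholder hostname
    def plc_boxes (self):
        return [ 'testbox-plc.example.org' ]

    # qemu host boxes as (hostname, ip, mac); ip/mac are unused for qemu boxes
    def qemus_ip_pool (self):
        return [ ( 'testqemu%d.example.org'%i, None, None) for i in range(1,3) ]

    # hostnames/IPs/MACs usable for the myplc vservers
    def plcs_ip_pool (self):
        return [ ( 'vplc%02d.example.org'%i,
                   '192.168.1.%d'%(100+i),
                   '02:34:56:00:ee:%02d'%i) for i in range(1,5) ]

    # hostnames/IPs/MACs usable for the (qemu) test nodes
    def nodes_ip_pool (self):
        return [ ( 'vnode%02d.example.org'%i,
                   '192.168.1.%d'%(200+i),
                   '02:34:56:00:00:%02d'%i) for i in range(1,5) ]

    # network settings shared by all test nodes
    def network_dict (self):
        return { 'interface_fields:gateway':   '192.168.1.1',
                 'interface_fields:network':   '192.168.1.0',
                 'interface_fields:broadcast': '192.168.1.255',
                 'interface_fields:netmask':   '255.255.255.0',
                 'interface_fields:dns1':      '192.168.1.2',
                 'interface_fields:dns2':      '192.168.1.3',
                 }

    # how many concurrent plcs / qemu instances the trackers should keep alive
    def max_plcs (self):
        return 2
    def max_qemus (self):
        return 2

    # TestPlc.local_list calls step_list; TestResources does not provide it in
    # this draft, so simply dump the tracker files (assumption, not from the patch)
    def step_list (self,plc):
        TrackerPlc(plc.options,instances=self.max_plcs()).list()
        TrackerQemu(plc.options,instances=self.max_qemus()-1).list()
        return True

# the single object that TestMain and TestPlc import
local_resources = MyTestResources ()

With such a module in $HOME, TestMain.run applies local_resources.localize to the plc specs right after the config files are loaded, and the local_pre / local_post / local_list / local_cleanup steps of TestPlc dispatch to the step_* methods shown here.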