From: Thierry Parmentelat
Date: Wed, 23 May 2012 09:35:56 +0000 (+0200)
Subject: fix old bug about doc strings for mapped methods
X-Git-Tag: tests-5.1-4~5
X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=bb0875631f86a2603b4e53191a7e88e242c453bf;hp=5aab1614c2ccc5dd76395a9613757bf4eec97c3f;p=tests.git

fix old bug about doc strings for mapped methods
trim TestPlc accordingly, have the doc where it belongs
split sfa_view into sfa_list sfa_show and sfa_slices
---
diff --git a/system/TestNode.py b/system/TestNode.py
index 54ea416..548edbd 100644
--- a/system/TestNode.py
+++ b/system/TestNode.py
@@ -122,6 +122,7 @@ class TestNode:
 
     # Do most of the stuff locally - will be pushed on host_box - *not* the plc - later if needed
     def qemu_local_init(self):
+        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
         utils.system("rm -rf %s"%self.nodedir())
         utils.system("mkdir %s"%self.nodedir())
         if not self.is_qemu():
@@ -129,6 +130,7 @@ class TestNode:
         return utils.system("rsync -v -a --exclude .svn template-qemu/ %s/"%self.nodedir())==0
 
     def bootcd(self):
+        "all nodes: invoke GetBootMedium and store result locally"
         utils.header("Calling GetBootMedium for %s"%self.name())
         options = []
         if self.is_qemu():
@@ -149,21 +151,25 @@ class TestNode:
         return True
 
     def nodestate_reinstall (self):
+        "all nodes: mark PLCAPI boot_state as reinstall"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'reinstall'})
         return True
 
     def nodestate_safeboot (self):
+        "all nodes: mark PLCAPI boot_state as safeboot"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'safeboot'})
         return True
 
     def nodestate_boot (self):
+        "all nodes: mark PLCAPI boot_state as boot"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'boot'})
         return True
 
     def nodestate_show (self):
+        "all nodes: show PLCAPI boot_state"
         if self.test_plc.options.dry_run:
             print "Dry_run: skipped getting current node state"
             return True
@@ -172,6 +178,7 @@ class TestNode:
         return True
 
     def qemu_local_config(self):
+        "all nodes: compute qemu config qemu.conf and store it locally"
         if not self.is_qemu():
             return
         mac=self.node_spec['interface_fields']['mac']
@@ -194,6 +201,7 @@ class TestNode:
         return True
 
     def qemu_export (self):
+        "all nodes: push local node-dep directory on the qemu box"
         # if relevant, push the qemu area onto the host box
         if self.test_box().is_local():
             return True
@@ -203,6 +211,7 @@ class TestNode:
         return self.test_box().copy(self.nodedir(),recursive=True)==0
 
     def qemu_start (self):
+        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
         model=self.node_spec['node_fields']['model']
         #starting the Qemu nodes before
         if self.is_qemu():
@@ -212,6 +221,7 @@ class TestNode:
         return True
 
     def timestamp_qemu (self):
+        "all nodes: write a timestamp in the node's qemu area on the host box"
         test_box = self.test_box()
         test_box.run_in_buildname("mkdir -p %s"%self.nodedir())
         now=int(time.time())
@@ -248,6 +258,7 @@ class TestNode:
         self.test_box().test_ssh.fetch(remote_log,local_log)
 
     def keys_clear_known_hosts (self):
+        "remove test nodes entries from the local known_hosts file"
         TestSsh(self.name()).clear_known_hosts()
         return True
 
diff --git a/system/TestPlc.py b/system/TestPlc.py
index 9475d23..790c617 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -45,7 +45,7 @@ def node_mapper (method):
             if not node_method(test_node, *args, **kwds): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
     return actual
 
 def slice_mapper (method):
@@ -59,7 +59,7 @@
             if not slice_method(test_slice,self.options): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
     return actual
 
 def slice_sfa_mapper (method):
@@ -71,7 +71,7 @@
             if not slice_method(test_slice,self.options): overall=False
         return overall
     # restore the doc text
-    actual.__doc__=method.__doc__
+    actual.__doc__=TestSliceSfa.__dict__[method.__name__].__doc__
     return actual
 
 SEP=''
@@ -91,7 +91,8 @@ class TestPlc:
         'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
         'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
         'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
-        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
+        'sfa_update_user@1', 'sfa_update_slice@1', SEPSFA,
+        'sfa_list@1', 'sfa_show@1', 'sfa_slices@1', 'sfa_utest@1', SEPSFA,
         # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
         # but as the stress test might take a while, we sometimes missed the debug mode..
         'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
@@ -495,6 +496,7 @@ class TestPlc:
     # write a timestamp in /vservers/<>.timestamp
     # cannot be inside the vserver, that causes vserver .. build to cough
     def timestamp_vs (self):
+        "Create a timestamp to remember creation date for this plc"
         now=int(time.time())
         # TODO-lxc check this one
         # a first approx. is to store the timestamp close to the VM root like vs does
@@ -991,37 +993,21 @@ class TestPlc:
         return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
 
     @node_mapper
-    def qemu_local_init (self):
-        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
-        pass
+    def qemu_local_init (self): pass
     @node_mapper
-    def bootcd (self):
-        "all nodes: invoke GetBootMedium and store result locally"
-        pass
+    def bootcd (self): pass
     @node_mapper
-    def qemu_local_config (self):
-        "all nodes: compute qemu config qemu.conf and store it locally"
-        pass
+    def qemu_local_config (self): pass
     @node_mapper
-    def nodestate_reinstall (self):
-        "all nodes: mark PLCAPI boot_state as reinstall"
-        pass
+    def nodestate_reinstall (self): pass
     @node_mapper
-    def nodestate_safeboot (self):
-        "all nodes: mark PLCAPI boot_state as safeboot"
-        pass
+    def nodestate_safeboot (self): pass
     @node_mapper
-    def nodestate_boot (self):
-        "all nodes: mark PLCAPI boot_state as boot"
-        pass
+    def nodestate_boot (self): pass
     @node_mapper
-    def nodestate_show (self):
-        "all nodes: show PLCAPI boot_state"
-        pass
+    def nodestate_show (self): pass
     @node_mapper
-    def qemu_export (self):
-        "all nodes: push local node-dep directory on the qemu box"
-        pass
+    def qemu_export (self): pass
 
     ### check hooks : invoke scripts from hooks/{node,slice}
     def check_hooks_node (self):
@@ -1107,19 +1093,12 @@ class TestPlc:
         return True
 
     @slice_mapper
-    def ssh_slice(self):
-        "tries to ssh-enter the slice with the user key, to ensure slice creation"
-        pass
-
+    def ssh_slice(self): pass
     @slice_mapper
-    def ssh_slice_off (self):
-        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
-        pass
+    def ssh_slice_off (self): pass
 
     @node_mapper
-    def keys_clear_known_hosts (self):
-        "remove test nodes entries from the local known_hosts file"
-        pass
+    def keys_clear_known_hosts (self): pass
 
     def speed_up_slices (self):
         "tweak nodemanager settings on all nodes using a conf file"
@@ -1139,14 +1118,10 @@ class TestPlc:
         return True
 
     @node_mapper
-    def qemu_start (self) :
-        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
-        pass
+    def qemu_start (self) : pass
     @node_mapper
-    def timestamp_qemu (self) :
-        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
-        pass
+    def timestamp_qemu (self) : pass
 
     def check_tcp (self):
         "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
@@ -1251,6 +1226,7 @@ class TestPlc:
         if first_try: return True
         utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
         (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
+        utils.header("rpm_path=<<%s>>"%cached_rpm_path) # just for checking
         self.run_in_guest("rpm -i %s"%cached_rpm_path)
         return self.yum_check_installed ("sfa-client")
 
@@ -1416,7 +1392,7 @@ class TestPlc:
             utils.header("DRY RUN - skipping step")
             return True
         sfa_spec=self.plc_spec['sfa']
-        # cannot use sfa_slice_mapper to pass dir_name
+        # cannot use slice_sfa_mapper to pass dir_name
         for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
             test_slice=TestSliceSfa(self,slice_spec)
             dir_basename=os.path.basename(test_slice.sfi_path())
@@ -1437,68 +1413,35 @@ class TestPlc:
         return True
 
     @slice_sfa_mapper
-    def sfa_add_site (self):
-        "bootstrap a site using sfaadmin"
-        pass
-
+    def sfa_add_site (self): pass
     @slice_sfa_mapper
-    def sfa_add_pi (self):
-        "bootstrap a PI user for that site"
-        pass
-
+    def sfa_add_pi (self): pass
     @slice_sfa_mapper
-    def sfa_add_user(self):
-        "run sfi.py add"
-        pass
-
+    def sfa_add_user(self): pass
     @slice_sfa_mapper
-    def sfa_update_user(self):
-        "run sfi.py update"
-
+    def sfa_update_user(self): pass
     @slice_sfa_mapper
-    def sfa_add_slice(self):
-        "run sfi.py add (on Registry) from slice.xml"
-        pass
-
+    def sfa_add_slice(self): pass
     @slice_sfa_mapper
-    def sfa_discover(self):
-        "discover resources into resouces_in.rspec"
-        pass
-
+    def sfa_discover(self): pass
     @slice_sfa_mapper
-    def sfa_create_slice(self):
-        "run sfi.py create (on SM) - 1st time"
-        pass
-
+    def sfa_create_slice(self): pass
     @slice_sfa_mapper
-    def sfa_check_slice_plc(self):
-        "check sfa_create_slice at the plcs - all local nodes should be in slice"
-        pass
-
+    def sfa_check_slice_plc(self): pass
     @slice_sfa_mapper
-    def sfa_update_slice(self):
-        "run sfi.py create (on SM) on existing object"
-        pass
-
+    def sfa_update_slice(self): pass
     @slice_sfa_mapper
-    def sfa_view(self):
-        "various registry-related calls"
-        pass
-
+    def sfa_list(self): pass
     @slice_sfa_mapper
-    def ssh_slice_sfa(self):
-        "tries to ssh-enter the SFA slice"
-        pass
-
+    def sfa_show(self): pass
     @slice_sfa_mapper
-    def sfa_delete_user(self):
-        "run sfi.py delete"
-        pass
-
+    def sfa_slices(self): pass
     @slice_sfa_mapper
-    def sfa_delete_slice(self):
-        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
-        pass
+    def ssh_slice_sfa(self): pass
+    @slice_sfa_mapper
+    def sfa_delete_user(self): pass
+    @slice_sfa_mapper
+    def sfa_delete_slice(self): pass
 
     def sfa_stop(self):
         "service sfa stop"
diff --git a/system/TestSlice.py b/system/TestSlice.py
index 31bd70b..27d2901 100644
--- a/system/TestSlice.py
+++ b/system/TestSlice.py
@@ -108,10 +108,12 @@ class TestSlice:
 
     # trying to reach the slice through ssh - expected to answer
     def ssh_slice (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, to ensure slice creation"
         return self.do_ssh_slice(options, expected=True, *args, **kwds)
 
     # when we expect the slice is not reachable
     def ssh_slice_off (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
         return self.do_ssh_slice(options, expected=False, *args, **kwds)
 
     def do_ssh_slice(self,options,expected=True,timeout_minutes=20,silent_minutes=10,period=15):
diff --git a/system/TestSliceSfa.py b/system/TestSliceSfa.py
index b476150..7f357fc 100644
--- a/system/TestSliceSfa.py
+++ b/system/TestSliceSfa.py
@@ -30,10 +30,13 @@ class TestSliceSfa:
 
     def rspec_style (self): return self.sfa_slice_spec['rspec_style']
 
+    # the hrn for the site
+    def auth_hrn (self):
+        return self.test_plc.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
+
     # the hrn for the site
     def site_hrn (self):
-        return "%s.%s"%(self.test_plc.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],
-                        self.login_base)
+        return "%s.%s"%(self.auth_hrn(),self.login_base)
 
     # something in the site (users typically)
     def qualified_hrn (self, name):
@@ -120,10 +123,12 @@ class TestSliceSfa:
 
     # using sfaadmin to bootstrap
     def sfa_add_site (self, options):
+        "bootstrap a site using sfaadmin"
         command="sfaadmin reg register -t authority -x %s"%self.site_hrn()
         return self.test_plc.run_in_guest(command)==0
 
     def sfa_add_pi (self, options):
+        "bootstrap a PI user for that site"
         pi_hrn=self.qualified_hrn(self.piuser)
         pi_mail=self.sfa_slice_spec['pimail']
         # as installed by sfi_config
@@ -135,10 +140,13 @@ class TestSliceSfa:
     # user management
     def sfa_add_user (self, options):
+        "add a regular user using sfi.py add"
         return TestUserSfa(self.test_plc, self.sfa_slice_spec, self).add_user()
     def sfa_update_user (self, options):
+        "update a user record using sfi.py update"
         return TestUserSfa(self.test_plc, self.sfa_slice_spec, self).update_user()
     def sfa_delete_user (self, options):
+        "run sfi.py delete"
         return TestUserSfa(self.test_plc,
                            self.sfa_slice_spec, self).delete_user()
 
     # run as pi
@@ -149,25 +157,37 @@ class TestSliceSfa:
         return "sfi.py -d %s -u %s %s"%(self.sfi_path(),self.qualified_hrn(self.regularuser), command,)
 
     # those are step names exposed as methods of TestPlc, hence the _sfa
-    def sfa_view (self, options):
-        "run (as regular user) sfi list and sfi show (both on Registry) and sfi slices (on SM)"
-        root_auth=self.test_plc.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
+
+    def sfa_list (self, options):
+        "run (as regular user) sfi list (on Registry)"
+        return \
+            self.test_plc.run_in_guest(self.sfi_user("list -r %s"%self.auth_hrn()))==0 and \
+            self.test_plc.run_in_guest(self.sfi_user("list %s"%(self.site_hrn())))==0
+
+    def sfa_show (self, options):
+        "run (as regular user) sfi show (on Registry)"
+        return \
+            self.test_plc.run_in_guest(self.sfi_user("show %s"%(self.site_hrn())))==0
+
+    def sfa_slices (self, options):
+        "run (as regular user) sfi slices (on SM)"
         return \
-            self.test_plc.run_in_guest(self.sfi_user("list %s"%(self.site_hrn())))==0 and \
-            self.test_plc.run_in_guest(self.sfi_user("show %s"%(self.site_hrn())))==0 and \
-            self.test_plc.run_in_guest(self.sfi_user("slices"))==0
+            self.test_plc.run_in_guest(self.sfi_user("slices"))==0
 
     # needs to be run as pi
     def sfa_add_slice(self,options):
+        "run sfi.py add (on Registry) from slice.xml"
         return self.test_plc.run_in_guest(self.sfi_pi("add %s"%(self.addslicefile())))==0
 
     # run as user
     def sfa_discover(self,options):
+        "discover resources into resouces_in.rspec"
         return self.test_plc.run_in_guest(self.sfi_user(\
             "resources %s -o %s/%s"% (self.discover_option(),self.sfi_path(),self.adfile())))==0
 
     # run sfi create as a regular user
     def sfa_create_slice(self,options):
+        "run sfi.py create (on SM) - 1st time"
         commands=[
         "sfiListNodes.py -i %s/%s -o %s/%s"%(self.sfi_path(),self.adfile(),self.sfi_path(),self.nodefile()),
         "sfiAddSliver.py -i %s/%s -n %s/%s -o %s/%s"%\
@@ -180,6 +200,7 @@ class TestSliceSfa:
 
     # all local nodes in slice ?
     def sfa_check_slice_plc (self,options):
+        "check sfa_create_slice at the plcs - all local nodes should be in slice"
         slice_fields = self.sfa_slice_spec['slice_fields']
         slice_name = slice_fields['name']
         slice=self.test_plc.apiserver.GetSlices(self.test_plc.auth_root(), slice_name)[0]
@@ -195,15 +216,18 @@ class TestSliceSfa:
 
     # actually the same for now
     def sfa_update_slice(self,options):
+        "run sfi.py create (on SM) on existing object"
         return self.sfa_create_slice(options)
 
     # run as pi
     def sfa_delete_slice(self,options):
+        "run sfi.py delete"
         self.test_plc.run_in_guest(self.sfi_pi("delete %s"%(self.hrn(),)))
         return self.test_plc.run_in_guest(self.sfi_pi("remove -t slice %s"%(self.hrn(),)))==0
 
     # check the resulting sliver
     def ssh_slice_sfa(self,options,timeout_minutes=40,silent_minutes=30,period=15):
+        "tries to ssh-enter the SFA slice"
         timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
         graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
         # locate a key
diff --git a/system/macros.py b/system/macros.py
index fac4d85..b6511fc 100644
--- a/system/macros.py
+++ b/system/macros.py
@@ -59,6 +59,12 @@ sequences['sfa_scratch'] = [
     'sfa_update_user', 'sfa_update_slice', 'sfa_view', 'sfa_utest',
 ]
 
+sequences['sfa_view'] = [
+    'sfa-list',
+    'sfa-show',
+    'sfa-slices',
+]
+
 # something that can given to the nightly to prepare a standalone sfa setup
 # after what you'll want to tweak the config to point to a myplc some place else
 sequences['sfa_standalone'] = [
@@ -66,8 +72,8 @@ sequences['sfa_standalone'] = [
     'vs_delete',
     'timestamp_vs',
     'vs_create',
-    'sfa_install_core',
     'sfa_install_client',
+    'sfa_install_core',
     'sfa_configure',
     'cross_sfa_configure',
     'sfa_start',
@@ -76,8 +82,10 @@ sequences['sfa_standalone'] = [
     'sfa-add-site',
     'sfa-add-pi',
     'sfa-add-user',
     'sfa-add-slice',
+    'sfa-view',
     'sfa-delete-slice',
     'sfa-delete-user',
+    'sfa-view',
 ]
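
The gist of the bug this commit fixes sits in the last line of each *_mapper factory in TestPlc.py: the auto-generated wrapper used to copy its __doc__ from the empty stub declared in TestPlc, so the mapped steps showed up with no documentation; it now copies it from the real implementation in TestNode / TestSlice / TestSliceSfa, which is also where the doc strings are moved. Below is a minimal, self-contained sketch of that pattern; FakeNode and DemoPlc are hypothetical stand-ins for illustration only, not code from tests.git.

# Sketch of the doc-string propagation used by node_mapper and friends.
# FakeNode / DemoPlc are illustrative names; the real classes are TestNode / TestPlc.

class FakeNode:
    def __init__(self, name):
        self.name = name

    def nodestate_show(self):
        "all nodes: show PLCAPI boot_state"
        print("%s: boot" % self.name)
        return True


def node_mapper(method):
    "turn an empty per-plc stub into a loop over all nodes"
    def actual(self, *args, **kwds):
        overall = True
        # look the real implementation up by name, as the tests framework does
        node_method = FakeNode.__dict__[method.__name__]
        for node in self.nodes:
            if not node_method(node, *args, **kwds):
                overall = False
        return overall
    # the fix: take the doc text from the mapped class, not from the empty stub
    actual.__doc__ = FakeNode.__dict__[method.__name__].__doc__
    return actual


class DemoPlc:
    def __init__(self, nodes):
        self.nodes = nodes

    @node_mapper
    def nodestate_show(self):
        pass


if __name__ == '__main__':
    plc = DemoPlc([FakeNode('node1'), FakeNode('node2')])
    # the step now carries the doc string defined next to the real code
    print(DemoPlc.nodestate_show.__doc__)
    plc.nodestate_show()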