1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
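# a minimal sketch of what such a step could look like (hypothetical example):
#     def my_step (self):
#         "one-line doc string, picked up by the test driver"
#         return self.run_in_host("true") == 0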
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d minutes'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
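# the decorated function's name encodes the duration,
# e.g. standby_3 (defined at the end of this file) sleeps for 3 minutes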
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
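# usage sketch - with the decorator above, a declaration like
#     @node_mapper
#     def qemu_start (self) : pass
# runs TestNode.qemu_start on every node and and-s the results,
# as is done for qemu_start further down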
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run, to avoid waiting on all the slices sequentially
89 # this is especially useful when a test fails
90 # because we need to pass arguments we use a class instead of a plain function decorator
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
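# used below as e.g. @slice_mapper__tasks(20,10,15), i.e. an overall timeout of
# 20 minutes, silence for the first 10 minutes, and a 15-second polling period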
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
144 # slices created interactively under plcsh seem to be fine, but these ones don't have the tags
145 # keep this out of the way for now
146 'check_vsys_defaults_ignore', SEP,
147 # run this first off so it's easier to re-run on another qemu box
148 'qemu_kill_mine', SEP,
149 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
150 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
151 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
152 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
153 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
154 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
155 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
156 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
157 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
158 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
159 # but as the stress test might take a while, we sometimes missed the debug mode..
160 'probe_kvm_iptables',
161 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
162 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
163 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
164 'cross_check_tcp@1', 'check_system_slice', SEP,
165 # check slices are turned off properly
166 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
167 # check they are properly re-created with the same name
168 'fill_slices', 'ssh_slice_again_ignore', SEP,
169 'gather_logs_force', SEP,
172 'export', 'show_boxes', SEP,
173 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
174 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
175 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
176 'delete_leases', 'list_leases', SEP,
178 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
179 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
180 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
181 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
182 'plc_db_dump' , 'plc_db_restore', SEP,
183 'check_netflow','check_drl', SEP,
184 'debug_nodemanager', 'slice_fs_present', SEP,
185 'standby_1_through_20','yes','no',SEP,
189 def printable_steps (steps):
190 single_line=" ".join(steps)+" "
191 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
193 def valid_step (step):
194 return step != SEP and step != SEPSFA
196 # turn off the sfa-related steps when build has skipped SFA
197 # this was originally for centos5 but is still valid
198 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
200 def _has_sfa_cached (rpms_url):
201 if os.path.isfile(has_sfa_cache_filename):
202 cached=file(has_sfa_cache_filename).read()=="yes"
203 utils.header("build provides SFA (cached):%s"%cached)
205 # warning, we're now building 'sface' so let's be a bit more picky
206 # full builds are expected to return with 0 here
207 utils.header ("Checking if build provides SFA package...")
208 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
209 encoded='yes' if retcod else 'no'
210 file(has_sfa_cache_filename,'w').write(encoded)
214 def check_whether_build_has_sfa (rpms_url):
215 has_sfa=TestPlc._has_sfa_cached(rpms_url)
217 utils.header("build does provide SFA")
219 # move all steps containing 'sfa' from default_steps to other_steps
220 utils.header("SFA package not found - removing steps with sfa or sfi")
221 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
222 TestPlc.other_steps += sfa_steps
223 for step in sfa_steps: TestPlc.default_steps.remove(step)
225 def __init__ (self,plc_spec,options):
226 self.plc_spec=plc_spec
228 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
229 self.vserverip=plc_spec['vserverip']
230 self.vservername=plc_spec['vservername']
231 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
232 self.apiserver=TestApiserver(self.url,options.dry_run)
233 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
234 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
236 def has_addresses_api (self):
237 return self.apiserver.has_method('AddIpAddress')
240 name=self.plc_spec['name']
241 return "%s.%s"%(name,self.vservername)
244 return self.plc_spec['host_box']
247 return self.test_ssh.is_local()
249 # defining the API methods on this object through xmlrpc
250 # would help, but is not strictly necessary
254 def actual_command_in_guest (self,command, backslash=False):
255 raw1=self.host_to_guest(command)
256 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
259 def start_guest (self):
260 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
262 def stop_guest (self):
263 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
265 def run_in_guest (self,command,backslash=False):
266 raw=self.actual_command_in_guest(command,backslash)
267 return utils.system(raw)
269 def run_in_host (self,command):
270 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
272 # backslashing turned out so awful at some point that I've turned off auto-backslashing
273 # see e.g. plc_start esp. the version for f14
274 # command gets run in the plc's vm
275 def host_to_guest(self,command):
276 # f14 still needs some extra help
277 if self.options.fcdistro == 'f14':
278 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
280 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
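# for illustration, with a (hypothetical) vservername 'vplc01', host_to_guest('ls')
# would yield on recent fcdistros:
#     virsh -c lxc:/// lxc-enter-namespace vplc01 -- /usr/bin/env ls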
283 # this /vservers thing is legacy...
284 def vm_root_in_host(self):
285 return "/vservers/%s/"%(self.vservername)
287 def vm_timestamp_path (self):
288 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
290 # start/stop the vserver
291 def start_guest_in_host(self):
292 return "virsh -c lxc:/// start %s"%(self.vservername)
294 def stop_guest_in_host(self):
295 return "virsh -c lxc:/// destroy %s"%(self.vservername)
298 def run_in_guest_piped (self,local,remote):
299 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
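# e.g. self.run_in_guest_piped('cat config','plc-config-tty') feeds a local file
# to plc-config-tty running in the guest - this is how plc_configure works below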
301 def yum_check_installed (self, rpms):
302 if isinstance (rpms, list):
304 return self.run_in_guest("rpm -q %s"%rpms)==0
306 # does a yum install in the vs, ignore yum retcod, check with rpm
307 def yum_install (self, rpms):
308 if isinstance (rpms, list):
310 self.run_in_guest("yum -y install %s"%rpms)
311 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
312 self.run_in_guest("yum-complete-transaction -y")
313 return self.yum_check_installed (rpms)
315 def auth_root (self):
316 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
317 'AuthMethod':'password',
318 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
319 'Role' : self.plc_spec['role']
321 def locate_site (self,sitename):
322 for site in self.plc_spec['sites']:
323 if site['site_fields']['name'] == sitename:
325 if site['site_fields']['login_base'] == sitename:
327 raise Exception,"Cannot locate site %s"%sitename
329 def locate_node (self,nodename):
330 for site in self.plc_spec['sites']:
331 for node in site['nodes']:
332 if node['name'] == nodename:
334 raise Exception,"Cannot locate node %s"%nodename
336 def locate_hostname (self,hostname):
337 for site in self.plc_spec['sites']:
338 for node in site['nodes']:
339 if node['node_fields']['hostname'] == hostname:
341 raise Exception,"Cannot locate hostname %s"%hostname
343 def locate_key (self,key_name):
344 for key in self.plc_spec['keys']:
345 if key['key_name'] == key_name:
347 raise Exception,"Cannot locate key %s"%key_name
349 def locate_private_key_from_key_names (self, key_names):
350 # locate the first avail. key
352 for key_name in key_names:
353 key_spec=self.locate_key(key_name)
354 test_key=TestKey(self,key_spec)
355 publickey=test_key.publicpath()
356 privatekey=test_key.privatepath()
357 if os.path.isfile(publickey) and os.path.isfile(privatekey):
359 if found: return privatekey
362 def locate_slice (self, slicename):
363 for slice in self.plc_spec['slices']:
364 if slice['slice_fields']['name'] == slicename:
366 raise Exception,"Cannot locate slice %s"%slicename
368 def all_sliver_objs (self):
370 for slice_spec in self.plc_spec['slices']:
371 slicename = slice_spec['slice_fields']['name']
372 for nodename in slice_spec['nodenames']:
373 result.append(self.locate_sliver_obj (nodename,slicename))
376 def locate_sliver_obj (self,nodename,slicename):
377 (site,node) = self.locate_node(nodename)
378 slice = self.locate_slice (slicename)
380 test_site = TestSite (self, site)
381 test_node = TestNode (self, test_site,node)
382 # xxx the slice site is assumed to be the node site - mhh - probably harmless
383 test_slice = TestSlice (self, test_site, slice)
384 return TestSliver (self, test_node, test_slice)
386 def locate_first_node(self):
387 nodename=self.plc_spec['slices'][0]['nodenames'][0]
388 (site,node) = self.locate_node(nodename)
389 test_site = TestSite (self, site)
390 test_node = TestNode (self, test_site,node)
393 def locate_first_sliver (self):
394 slice_spec=self.plc_spec['slices'][0]
395 slicename=slice_spec['slice_fields']['name']
396 nodename=slice_spec['nodenames'][0]
397 return self.locate_sliver_obj(nodename,slicename)
399 # all different hostboxes used in this plc
400 def get_BoxNodes(self):
401 # maps on sites and nodes, return [ (host_box,test_node) ]
403 for site_spec in self.plc_spec['sites']:
404 test_site = TestSite (self,site_spec)
405 for node_spec in site_spec['nodes']:
406 test_node = TestNode (self, test_site, node_spec)
407 if not test_node.is_real():
408 tuples.append( (test_node.host_box(),test_node) )
409 # transform into a dict { 'host_box' -> [ test_node .. ] }
411 for (box,node) in tuples:
412 if not result.has_key(box):
415 result[box].append(node)
418 # a step for checking this stuff
419 def show_boxes (self):
420 'print summary of nodes location'
421 for (box,nodes) in self.get_BoxNodes().iteritems():
422 print box,":"," + ".join( [ node.name() for node in nodes ] )
425 # make this a valid step
426 def qemu_kill_all(self):
427 'kill all qemu instances on the qemu boxes involved in this setup'
428 # this is the brute force version, kill all qemus on that host box
429 for (box,nodes) in self.get_BoxNodes().iteritems():
430 # pass the first nodename, as we don't push template-qemu on testboxes
431 nodedir=nodes[0].nodedir()
432 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
435 # make this a valid step
436 def qemu_list_all(self):
437 'list all qemu instances on the qemu boxes involved in this setup'
438 for (box,nodes) in self.get_BoxNodes().iteritems():
439 # this is the brute force version - list all qemus on that host box
440 TestBoxQemu(box,self.options.buildname).qemu_list_all()
443 # list only the qemus related to this test
444 def qemu_list_mine(self):
445 'list qemu instances for our nodes'
446 for (box,nodes) in self.get_BoxNodes().iteritems():
447 # the fine-grain version
452 # clean up only the qemus related to this test
453 def qemu_clean_mine(self):
454 'cleanup (rm -rf) qemu instances for our nodes'
455 for (box,nodes) in self.get_BoxNodes().iteritems():
456 # the fine-grain version
461 # kill only the right qemus
462 def qemu_kill_mine(self):
463 'kill the qemu instances for our nodes'
464 for (box,nodes) in self.get_BoxNodes().iteritems():
465 # the fine-grain version
470 #################### display config
472 "show test configuration after localization"
477 # ugly hack to make sure 'run export' only reports about the 1st plc
478 # to avoid confusion - also we use 'inri_slice1' in various aliases..
481 "print cut'n paste-able stuff to export env variables to your shell"
482 # guess local domain from hostname
483 if TestPlc.exported_id>1:
484 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
486 TestPlc.exported_id+=1
487 domain=socket.gethostname().split('.',1)[1]
488 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
489 print "export BUILD=%s"%self.options.buildname
490 print "export PLCHOSTLXC=%s"%fqdn
491 print "export GUESTNAME=%s"%self.plc_spec['vservername']
492 vplcname=self.plc_spec['vservername'].split('-')[-1]
493 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
494 # find hostname of first node
495 (hostname,qemubox) = self.all_node_infos()[0]
496 print "export KVMHOST=%s.%s"%(qemubox,domain)
497 print "export NODE=%s"%(hostname)
501 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
502 def show_pass (self,passno):
503 for (key,val) in self.plc_spec.iteritems():
504 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
508 self.display_site_spec(site)
509 for node in site['nodes']:
510 self.display_node_spec(node)
511 elif key=='initscripts':
512 for initscript in val:
513 self.display_initscript_spec (initscript)
516 self.display_slice_spec (slice)
519 self.display_key_spec (key)
521 if key not in ['sites','initscripts','slices','keys']:
522 print '+ ',key,':',val
524 def display_site_spec (self,site):
525 print '+ ======== site',site['site_fields']['name']
526 for (k,v) in site.iteritems():
527 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
530 print '+ ','nodes : ',
532 print node['node_fields']['hostname'],'',
538 print user['name'],'',
540 elif k == 'site_fields':
541 print '+ login_base',':',v['login_base']
542 elif k == 'address_fields':
548 def display_initscript_spec (self,initscript):
549 print '+ ======== initscript',initscript['initscript_fields']['name']
551 def display_key_spec (self,key):
552 print '+ ======== key',key['key_name']
554 def display_slice_spec (self,slice):
555 print '+ ======== slice',slice['slice_fields']['name']
556 for (k,v) in slice.iteritems():
569 elif k=='slice_fields':
570 print '+ fields',':',
571 print 'max_nodes=',v['max_nodes'],
576 def display_node_spec (self,node):
577 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
578 print "hostname=",node['node_fields']['hostname'],
579 print "ip=",node['interface_fields']['ip']
580 if self.options.verbose:
581 utils.pprint("node details",node,depth=3)
583 # another entry point for just showing the boxes involved
584 def display_mapping (self):
585 TestPlc.display_mapping_plc(self.plc_spec)
589 def display_mapping_plc (plc_spec):
590 print '+ MyPLC',plc_spec['name']
591 # WARNING this would not be right for lxc-based PLC's - should be harmless though
592 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
593 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
594 for site_spec in plc_spec['sites']:
595 for node_spec in site_spec['nodes']:
596 TestPlc.display_mapping_node(node_spec)
599 def display_mapping_node (node_spec):
600 print '+ NODE %s'%(node_spec['name'])
601 print '+\tqemu box %s'%node_spec['host_box']
602 print '+\thostname=%s'%node_spec['node_fields']['hostname']
604 # write a timestamp in /vservers/<>.timestamp
605 # it cannot live inside the vserver, as that would make the vserver build cough
606 def plcvm_timestamp (self):
607 "Create a timestamp to remember creation date for this plc"
609 # TODO-lxc check this one
610 # a first approx. is to store the timestamp close to the VM root like vs does
611 stamp_path=self.vm_timestamp_path ()
612 stamp_dir = os.path.dirname (stamp_path)
613 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
614 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
616 # this is called unconditionally at the beginning of the test sequence
617 # just in case this is a rerun, so if the vm is not running it's fine
618 def plcvm_delete(self):
619 "vserver delete the test myplc"
620 stamp_path=self.vm_timestamp_path()
621 self.run_in_host("rm -f %s"%stamp_path)
622 self.run_in_host("virsh -c lxc:/// destroy %s"%self.vservername)
623 self.run_in_host("virsh -c lxc:/// undefine %s"%self.vservername)
624 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
628 # historically the build was being fetched by the tests
629 # now the build pushes itself as a subdir of the tests workdir
630 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
631 def plcvm_create (self):
632 "vserver creation (no install done)"
633 # push the local build/ dir to the testplc box
635 # a full path for the local calls
636 build_dir=os.path.dirname(sys.argv[0])
637 # sometimes this is empty - set to "." in such a case
638 if not build_dir: build_dir="."
639 build_dir += "/build"
641 # use a standard name - will be relative to remote buildname
643 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
644 self.test_ssh.rmdir(build_dir)
645 self.test_ssh.copy(build_dir,recursive=True)
646 # the repo url is taken from arch-rpms-url
647 # with the last step (i386) removed
648 repo_url = self.options.arch_rpms_url
649 for level in [ 'arch' ]:
650 repo_url = os.path.dirname(repo_url)
652 # invoke initvm (drop support for vs)
653 script="lbuild-initvm.sh"
655 # pass the vbuild-nightly options to [lv]test-initvm
656 script_options += " -p %s"%self.options.personality
657 script_options += " -d %s"%self.options.pldistro
658 script_options += " -f %s"%self.options.fcdistro
659 script_options += " -r %s"%repo_url
660 vserver_name = self.vservername
662 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
663 script_options += " -n %s"%vserver_hostname
665 print "Cannot reverse lookup %s"%self.vserverip
666 print "This is considered fatal, as this might pollute the test results"
668 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
669 return self.run_in_host(create_vserver) == 0
672 def plc_install(self):
673 "yum install myplc, noderepo, and the plain bootstrapfs"
675 # workaround for getting pgsql8.2 on centos5
676 if self.options.fcdistro == "centos5":
677 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
680 if self.options.personality == "linux32":
682 elif self.options.personality == "linux64":
685 raise Exception, "Unsupported personality %r"%self.options.personality
686 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
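# e.g. (hypothetical values) pldistro 'onelab' + fcdistro 'f14' + personality linux32
# yield nodefamily 'onelab-f14-i386'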
689 pkgs_list.append ("slicerepo-%s"%nodefamily)
690 pkgs_list.append ("myplc")
691 pkgs_list.append ("noderepo-%s"%nodefamily)
692 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
694 return self.yum_install (pkgs_list)
697 def mod_python(self):
698 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
699 return self.yum_install ( [ 'mod_python' ] )
702 def plc_configure(self):
704 tmpname='%s.plc-config-tty'%(self.name())
705 fileconf=open(tmpname,'w')
706 for (var,value) in self.plc_spec['settings'].iteritems():
707 fileconf.write ('e %s\n%s\n'%(var,value))
708 fileconf.write('w\n')
709 fileconf.write('q\n')
711 utils.system('cat %s'%tmpname)
712 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
713 utils.system('rm %s'%tmpname)
716 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
717 # however using a vplc guest under f20 requires this trick
718 # the symptom is this: service plc start
719 # Starting plc (via systemctl): Failed to get D-Bus connection: \
720 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
721 # weird thing is the doc says f14 uses upstart by default and not systemd
722 # so this sounds kind of harmless
723 def start_service (self,service): return self.start_stop_service (service,'start')
724 def stop_service (self,service): return self.start_stop_service (service,'stop')
726 def start_stop_service (self, service,start_or_stop):
727 "utility to start/stop a service with the special trick for f14"
728 if self.options.fcdistro != 'f14':
729 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
731 # patch /sbin/service so it does not reset environment
732 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
733 # this is because our own scripts in turn call service
734 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
738 return self.start_service ('plc')
742 return self.stop_service ('plc')
744 def plcvm_start (self):
745 "start the PLC vserver"
749 def plcvm_stop (self):
750 "stop the PLC vserver"
754 # stores the keys from the config for further use
755 def keys_store(self):
756 "stores test users ssh keys in keys/"
757 for key_spec in self.plc_spec['keys']:
758 TestKey(self,key_spec).store_key()
761 def keys_clean(self):
762 "removes keys cached in keys/"
763 utils.system("rm -rf ./keys")
766 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
767 # for later direct access to the nodes
768 def keys_fetch(self):
769 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
771 if not os.path.isdir(dir):
773 vservername=self.vservername
774 vm_root=self.vm_root_in_host()
776 prefix = 'debug_ssh_key'
777 for ext in [ 'pub', 'rsa' ] :
778 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
779 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
780 if self.test_ssh.fetch(src,dst) != 0: overall=False
784 "create sites with PLCAPI"
785 return self.do_sites()
787 def delete_sites (self):
788 "delete sites with PLCAPI"
789 return self.do_sites(action="delete")
791 def do_sites (self,action="add"):
792 for site_spec in self.plc_spec['sites']:
793 test_site = TestSite (self,site_spec)
794 if (action != "add"):
795 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
796 test_site.delete_site()
797 # deleted with the site
798 #test_site.delete_users()
801 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
802 test_site.create_site()
803 test_site.create_users()
806 def delete_all_sites (self):
807 "Delete all sites in PLC, and related objects"
808 print 'auth_root',self.auth_root()
809 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
811 # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would no longer be valid
812 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
813 site_id=site['site_id']
814 print 'Deleting site_id',site_id
815 self.apiserver.DeleteSite(self.auth_root(),site_id)
819 "create nodes with PLCAPI"
820 return self.do_nodes()
821 def delete_nodes (self):
822 "delete nodes with PLCAPI"
823 return self.do_nodes(action="delete")
825 def do_nodes (self,action="add"):
826 for site_spec in self.plc_spec['sites']:
827 test_site = TestSite (self,site_spec)
829 utils.header("Deleting nodes in site %s"%test_site.name())
830 for node_spec in site_spec['nodes']:
831 test_node=TestNode(self,test_site,node_spec)
832 utils.header("Deleting %s"%test_node.name())
833 test_node.delete_node()
835 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
836 for node_spec in site_spec['nodes']:
837 utils.pprint('Creating node %s'%node_spec,node_spec)
838 test_node = TestNode (self,test_site,node_spec)
839 test_node.create_node ()
842 def nodegroups (self):
843 "create nodegroups with PLCAPI"
844 return self.do_nodegroups("add")
845 def delete_nodegroups (self):
846 "delete nodegroups with PLCAPI"
847 return self.do_nodegroups("delete")
851 def translate_timestamp (start,grain,timestamp):
852 if timestamp < TestPlc.YEAR: return start+timestamp*grain
853 else: return timestamp
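# i.e. timestamps smaller than TestPlc.YEAR are interpreted as a number of grains
# relative to start - e.g. with grain=1800, a relative timestamp of 2 means one hour
# after start - whereas full epoch timestamps pass through unchanged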
856 def timestamp_printable (timestamp):
857 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
860 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
862 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
863 print 'API answered grain=',grain
864 start=(now/grain)*grain
866 # find out all nodes that are reservable
867 nodes=self.all_reservable_nodenames()
869 utils.header ("No reservable node found - proceeding without leases")
872 # attach them to the leases as specified in plc_specs
873 # this is where the 'leases' field gets interpreted as relative or absolute
874 for lease_spec in self.plc_spec['leases']:
875 # skip the ones that come with a null slice id
876 if not lease_spec['slice']: continue
877 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
878 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
879 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
880 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
881 if lease_addition['errors']:
882 utils.header("Cannot create leases, %s"%lease_addition['errors'])
885 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
886 (nodes,lease_spec['slice'],
887 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
888 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
892 def delete_leases (self):
893 "remove all leases in the myplc side"
894 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
895 utils.header("Cleaning leases %r"%lease_ids)
896 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
899 def list_leases (self):
900 "list all leases known to the myplc"
901 leases = self.apiserver.GetLeases(self.auth_root())
904 current=l['t_until']>=now
905 if self.options.verbose or current:
906 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
907 TestPlc.timestamp_printable(l['t_from']),
908 TestPlc.timestamp_printable(l['t_until'])))
911 # create nodegroups if needed, and populate
912 def do_nodegroups (self, action="add"):
913 # 1st pass to scan contents
915 for site_spec in self.plc_spec['sites']:
916 test_site = TestSite (self,site_spec)
917 for node_spec in site_spec['nodes']:
918 test_node=TestNode (self,test_site,node_spec)
919 if node_spec.has_key('nodegroups'):
920 nodegroupnames=node_spec['nodegroups']
921 if isinstance(nodegroupnames,StringTypes):
922 nodegroupnames = [ nodegroupnames ]
923 for nodegroupname in nodegroupnames:
924 if not groups_dict.has_key(nodegroupname):
925 groups_dict[nodegroupname]=[]
926 groups_dict[nodegroupname].append(test_node.name())
927 auth=self.auth_root()
929 for (nodegroupname,group_nodes) in groups_dict.iteritems():
931 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
932 # first, check if the nodetagtype is here
933 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
935 tag_type_id = tag_types[0]['tag_type_id']
937 tag_type_id = self.apiserver.AddTagType(auth,
938 {'tagname':nodegroupname,
939 'description': 'for nodegroup %s'%nodegroupname,
941 print 'located tag (type)',nodegroupname,'as',tag_type_id
943 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
945 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
946 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
947 # set node tag on all nodes, value='yes'
948 for nodename in group_nodes:
950 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
952 traceback.print_exc()
953 print 'node',nodename,'seems to already have tag',nodegroupname
956 expect_yes = self.apiserver.GetNodeTags(auth,
957 {'hostname':nodename,
958 'tagname':nodegroupname},
959 ['value'])[0]['value']
960 if expect_yes != "yes":
961 print 'Mismatch node tag on node',nodename,'got',expect_yes
964 if not self.options.dry_run:
965 print 'Cannot find tag',nodegroupname,'on node',nodename
969 print 'cleaning nodegroup',nodegroupname
970 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
972 traceback.print_exc()
976 # a list of TestNode objs
977 def all_nodes (self):
979 for site_spec in self.plc_spec['sites']:
980 test_site = TestSite (self,site_spec)
981 for node_spec in site_spec['nodes']:
982 nodes.append(TestNode (self,test_site,node_spec))
985 # return a list of tuples (nodename,qemuname)
986 def all_node_infos (self) :
988 for site_spec in self.plc_spec['sites']:
989 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
990 for node_spec in site_spec['nodes'] ]
993 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
994 def all_reservable_nodenames (self):
996 for site_spec in self.plc_spec['sites']:
997 for node_spec in site_spec['nodes']:
998 node_fields=node_spec['node_fields']
999 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1000 res.append(node_fields['hostname'])
1003 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1004 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1005 if self.options.dry_run:
1009 class CompleterTaskBootState (CompleterTask):
1010 def __init__ (self, test_plc,hostname):
1011 self.test_plc=test_plc
1012 self.hostname=hostname
1013 self.last_boot_state='undef'
1014 def actual_run (self):
1016 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1018 self.last_boot_state = node['boot_state']
1019 return self.last_boot_state == target_boot_state
1023 return "CompleterTaskBootState with node %s"%self.hostname
1024 def failure_epilogue (self):
1025 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1027 timeout = timedelta(minutes=timeout_minutes)
1028 graceout = timedelta(minutes=silent_minutes)
1029 period = timedelta(seconds=period_seconds)
1030 # the nodes that haven't checked yet - start with a full list and shrink over time
1031 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1032 tasks = [ CompleterTaskBootState (self,hostname) \
1033 for (hostname,_) in self.all_node_infos() ]
1034 return Completer (tasks).run (timeout, graceout, period)
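# this same Completer pattern - a list of CompleterTask objects polled under
# (timeout, graceout, period) - is reused by check_nodes_ping and check_nodes_ssh below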
1036 def nodes_booted(self):
1037 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1039 def probe_kvm_iptables (self):
1040 (_,kvmbox) = self.all_node_infos()[0]
1041 TestSsh(kvmbox).run("iptables-save")
1045 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1046 class CompleterTaskPingNode (CompleterTask):
1047 def __init__ (self, hostname):
1048 self.hostname=hostname
1049 def run(self,silent):
1050 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1051 return utils.system (command, silent=silent)==0
1052 def failure_epilogue (self):
1053 print "Cannot ping node with name %s"%self.hostname
1054 timeout=timedelta (seconds=timeout_seconds)
1056 period=timedelta (seconds=period_seconds)
1057 node_infos = self.all_node_infos()
1058 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1059 return Completer (tasks).run (timeout, graceout, period)
1061 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1062 def ping_node (self):
1064 return self.check_nodes_ping ()
1066 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1068 timeout = timedelta(minutes=timeout_minutes)
1069 graceout = timedelta(minutes=silent_minutes)
1070 period = timedelta(seconds=period_seconds)
1071 vservername=self.vservername
1074 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1077 local_key = "keys/key_admin.rsa"
1078 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1079 node_infos = self.all_node_infos()
1080 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
1081 for (nodename,qemuname) in node_infos ]
1082 return Completer (tasks).run (timeout, graceout, period)
1084 def ssh_node_debug(self):
1085 "Tries to ssh into nodes in debug mode with the debug ssh key"
1086 return self.check_nodes_ssh(debug=True,
1087 timeout_minutes=self.ssh_node_debug_timeout,
1088 silent_minutes=self.ssh_node_debug_silent)
1090 def ssh_node_boot(self):
1091 "Tries to ssh into nodes in production mode with the root ssh key"
1092 return self.check_nodes_ssh(debug=False,
1093 timeout_minutes=self.ssh_node_boot_timeout,
1094 silent_minutes=self.ssh_node_boot_silent)
1096 def node_bmlogs(self):
1097 "Checks that there's a non-empty dir. /var/log/bm/raw"
1098 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1101 def qemu_local_init (self): pass
1103 def bootcd (self): pass
1105 def qemu_local_config (self): pass
1107 def nodestate_reinstall (self): pass
1109 def nodestate_safeboot (self): pass
1111 def nodestate_boot (self): pass
1113 def nodestate_show (self): pass
1115 def qemu_export (self): pass
1117 ### check hooks : invoke scripts from hooks/{node,slice}
1118 def check_hooks_node (self):
1119 return self.locate_first_node().check_hooks()
1120 def check_hooks_sliver (self) :
1121 return self.locate_first_sliver().check_hooks()
1123 def check_hooks (self):
1124 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1125 return self.check_hooks_node() and self.check_hooks_sliver()
1128 def do_check_initscripts(self):
1129 class CompleterTaskInitscript (CompleterTask):
1130 def __init__ (self, test_sliver, stamp):
1131 self.test_sliver=test_sliver
1133 def actual_run (self):
1134 return self.test_sliver.check_initscript_stamp (self.stamp)
1136 return "initscript checker for %s"%self.test_sliver.name()
1137 def failure_epilogue (self):
1138 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1141 for slice_spec in self.plc_spec['slices']:
1142 if not slice_spec.has_key('initscriptstamp'):
1144 stamp=slice_spec['initscriptstamp']
1145 slicename=slice_spec['slice_fields']['name']
1146 for nodename in slice_spec['nodenames']:
1147 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1148 (site,node) = self.locate_node (nodename)
1149 # xxx - passing the wrong site - probably harmless
1150 test_site = TestSite (self,site)
1151 test_slice = TestSlice (self,test_site,slice_spec)
1152 test_node = TestNode (self,test_site,node)
1153 test_sliver = TestSliver (self, test_node, test_slice)
1154 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1155 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1157 def check_initscripts(self):
1158 "check that the initscripts have triggered"
1159 return self.do_check_initscripts()
1161 def initscripts (self):
1162 "create initscripts with PLCAPI"
1163 for initscript in self.plc_spec['initscripts']:
1164 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1165 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1168 def delete_initscripts (self):
1169 "delete initscripts with PLCAPI"
1170 for initscript in self.plc_spec['initscripts']:
1171 initscript_name = initscript['initscript_fields']['name']
1172 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1174 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1175 print initscript_name,'deleted'
1177 print 'deletion went wrong - probably did not exist'
1182 "create slices with PLCAPI"
1183 return self.do_slices(action="add")
1185 def delete_slices (self):
1186 "delete slices with PLCAPI"
1187 return self.do_slices(action="delete")
1189 def fill_slices (self):
1190 "add nodes in slices with PLCAPI"
1191 return self.do_slices(action="fill")
1193 def empty_slices (self):
1194 "remove nodes from slices with PLCAPI"
1195 return self.do_slices(action="empty")
1197 def do_slices (self, action="add"):
1198 for slice in self.plc_spec['slices']:
1199 site_spec = self.locate_site (slice['sitename'])
1200 test_site = TestSite(self,site_spec)
1201 test_slice=TestSlice(self,test_site,slice)
1202 if action == "delete":
1203 test_slice.delete_slice()
1204 elif action=="fill":
1205 test_slice.add_nodes()
1206 elif action=="empty":
1207 test_slice.delete_nodes()
1209 test_slice.create_slice()
1212 @slice_mapper__tasks(20,10,15)
1213 def ssh_slice(self): pass
1214 @slice_mapper__tasks(20,19,15)
1215 def ssh_slice_off (self): pass
1216 @slice_mapper__tasks(1,1,15)
1217 def slice_fs_present(self): pass
1218 @slice_mapper__tasks(1,1,15)
1219 def slice_fs_deleted(self): pass
1221 # use another name so we can exclude/ignore it from the tests on the nightly command line
1222 def ssh_slice_again(self): return self.ssh_slice()
1223 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1224 # but for some reason the ignore-wrapping thing would not
1227 def ssh_slice_basics(self): pass
1229 def check_vsys_defaults(self): pass
1232 def keys_clear_known_hosts (self): pass
1234 def plcapi_urls (self):
1235 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1237 def speed_up_slices (self):
1238 "tweak nodemanager settings on all nodes using a conf file"
1239 # create the template on the server-side
1240 template="%s.nodemanager"%self.name()
1241 template_file = open (template,"w")
1242 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1243 template_file.close()
1244 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1245 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1246 self.test_ssh.copy_abs(template,remote)
1248 self.apiserver.AddConfFile (self.auth_root(),
1249 {'dest':'/etc/sysconfig/nodemanager',
1250 'source':'PlanetLabConf/nodemanager',
1251 'postinstall_cmd':'service nm restart',})
1254 def debug_nodemanager (self):
1255 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1256 template="%s.nodemanager"%self.name()
1257 template_file = open (template,"w")
1258 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1259 template_file.close()
1260 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1261 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1262 self.test_ssh.copy_abs(template,remote)
1266 def qemu_start (self) : pass
1269 def qemu_timestamp (self) : pass
1271 # when a spec refers to a node possibly on another plc
1272 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1273 for plc in [ self ] + other_plcs:
1275 return plc.locate_sliver_obj (nodename, slicename)
1278 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1280 # implement this one as a cross step so that we can take advantage of different nodes
1281 # in multi-plcs mode
1282 def cross_check_tcp (self, other_plcs):
1283 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1284 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1285 utils.header ("check_tcp: no/empty config found")
1287 specs = self.plc_spec['tcp_specs']
1292 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1293 if not s_test_sliver.run_tcp_server(port,timeout=20):
1297 # idem for the client side
1298 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1299 # use nodename from the located sliver, unless 'client_connect' is set
1300 if 'client_connect' in spec:
1301 destination = spec['client_connect']
1303 destination=s_test_sliver.test_node.name()
1304 if not c_test_sliver.run_tcp_client(destination,port):
1308 # painfully enough, we need to allow for some time as netflow might show up last
1309 def check_system_slice (self):
1310 "all nodes: check that a system slice is alive"
1311 # netflow currently not working in the lxc distro
1312 # drl not built at all in the wtx distro
1313 # if we find either of them we're happy
1314 return self.check_netflow() or self.check_drl()
1317 def check_netflow (self): return self._check_system_slice ('netflow')
1318 def check_drl (self): return self._check_system_slice ('drl')
1320 # we have the slices up already here, so it should not take too long
1321 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1322 class CompleterTaskSystemSlice (CompleterTask):
1323 def __init__ (self, test_node, dry_run):
1324 self.test_node=test_node
1325 self.dry_run=dry_run
1326 def actual_run (self):
1327 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1329 return "System slice %s @ %s"%(slicename, self.test_node.name())
1330 def failure_epilogue (self):
1331 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1332 timeout = timedelta(minutes=timeout_minutes)
1333 silent = timedelta (0)
1334 period = timedelta (seconds=period_seconds)
1335 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1336 for test_node in self.all_nodes() ]
1337 return Completer (tasks) . run (timeout, silent, period)
1339 def plcsh_stress_test (self):
1340 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1341 # install the stress-test in the plc image
1342 location = "/usr/share/plc_api/plcsh_stress_test.py"
1343 remote="%s/%s"%(self.vm_root_in_host(),location)
1344 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1346 command += " -- --check"
1347 if self.options.size == 1:
1348 command += " --tiny"
1349 return ( self.run_in_guest(command) == 0)
1351 # populate runs the same utility with slightly different options
1352 # in particular it runs with --preserve (don't cleanup) and without --check
1353 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1355 def sfa_install_all (self):
1356 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1357 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1359 def sfa_install_core(self):
1361 return self.yum_install ("sfa")
1363 def sfa_install_plc(self):
1364 "yum install sfa-plc"
1365 return self.yum_install("sfa-plc")
1367 def sfa_install_sfatables(self):
1368 "yum install sfa-sfatables"
1369 return self.yum_install ("sfa-sfatables")
1371 # for some very odd reason, this sometimes fails with the following symptom
1372 # # yum install sfa-client
1373 # Setting up Install Process
1375 # Downloading Packages:
1376 # Running rpm_check_debug
1377 # Running Transaction Test
1378 # Transaction Test Succeeded
1379 # Running Transaction
1380 # Transaction couldn't start:
1381 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1382 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1383 # even though in the same context I have
1384 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1385 # Filesystem Size Used Avail Use% Mounted on
1386 # /dev/hdv1 806G 264G 501G 35% /
1387 # none 16M 36K 16M 1% /tmp
1389 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1390 def sfa_install_client(self):
1391 "yum install sfa-client"
1392 first_try=self.yum_install("sfa-client")
1393 if first_try: return True
1394 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1395 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1396 utils.header("cached_rpm_path=<<%s>>"%cached_rpm_path)
1398 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1399 return self.yum_check_installed ("sfa-client")
1401 def sfa_dbclean(self):
1402 "thoroughly wipes off the SFA database"
1403 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1404 self.run_in_guest("sfa-nuke.py")==0 or \
1405 self.run_in_guest("sfa-nuke-plc.py")==0
1407 def sfa_fsclean(self):
1408 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1409 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1412 def sfa_plcclean(self):
1413 "cleans the PLC entries that were created as a side effect of running the script"
1415 sfa_spec=self.plc_spec['sfa']
1417 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1418 login_base=auth_sfa_spec['login_base']
1419 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1420 except: print "Site %s already absent from PLC db"%login_base
1422 for spec_name in ['pi_spec','user_spec']:
1423 user_spec=auth_sfa_spec[spec_name]
1424 username=user_spec['email']
1425 try: self.apiserver.DeletePerson(self.auth_root(),username)
1427 # this in fact is expected as sites delete their members
1428 #print "User %s already absent from PLC db"%username
1431 print "REMEMBER TO RUN sfa_import AGAIN"
1434 def sfa_uninstall(self):
1435 "uses rpm to uninstall sfa - ignore result"
1436 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1437 self.run_in_guest("rm -rf /var/lib/sfa")
1438 self.run_in_guest("rm -rf /etc/sfa")
1439 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1441 self.run_in_guest("rpm -e --noscripts sfa-plc")
1444 ### run unit tests for SFA
1445 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1446 # Running Transaction
1447 # Transaction couldn't start:
1448 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1449 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1450 # no matter how many Gbs are available on the testplc
1451 # could not figure out what's wrong, so...
1452 # if the yum install phase fails, consider the test is successful
1453 # other combinations will eventually run it hopefully
1454 def sfa_utest(self):
1455 "yum install sfa-tests and run SFA unittests"
1456 self.run_in_guest("yum -y install sfa-tests")
1457 # failed to install - forget it
1458 if self.run_in_guest("rpm -q sfa-tests")!=0:
1459 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1461 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1465 dirname="conf.%s"%self.plc_spec['name']
1466 if not os.path.isdir(dirname):
1467 utils.system("mkdir -p %s"%dirname)
1468 if not os.path.isdir(dirname):
1469 raise Exception,"Cannot create config dir for plc %s"%self.name()
1472 def conffile(self,filename):
1473 return "%s/%s"%(self.confdir(),filename)
1474 def confsubdir(self,dirname,clean,dry_run=False):
1475 subdirname="%s/%s"%(self.confdir(),dirname)
1477 utils.system("rm -rf %s"%subdirname)
1478 if not os.path.isdir(subdirname):
1479 utils.system("mkdir -p %s"%subdirname)
1480 if not dry_run and not os.path.isdir(subdirname):
1481 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1484 def conffile_clean (self,filename):
1485 filename=self.conffile(filename)
1486 return utils.system("rm -rf %s"%filename)==0
1489 def sfa_configure(self):
1490 "run sfa-config-tty"
1491 tmpname=self.conffile("sfa-config-tty")
1492 fileconf=open(tmpname,'w')
1493 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1494 fileconf.write ('e %s\n%s\n'%(var,value))
1495 # # the way plc_config handles booleans just sucks..
1498 # if self.plc_spec['sfa'][var]: val='true'
1499 # fileconf.write ('e %s\n%s\n'%(var,val))
1500 fileconf.write('w\n')
1501 fileconf.write('R\n')
1502 fileconf.write('q\n')
1504 utils.system('cat %s'%tmpname)
1505 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1508 def aggregate_xml_line(self):
1509 port=self.plc_spec['sfa']['neighbours-port']
1510 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1511 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1513 def registry_xml_line(self):
1514 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1515 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
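# for illustration, with a (hypothetical) vserverip of 192.168.1.5 and root auth 'plctest',
# registry_xml_line() produces:
#     <registry addr="192.168.1.5" hrn="plctest" port="12345"/>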
1518 # a cross step that takes all other plcs in argument
1519 def cross_sfa_configure(self, other_plcs):
1520 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1521 # of course with a single plc, other_plcs is an empty list
1524 agg_fname=self.conffile("agg.xml")
1525 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1526 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1527 utils.header ("(Over)wrote %s"%agg_fname)
1528 reg_fname=self.conffile("reg.xml")
1529 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1530 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1531 utils.header ("(Over)wrote %s"%reg_fname)
1532 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1533 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1535 def sfa_import(self):
1536 "use sfaadmin to import from plc"
1537 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1538 return self.run_in_guest('sfaadmin reg import_registry')==0
1540 def sfa_start(self):
1542 return self.start_service('sfa')
1545 def sfi_configure(self):
1546 "Create /root/sfi on the plc side for sfi client configuration"
1547 if self.options.dry_run:
1548 utils.header("DRY RUN - skipping step")
1550 sfa_spec=self.plc_spec['sfa']
1551 # cannot use auth_sfa_mapper to pass dir_name
1552 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1553 test_slice=TestAuthSfa(self,slice_spec)
1554 dir_basename=os.path.basename(test_slice.sfi_path())
1555 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1556 test_slice.sfi_configure(dir_name)
1557 # push into the remote /root/sfi area
1558 location = test_slice.sfi_path()
1559 remote="%s/%s"%(self.vm_root_in_host(),location)
1560 self.test_ssh.mkdir(remote,abs=True)
1561 # need to strip the last level of remote, otherwise we get an extra dir level
1562 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1566 def sfi_clean (self):
1567 "clean up /root/sfi on the plc side"
1568 self.run_in_guest("rm -rf /root/sfi")
1572 def sfa_register_site (self): pass
1574 def sfa_register_pi (self): pass
1576 def sfa_register_user(self): pass
1578 def sfa_update_user(self): pass
1580 def sfa_register_slice(self): pass
1582 def sfa_renew_slice(self): pass
1584 def sfa_discover(self): pass
1586 def sfa_create_slice(self): pass
1588 def sfa_check_slice_plc(self): pass
1590 def sfa_update_slice(self): pass
1592 def sfa_remove_user_from_slice(self): pass
1594 def sfa_insert_user_in_slice(self): pass
1596 def sfi_list(self): pass
1598 def sfi_show_site(self): pass
1600 def sfi_show_slice(self): pass
1602 def sfi_show_slice_researchers(self): pass
1604 def ssh_slice_sfa(self): pass
1606 def sfa_delete_user(self): pass
1608 def sfa_delete_slice(self): pass
1612 return self.stop_service ('sfa')
1614 def populate (self):
1615 "creates random entries in the PLCAPI"
1616 # install the stress-test in the plc image
1617 location = "/usr/share/plc_api/plcsh_stress_test.py"
1618 remote="%s/%s"%(self.vm_root_in_host(),location)
1619 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1621 command += " -- --preserve --short-names"
1622 local = (self.run_in_guest(command) == 0)
1623 # second run with --foreign
1624 command += ' --foreign'
1625 remote = (self.run_in_guest(command) == 0)
1626 return ( local and remote)
1628 def gather_logs (self):
1629 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1630 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1631 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1632 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1633 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1634 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1635 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1637 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1638 self.gather_var_logs ()
1640 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1641 self.gather_pgsql_logs ()
1643 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1644 self.gather_root_sfi ()
1646 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1647 for site_spec in self.plc_spec['sites']:
1648 test_site = TestSite (self,site_spec)
1649 for node_spec in site_spec['nodes']:
1650 test_node=TestNode(self,test_site,node_spec)
1651 test_node.gather_qemu_logs()
1653 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1654 self.gather_nodes_var_logs()
1656 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1657 self.gather_slivers_var_logs()
1660 def gather_slivers_var_logs(self):
1661 for test_sliver in self.all_sliver_objs():
1662 remote = test_sliver.tar_var_logs()
1663 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1664 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1665 utils.system(command)
1668 def gather_var_logs (self):
1669 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1670 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1671 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1672 utils.system(command)
1673 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1674 utils.system(command)
1676 def gather_pgsql_logs (self):
1677 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1678 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1679 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1680 utils.system(command)
1682 def gather_root_sfi (self):
1683 utils.system("mkdir -p logs/sfi.%s"%self.name())
1684 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1685 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1686 utils.system(command)
1688 def gather_nodes_var_logs (self):
1689 for site_spec in self.plc_spec['sites']:
1690 test_site = TestSite (self,site_spec)
1691 for node_spec in site_spec['nodes']:
1692 test_node=TestNode(self,test_site,node_spec)
1693 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1694 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1695 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1696 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1697 utils.system(command)
1700 # returns the filename to use for sql dump/restore, using options.dbname if set
1701 def dbfile (self, database):
1702 # uses options.dbname if it is found
1704 name=self.options.dbname
1705 if not isinstance(name,StringTypes):
1711 return "/root/%s-%s.sql"%(database,name)
1713 def plc_db_dump(self):
1714 'dump the planetlab5 DB in /root in the PLC - filename has time'
1715 dump=self.dbfile("planetlab5")
1716 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1717 utils.header('Dumped planetlab5 database in %s'%dump)
1720 def plc_db_restore(self):
1721 'restore the planetlab5 DB - looks broken, but run -n might help'
1722 dump=self.dbfile("planetlab5")
1723 ##stop httpd service
1724 self.run_in_guest('service httpd stop')
1725 # xxx - need another wrapper
1726 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1727 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1728 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1729 ##starting httpd service
1730 self.run_in_guest('service httpd start')
1732 utils.header('Database restored from ' + dump)
1735 def create_ignore_steps ():
1736 for step in TestPlc.default_steps + TestPlc.other_steps:
1737 # default step can have a plc qualifier
1738 if '@' in step: (step,qualifier)=step.split('@')
1739 # or be defined as forced or ignored by default
1740 for keyword in ['_ignore','_force']:
1741 if step.endswith (keyword): step=step.replace(keyword,'')
1742 if step == SEP or step == SEPSFA : continue
1743 method=getattr(TestPlc,step)
1745 wrapped=ignore_result(method)
1746 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1747 setattr(TestPlc, name, wrapped)
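# so e.g. the 'ssh_slice_again_ignore' entry in default_steps ends up defined as an
# ignore_result-wrapped version of ssh_slice_again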
1750 # def ssh_slice_again_ignore (self): pass
1752 # def check_initscripts_ignore (self): pass
1754 def standby_1_through_20(self):
1755 """convenience function to wait for a specified number of minutes"""
1758 def standby_1(): pass
1760 def standby_2(): pass
1762 def standby_3(): pass
1764 def standby_4(): pass
1766 def standby_5(): pass
1768 def standby_6(): pass
1770 def standby_7(): pass
1772 def standby_8(): pass
1774 def standby_9(): pass
1776 def standby_10(): pass
1778 def standby_11(): pass
1780 def standby_12(): pass
1782 def standby_13(): pass
1784 def standby_14(): pass
1786 def standby_15(): pass
1788 def standby_16(): pass
1790 def standby_17(): pass
1792 def standby_18(): pass
1794 def standby_19(): pass
1796 def standby_20(): pass
1798 # convenience for debugging the test logic
1799 def yes (self): return True
1800 def no (self): return False