1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d mn'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid wating for all the slices
89 # esp. useful when a test fails of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
144 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
145 # keep this our of the way for now
146 'check_vsys_defaults_ignore', SEP,
147 # run this first off so it's easier to re-run on another qemu box
148 'qemu_kill_mine', SEP,
149 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
150 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
151 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
152 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
153 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
154 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
155 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
156 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
157 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
158 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
159 # but as the stress test might take a while, we sometimes missed the debug mode..
160 'probe_kvm_iptables',
161 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
162 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
163 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
164 'cross_check_tcp@1', 'check_system_slice', SEP,
165 # check slices are turned off properly
166 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
167 # check they are properly re-created with the same name
168 'fill_slices', 'ssh_slice_again', SEP,
169 'gather_logs_force', SEP,
172 'export', 'show_boxes', 'super_speed_up_slices', SEP,
173 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
174 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
175 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
176 'delete_leases', 'list_leases', SEP,
178 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
179 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
180 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
181 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
182 'sfa_get_expires', SEPSFA,
183 'plc_db_dump' , 'plc_db_restore', SEP,
184 'check_netflow','check_drl', SEP,
185 'debug_nodemanager', 'slice_fs_present', SEP,
186 'standby_1_through_20','yes','no',SEP,
190 def printable_steps (list):
191 single_line=" ".join(list)+" "
192 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
194 def valid_step (step):
195 return step != SEP and step != SEPSFA
197 # turn off the sfa-related steps when build has skipped SFA
198 # this was originally for centos5 but is still valid
199 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
201 def _has_sfa_cached (rpms_url):
202 if os.path.isfile(has_sfa_cache_filename):
203 cached=file(has_sfa_cache_filename).read()=="yes"
204 utils.header("build provides SFA (cached):%s"%cached)
206 # warning, we're now building 'sface' so let's be a bit more picky
207 # full builds are expected to return with 0 here
208 utils.header ("Checking if build provides SFA package...")
209 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
210 encoded='yes' if retcod else 'no'
211 file(has_sfa_cache_filename,'w').write(encoded)
215 def check_whether_build_has_sfa (rpms_url):
216 has_sfa=TestPlc._has_sfa_cached(rpms_url)
218 utils.header("build does provide SFA")
220 # move all steps containing 'sfa' from default_steps to other_steps
221 utils.header("SFA package not found - removing steps with sfa or sfi")
222 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
223 TestPlc.other_steps += sfa_steps
224 for step in sfa_steps: TestPlc.default_steps.remove(step)
226 def __init__ (self,plc_spec,options):
227 self.plc_spec=plc_spec
229 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
230 self.vserverip=plc_spec['vserverip']
231 self.vservername=plc_spec['vservername']
232 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
233 self.apiserver=TestApiserver(self.url,options.dry_run)
234 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
235 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
237 def has_addresses_api (self):
238 return self.apiserver.has_method('AddIpAddress')
241 name=self.plc_spec['name']
242 return "%s.%s"%(name,self.vservername)
245 return self.plc_spec['host_box']
248 return self.test_ssh.is_local()
250 # define the API methods on this object through xmlrpc
251 # would help, but not strictly necessary
255 def actual_command_in_guest (self,command, backslash=False):
256 raw1=self.host_to_guest(command)
257 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
260 def start_guest (self):
261 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
263 def stop_guest (self):
264 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
266 def run_in_guest (self,command,backslash=False):
267 raw=self.actual_command_in_guest(command,backslash)
268 return utils.system(raw)
270 def run_in_host (self,command):
271 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
273 # backslashing turned out so awful at some point that I've turned off auto-backslashing
274 # see e.g. plc_start esp. the version for f14
275 #command gets run in the plc's vm
276 def host_to_guest(self,command):
277 # f14 still needs some extra help
278 if self.options.fcdistro == 'f14':
279 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
281 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
284 # this /vservers thing is legacy...
285 def vm_root_in_host(self):
286 return "/vservers/%s/"%(self.vservername)
288 def vm_timestamp_path (self):
289 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
291 #start/stop the vserver
292 def start_guest_in_host(self):
293 return "virsh -c lxc:/// start %s"%(self.vservername)
295 def stop_guest_in_host(self):
296 return "virsh -c lxc:/// destroy %s"%(self.vservername)
299 def run_in_guest_piped (self,local,remote):
300 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
302 def yum_check_installed (self, rpms):
303 if isinstance (rpms, list):
305 return self.run_in_guest("rpm -q %s"%rpms)==0
307 # does a yum install in the vs, ignore yum retcod, check with rpm
308 def yum_install (self, rpms):
309 if isinstance (rpms, list):
311 self.run_in_guest("yum -y install %s"%rpms)
312 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
313 self.run_in_guest("yum-complete-transaction -y")
314 return self.yum_check_installed (rpms)
316 def auth_root (self):
317 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
318 'AuthMethod':'password',
319 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
320 'Role' : self.plc_spec['role']
322 def locate_site (self,sitename):
323 for site in self.plc_spec['sites']:
324 if site['site_fields']['name'] == sitename:
326 if site['site_fields']['login_base'] == sitename:
328 raise Exception,"Cannot locate site %s"%sitename
330 def locate_node (self,nodename):
331 for site in self.plc_spec['sites']:
332 for node in site['nodes']:
333 if node['name'] == nodename:
335 raise Exception,"Cannot locate node %s"%nodename
337 def locate_hostname (self,hostname):
338 for site in self.plc_spec['sites']:
339 for node in site['nodes']:
340 if node['node_fields']['hostname'] == hostname:
342 raise Exception,"Cannot locate hostname %s"%hostname
344 def locate_key (self,key_name):
345 for key in self.plc_spec['keys']:
346 if key['key_name'] == key_name:
348 raise Exception,"Cannot locate key %s"%key_name
350 def locate_private_key_from_key_names (self, key_names):
351 # locate the first avail. key
353 for key_name in key_names:
354 key_spec=self.locate_key(key_name)
355 test_key=TestKey(self,key_spec)
356 publickey=test_key.publicpath()
357 privatekey=test_key.privatepath()
358 if os.path.isfile(publickey) and os.path.isfile(privatekey):
360 if found: return privatekey
363 def locate_slice (self, slicename):
364 for slice in self.plc_spec['slices']:
365 if slice['slice_fields']['name'] == slicename:
367 raise Exception,"Cannot locate slice %s"%slicename
369 def all_sliver_objs (self):
371 for slice_spec in self.plc_spec['slices']:
372 slicename = slice_spec['slice_fields']['name']
373 for nodename in slice_spec['nodenames']:
374 result.append(self.locate_sliver_obj (nodename,slicename))
377 def locate_sliver_obj (self,nodename,slicename):
378 (site,node) = self.locate_node(nodename)
379 slice = self.locate_slice (slicename)
381 test_site = TestSite (self, site)
382 test_node = TestNode (self, test_site,node)
383 # xxx the slice site is assumed to be the node site - mhh - probably harmless
384 test_slice = TestSlice (self, test_site, slice)
385 return TestSliver (self, test_node, test_slice)
387 def locate_first_node(self):
388 nodename=self.plc_spec['slices'][0]['nodenames'][0]
389 (site,node) = self.locate_node(nodename)
390 test_site = TestSite (self, site)
391 test_node = TestNode (self, test_site,node)
394 def locate_first_sliver (self):
395 slice_spec=self.plc_spec['slices'][0]
396 slicename=slice_spec['slice_fields']['name']
397 nodename=slice_spec['nodenames'][0]
398 return self.locate_sliver_obj(nodename,slicename)
400 # all different hostboxes used in this plc
401 def get_BoxNodes(self):
402 # maps on sites and nodes, return [ (host_box,test_node) ]
404 for site_spec in self.plc_spec['sites']:
405 test_site = TestSite (self,site_spec)
406 for node_spec in site_spec['nodes']:
407 test_node = TestNode (self, test_site, node_spec)
408 if not test_node.is_real():
409 tuples.append( (test_node.host_box(),test_node) )
410 # transform into a dict { 'host_box' -> [ test_node .. ] }
412 for (box,node) in tuples:
413 if not result.has_key(box):
416 result[box].append(node)
419 # a step for checking this stuff
420 def show_boxes (self):
421 'print summary of nodes location'
422 for (box,nodes) in self.get_BoxNodes().iteritems():
423 print box,":"," + ".join( [ node.name() for node in nodes ] )
426 # make this a valid step
427 def qemu_kill_all(self):
428 'kill all qemu instances on the qemu boxes involved by this setup'
429 # this is the brute force version, kill all qemus on that host box
430 for (box,nodes) in self.get_BoxNodes().iteritems():
431 # pass the first nodename, as we don't push template-qemu on testboxes
432 nodedir=nodes[0].nodedir()
433 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
436 # make this a valid step
437 def qemu_list_all(self):
438 'list all qemu instances on the qemu boxes involved by this setup'
439 for (box,nodes) in self.get_BoxNodes().iteritems():
440 # this is the brute force version, kill all qemus on that host box
441 TestBoxQemu(box,self.options.buildname).qemu_list_all()
444 # kill only the qemus related to this test
445 def qemu_list_mine(self):
446 'list qemu instances for our nodes'
447 for (box,nodes) in self.get_BoxNodes().iteritems():
448 # the fine-grain version
453 # kill only the qemus related to this test
454 def qemu_clean_mine(self):
455 'cleanup (rm -rf) qemu instances for our nodes'
456 for (box,nodes) in self.get_BoxNodes().iteritems():
457 # the fine-grain version
462 # kill only the right qemus
463 def qemu_kill_mine(self):
464 'kill the qemu instances for our nodes'
465 for (box,nodes) in self.get_BoxNodes().iteritems():
466 # the fine-grain version
471 #################### display config
473 "show test configuration after localization"
478 # uggly hack to make sure 'run export' only reports about the 1st plc
479 # to avoid confusion - also we use 'inri_slice1' in various aliases..
482 "print cut'n paste-able stuff to export env variables to your shell"
483 # guess local domain from hostname
484 if TestPlc.exported_id>1:
485 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
487 TestPlc.exported_id+=1
488 domain=socket.gethostname().split('.',1)[1]
489 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
490 print "export BUILD=%s"%self.options.buildname
491 print "export PLCHOSTLXC=%s"%fqdn
492 print "export GUESTNAME=%s"%self.plc_spec['vservername']
493 vplcname=self.plc_spec['vservername'].split('-')[-1]
494 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
495 # find hostname of first node
496 (hostname,qemubox) = self.all_node_infos()[0]
497 print "export KVMHOST=%s.%s"%(qemubox,domain)
498 print "export NODE=%s"%(hostname)
502 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
503 def show_pass (self,passno):
504 for (key,val) in self.plc_spec.iteritems():
505 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
509 self.display_site_spec(site)
510 for node in site['nodes']:
511 self.display_node_spec(node)
512 elif key=='initscripts':
513 for initscript in val:
514 self.display_initscript_spec (initscript)
517 self.display_slice_spec (slice)
520 self.display_key_spec (key)
522 if key not in ['sites','initscripts','slices','keys']:
523 print '+ ',key,':',val
525 def display_site_spec (self,site):
526 print '+ ======== site',site['site_fields']['name']
527 for (k,v) in site.iteritems():
528 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
531 print '+ ','nodes : ',
533 print node['node_fields']['hostname'],'',
539 print user['name'],'',
541 elif k == 'site_fields':
542 print '+ login_base',':',v['login_base']
543 elif k == 'address_fields':
549 def display_initscript_spec (self,initscript):
550 print '+ ======== initscript',initscript['initscript_fields']['name']
552 def display_key_spec (self,key):
553 print '+ ======== key',key['key_name']
555 def display_slice_spec (self,slice):
556 print '+ ======== slice',slice['slice_fields']['name']
557 for (k,v) in slice.iteritems():
570 elif k=='slice_fields':
571 print '+ fields',':',
572 print 'max_nodes=',v['max_nodes'],
577 def display_node_spec (self,node):
578 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
579 print "hostname=",node['node_fields']['hostname'],
580 print "ip=",node['interface_fields']['ip']
581 if self.options.verbose:
582 utils.pprint("node details",node,depth=3)
584 # another entry point for just showing the boxes involved
585 def display_mapping (self):
586 TestPlc.display_mapping_plc(self.plc_spec)
590 def display_mapping_plc (plc_spec):
591 print '+ MyPLC',plc_spec['name']
592 # WARNING this would not be right for lxc-based PLC's - should be harmless though
593 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
594 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
595 for site_spec in plc_spec['sites']:
596 for node_spec in site_spec['nodes']:
597 TestPlc.display_mapping_node(node_spec)
600 def display_mapping_node (node_spec):
601 print '+ NODE %s'%(node_spec['name'])
602 print '+\tqemu box %s'%node_spec['host_box']
603 print '+\thostname=%s'%node_spec['node_fields']['hostname']
605 # write a timestamp in /vservers/<>.timestamp
606 # cannot be inside the vserver, that causes vserver .. build to cough
607 def plcvm_timestamp (self):
608 "Create a timestamp to remember creation date for this plc"
610 # TODO-lxc check this one
611 # a first approx. is to store the timestamp close to the VM root like vs does
612 stamp_path=self.vm_timestamp_path ()
613 stamp_dir = os.path.dirname (stamp_path)
614 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
615 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
617 # this is called inconditionnally at the beginning of the test sequence
618 # just in case this is a rerun, so if the vm is not running it's fine
619 def plcvm_delete(self):
620 "vserver delete the test myplc"
621 stamp_path=self.vm_timestamp_path()
622 self.run_in_host("rm -f %s"%stamp_path)
623 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
624 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
625 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
629 # historically the build was being fetched by the tests
630 # now the build pushes itself as a subdir of the tests workdir
631 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
632 def plcvm_create (self):
633 "vserver creation (no install done)"
634 # push the local build/ dir to the testplc box
636 # a full path for the local calls
637 build_dir=os.path.dirname(sys.argv[0])
638 # sometimes this is empty - set to "." in such a case
639 if not build_dir: build_dir="."
640 build_dir += "/build"
642 # use a standard name - will be relative to remote buildname
644 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
645 self.test_ssh.rmdir(build_dir)
646 self.test_ssh.copy(build_dir,recursive=True)
647 # the repo url is taken from arch-rpms-url
648 # with the last step (i386) removed
649 repo_url = self.options.arch_rpms_url
650 for level in [ 'arch' ]:
651 repo_url = os.path.dirname(repo_url)
653 # invoke initvm (drop support for vs)
654 script="lbuild-initvm.sh"
656 # pass the vbuild-nightly options to [lv]test-initvm
657 script_options += " -p %s"%self.options.personality
658 script_options += " -d %s"%self.options.pldistro
659 script_options += " -f %s"%self.options.fcdistro
660 script_options += " -r %s"%repo_url
661 vserver_name = self.vservername
663 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
664 script_options += " -n %s"%vserver_hostname
666 print "Cannot reverse lookup %s"%self.vserverip
667 print "This is considered fatal, as this might pollute the test results"
669 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
670 return self.run_in_host(create_vserver) == 0
673 def plc_install(self):
674 "yum install myplc, noderepo, and the plain bootstrapfs"
676 # workaround for getting pgsql8.2 on centos5
677 if self.options.fcdistro == "centos5":
678 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
681 if self.options.personality == "linux32":
683 elif self.options.personality == "linux64":
686 raise Exception, "Unsupported personality %r"%self.options.personality
687 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
690 pkgs_list.append ("slicerepo-%s"%nodefamily)
691 pkgs_list.append ("myplc")
692 pkgs_list.append ("noderepo-%s"%nodefamily)
693 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
694 pkgs_string=" ".join(pkgs_list)
695 return self.yum_install (pkgs_list)
698 def mod_python(self):
699 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
700 return self.yum_install ( [ 'mod_python' ] )
703 def plc_configure(self):
705 tmpname='%s.plc-config-tty'%(self.name())
706 fileconf=open(tmpname,'w')
707 for (var,value) in self.plc_spec['settings'].iteritems():
708 fileconf.write ('e %s\n%s\n'%(var,value))
709 fileconf.write('w\n')
710 fileconf.write('q\n')
712 utils.system('cat %s'%tmpname)
713 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
714 utils.system('rm %s'%tmpname)
717 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
718 # however using a vplc guest under f20 requires this trick
719 # the symptom is this: service plc start
720 # Starting plc (via systemctl): Failed to get D-Bus connection: \
721 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
722 # weird thing is the doc says f14 uses upstart by default and not systemd
723 # so this sounds kind of harmless
724 def start_service (self,service): return self.start_stop_service (service,'start')
725 def stop_service (self,service): return self.start_stop_service (service,'stop')
727 def start_stop_service (self, service,start_or_stop):
728 "utility to start/stop a service with the special trick for f14"
729 if self.options.fcdistro != 'f14':
730 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
732 # patch /sbin/service so it does not reset environment
733 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
734 # this is because our own scripts in turn call service
735 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
739 return self.start_service ('plc')
743 return self.stop_service ('plc')
745 def plcvm_start (self):
746 "start the PLC vserver"
750 def plcvm_stop (self):
751 "stop the PLC vserver"
755 # stores the keys from the config for further use
756 def keys_store(self):
757 "stores test users ssh keys in keys/"
758 for key_spec in self.plc_spec['keys']:
759 TestKey(self,key_spec).store_key()
762 def keys_clean(self):
763 "removes keys cached in keys/"
764 utils.system("rm -rf ./keys")
767 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
768 # for later direct access to the nodes
769 def keys_fetch(self):
770 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
772 if not os.path.isdir(dir):
774 vservername=self.vservername
775 vm_root=self.vm_root_in_host()
777 prefix = 'debug_ssh_key'
778 for ext in [ 'pub', 'rsa' ] :
779 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
780 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
781 if self.test_ssh.fetch(src,dst) != 0: overall=False
785 "create sites with PLCAPI"
786 return self.do_sites()
788 def delete_sites (self):
789 "delete sites with PLCAPI"
790 return self.do_sites(action="delete")
792 def do_sites (self,action="add"):
793 for site_spec in self.plc_spec['sites']:
794 test_site = TestSite (self,site_spec)
795 if (action != "add"):
796 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
797 test_site.delete_site()
798 # deleted with the site
799 #test_site.delete_users()
802 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
803 test_site.create_site()
804 test_site.create_users()
807 def delete_all_sites (self):
808 "Delete all sites in PLC, and related objects"
809 print 'auth_root',self.auth_root()
810 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
812 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
813 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
814 site_id=site['site_id']
815 print 'Deleting site_id',site_id
816 self.apiserver.DeleteSite(self.auth_root(),site_id)
820 "create nodes with PLCAPI"
821 return self.do_nodes()
822 def delete_nodes (self):
823 "delete nodes with PLCAPI"
824 return self.do_nodes(action="delete")
826 def do_nodes (self,action="add"):
827 for site_spec in self.plc_spec['sites']:
828 test_site = TestSite (self,site_spec)
830 utils.header("Deleting nodes in site %s"%test_site.name())
831 for node_spec in site_spec['nodes']:
832 test_node=TestNode(self,test_site,node_spec)
833 utils.header("Deleting %s"%test_node.name())
834 test_node.delete_node()
836 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
837 for node_spec in site_spec['nodes']:
838 utils.pprint('Creating node %s'%node_spec,node_spec)
839 test_node = TestNode (self,test_site,node_spec)
840 test_node.create_node ()
843 def nodegroups (self):
844 "create nodegroups with PLCAPI"
845 return self.do_nodegroups("add")
846 def delete_nodegroups (self):
847 "delete nodegroups with PLCAPI"
848 return self.do_nodegroups("delete")
852 def translate_timestamp (start,grain,timestamp):
853 if timestamp < TestPlc.YEAR: return start+timestamp*grain
854 else: return timestamp
857 def timestamp_printable (timestamp):
858 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
861 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
863 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
864 print 'API answered grain=',grain
865 start=(now/grain)*grain
867 # find out all nodes that are reservable
868 nodes=self.all_reservable_nodenames()
870 utils.header ("No reservable node found - proceeding without leases")
873 # attach them to the leases as specified in plc_specs
874 # this is where the 'leases' field gets interpreted as relative of absolute
875 for lease_spec in self.plc_spec['leases']:
876 # skip the ones that come with a null slice id
877 if not lease_spec['slice']: continue
878 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
879 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
880 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
881 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
882 if lease_addition['errors']:
883 utils.header("Cannot create leases, %s"%lease_addition['errors'])
886 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
887 (nodes,lease_spec['slice'],
888 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
889 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
893 def delete_leases (self):
894 "remove all leases in the myplc side"
895 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
896 utils.header("Cleaning leases %r"%lease_ids)
897 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
900 def list_leases (self):
901 "list all leases known to the myplc"
902 leases = self.apiserver.GetLeases(self.auth_root())
905 current=l['t_until']>=now
906 if self.options.verbose or current:
907 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
908 TestPlc.timestamp_printable(l['t_from']),
909 TestPlc.timestamp_printable(l['t_until'])))
912 # create nodegroups if needed, and populate
913 def do_nodegroups (self, action="add"):
914 # 1st pass to scan contents
916 for site_spec in self.plc_spec['sites']:
917 test_site = TestSite (self,site_spec)
918 for node_spec in site_spec['nodes']:
919 test_node=TestNode (self,test_site,node_spec)
920 if node_spec.has_key('nodegroups'):
921 nodegroupnames=node_spec['nodegroups']
922 if isinstance(nodegroupnames,StringTypes):
923 nodegroupnames = [ nodegroupnames ]
924 for nodegroupname in nodegroupnames:
925 if not groups_dict.has_key(nodegroupname):
926 groups_dict[nodegroupname]=[]
927 groups_dict[nodegroupname].append(test_node.name())
928 auth=self.auth_root()
930 for (nodegroupname,group_nodes) in groups_dict.iteritems():
932 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
933 # first, check if the nodetagtype is here
934 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
936 tag_type_id = tag_types[0]['tag_type_id']
938 tag_type_id = self.apiserver.AddTagType(auth,
939 {'tagname':nodegroupname,
940 'description': 'for nodegroup %s'%nodegroupname,
942 print 'located tag (type)',nodegroupname,'as',tag_type_id
944 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
946 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
947 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
948 # set node tag on all nodes, value='yes'
949 for nodename in group_nodes:
951 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
953 traceback.print_exc()
954 print 'node',nodename,'seems to already have tag',nodegroupname
957 expect_yes = self.apiserver.GetNodeTags(auth,
958 {'hostname':nodename,
959 'tagname':nodegroupname},
960 ['value'])[0]['value']
961 if expect_yes != "yes":
962 print 'Mismatch node tag on node',nodename,'got',expect_yes
965 if not self.options.dry_run:
966 print 'Cannot find tag',nodegroupname,'on node',nodename
970 print 'cleaning nodegroup',nodegroupname
971 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
973 traceback.print_exc()
977 # a list of TestNode objs
978 def all_nodes (self):
980 for site_spec in self.plc_spec['sites']:
981 test_site = TestSite (self,site_spec)
982 for node_spec in site_spec['nodes']:
983 nodes.append(TestNode (self,test_site,node_spec))
986 # return a list of tuples (nodename,qemuname)
987 def all_node_infos (self) :
989 for site_spec in self.plc_spec['sites']:
990 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
991 for node_spec in site_spec['nodes'] ]
994 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
995 def all_reservable_nodenames (self):
997 for site_spec in self.plc_spec['sites']:
998 for node_spec in site_spec['nodes']:
999 node_fields=node_spec['node_fields']
1000 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1001 res.append(node_fields['hostname'])
1004 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1005 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1006 if self.options.dry_run:
1010 class CompleterTaskBootState (CompleterTask):
1011 def __init__ (self, test_plc,hostname):
1012 self.test_plc=test_plc
1013 self.hostname=hostname
1014 self.last_boot_state='undef'
1015 def actual_run (self):
1017 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1019 self.last_boot_state = node['boot_state']
1020 return self.last_boot_state == target_boot_state
1024 return "CompleterTaskBootState with node %s"%self.hostname
1025 def failure_epilogue (self):
1026 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1028 timeout = timedelta(minutes=timeout_minutes)
1029 graceout = timedelta(minutes=silent_minutes)
1030 period = timedelta(seconds=period_seconds)
1031 # the nodes that haven't checked yet - start with a full list and shrink over time
1032 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1033 tasks = [ CompleterTaskBootState (self,hostname) \
1034 for (hostname,_) in self.all_node_infos() ]
1035 return Completer (tasks).run (timeout, graceout, period)
1037 def nodes_booted(self):
1038 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1040 def probe_kvm_iptables (self):
1041 (_,kvmbox) = self.all_node_infos()[0]
1042 TestSsh(kvmbox).run("iptables-save")
1046 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1047 class CompleterTaskPingNode (CompleterTask):
1048 def __init__ (self, hostname):
1049 self.hostname=hostname
1050 def run(self,silent):
1051 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1052 return utils.system (command, silent=silent)==0
1053 def failure_epilogue (self):
1054 print "Cannot ping node with name %s"%self.hostname
1055 timeout=timedelta (seconds=timeout_seconds)
1057 period=timedelta (seconds=period_seconds)
1058 node_infos = self.all_node_infos()
1059 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1060 return Completer (tasks).run (timeout, graceout, period)
1062 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1063 def ping_node (self):
1065 return self.check_nodes_ping ()
1067 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1069 timeout = timedelta(minutes=timeout_minutes)
1070 graceout = timedelta(minutes=silent_minutes)
1071 period = timedelta(seconds=period_seconds)
1072 vservername=self.vservername
1075 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1078 local_key = "keys/key_admin.rsa"
1079 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1080 node_infos = self.all_node_infos()
1081 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
1082 for (nodename,qemuname) in node_infos ]
1083 return Completer (tasks).run (timeout, graceout, period)
1085 def ssh_node_debug(self):
1086 "Tries to ssh into nodes in debug mode with the debug ssh key"
1087 return self.check_nodes_ssh(debug=True,
1088 timeout_minutes=self.ssh_node_debug_timeout,
1089 silent_minutes=self.ssh_node_debug_silent)
1091 def ssh_node_boot(self):
1092 "Tries to ssh into nodes in production mode with the root ssh key"
1093 return self.check_nodes_ssh(debug=False,
1094 timeout_minutes=self.ssh_node_boot_timeout,
1095 silent_minutes=self.ssh_node_boot_silent)
1097 def node_bmlogs(self):
1098 "Checks that there's a non-empty dir. /var/log/bm/raw"
1099 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1102 def qemu_local_init (self): pass
1104 def bootcd (self): pass
1106 def qemu_local_config (self): pass
1108 def nodestate_reinstall (self): pass
1110 def nodestate_safeboot (self): pass
1112 def nodestate_boot (self): pass
1114 def nodestate_show (self): pass
1116 def qemu_export (self): pass
1118 ### check hooks : invoke scripts from hooks/{node,slice}
1119 def check_hooks_node (self):
1120 return self.locate_first_node().check_hooks()
1121 def check_hooks_sliver (self) :
1122 return self.locate_first_sliver().check_hooks()
1124 def check_hooks (self):
1125 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1126 return self.check_hooks_node() and self.check_hooks_sliver()
1129 def do_check_initscripts(self):
1130 class CompleterTaskInitscript (CompleterTask):
1131 def __init__ (self, test_sliver, stamp):
1132 self.test_sliver=test_sliver
1134 def actual_run (self):
1135 return self.test_sliver.check_initscript_stamp (self.stamp)
1137 return "initscript checker for %s"%self.test_sliver.name()
1138 def failure_epilogue (self):
1139 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1142 for slice_spec in self.plc_spec['slices']:
1143 if not slice_spec.has_key('initscriptstamp'):
1145 stamp=slice_spec['initscriptstamp']
1146 slicename=slice_spec['slice_fields']['name']
1147 for nodename in slice_spec['nodenames']:
1148 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1149 (site,node) = self.locate_node (nodename)
1150 # xxx - passing the wrong site - probably harmless
1151 test_site = TestSite (self,site)
1152 test_slice = TestSlice (self,test_site,slice_spec)
1153 test_node = TestNode (self,test_site,node)
1154 test_sliver = TestSliver (self, test_node, test_slice)
1155 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1156 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1158 def check_initscripts(self):
1159 "check that the initscripts have triggered"
1160 return self.do_check_initscripts()
1162 def initscripts (self):
1163 "create initscripts with PLCAPI"
1164 for initscript in self.plc_spec['initscripts']:
1165 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1166 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1169 def delete_initscripts (self):
1170 "delete initscripts with PLCAPI"
1171 for initscript in self.plc_spec['initscripts']:
1172 initscript_name = initscript['initscript_fields']['name']
1173 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1175 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1176 print initscript_name,'deleted'
1178 print 'deletion went wrong - probably did not exist'
1183 "create slices with PLCAPI"
1184 return self.do_slices(action="add")
1186 def delete_slices (self):
1187 "delete slices with PLCAPI"
1188 return self.do_slices(action="delete")
1190 def fill_slices (self):
1191 "add nodes in slices with PLCAPI"
1192 return self.do_slices(action="fill")
1194 def empty_slices (self):
1195 "remove nodes from slices with PLCAPI"
1196 return self.do_slices(action="empty")
1198 def do_slices (self, action="add"):
1199 for slice in self.plc_spec['slices']:
1200 site_spec = self.locate_site (slice['sitename'])
1201 test_site = TestSite(self,site_spec)
1202 test_slice=TestSlice(self,test_site,slice)
1203 if action == "delete":
1204 test_slice.delete_slice()
1205 elif action=="fill":
1206 test_slice.add_nodes()
1207 elif action=="empty":
1208 test_slice.delete_nodes()
1210 test_slice.create_slice()
1213 @slice_mapper__tasks(20,10,15)
1214 def ssh_slice(self): pass
1215 @slice_mapper__tasks(20,19,15)
1216 def ssh_slice_off (self): pass
1217 @slice_mapper__tasks(1,1,15)
1218 def slice_fs_present(self): pass
1219 @slice_mapper__tasks(1,1,15)
1220 def slice_fs_deleted(self): pass
1222 # use another name so we can exclude/ignore it from the tests on the nightly command line
1223 def ssh_slice_again(self): return self.ssh_slice()
1224 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1225 # but for some reason the ignore-wrapping thing would not
1228 def ssh_slice_basics(self): pass
1230 def check_vsys_defaults(self): pass
1233 def keys_clear_known_hosts (self): pass
1235 def plcapi_urls (self):
1236 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1238 def speed_up_slices (self):
1239 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1240 return self._speed_up_slices (30,10)
1241 def super_speed_up_slices (self):
1242 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1243 return self._speed_up_slices (5,1)
1245 def _speed_up_slices (self, p, r):
1246 # create the template on the server-side
1247 template="%s.nodemanager"%self.name()
1248 template_file = open (template,"w")
1249 template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
1250 template_file.close()
1251 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1252 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1253 self.test_ssh.copy_abs(template,remote)
1255 if not self.apiserver.GetConfFiles (self.auth_root(),
1256 {'dest':'/etc/sysconfig/nodemanager'}):
1257 self.apiserver.AddConfFile (self.auth_root(),
1258 {'dest':'/etc/sysconfig/nodemanager',
1259 'source':'PlanetLabConf/nodemanager',
1260 'postinstall_cmd':'service nm restart',})
1263 def debug_nodemanager (self):
1264 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1265 template="%s.nodemanager"%self.name()
1266 template_file = open (template,"w")
1267 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1268 template_file.close()
1269 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1270 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1271 self.test_ssh.copy_abs(template,remote)
1275 def qemu_start (self) : pass
1278 def qemu_timestamp (self) : pass
1280 # when a spec refers to a node possibly on another plc
1281 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1282 for plc in [ self ] + other_plcs:
1284 return plc.locate_sliver_obj (nodename, slicename)
1287 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1289 # implement this one as a cross step so that we can take advantage of different nodes
1290 # in multi-plcs mode
1291 def cross_check_tcp (self, other_plcs):
1292 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1293 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1294 utils.header ("check_tcp: no/empty config found")
1296 specs = self.plc_spec['tcp_specs']
1301 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1302 if not s_test_sliver.run_tcp_server(port,timeout=20):
1306 # idem for the client side
1307 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1308 # use nodename from locatesd sliver, unless 'client_connect' is set
1309 if 'client_connect' in spec:
1310 destination = spec['client_connect']
1312 destination=s_test_sliver.test_node.name()
1313 if not c_test_sliver.run_tcp_client(destination,port):
1317 # painfully enough, we need to allow for some time as netflow might show up last
1318 def check_system_slice (self):
1319 "all nodes: check that a system slice is alive"
1320 # netflow currently not working in the lxc distro
1321 # drl not built at all in the wtx distro
1322 # if we find either of them we're happy
1323 return self.check_netflow() or self.check_drl()
1326 def check_netflow (self): return self._check_system_slice ('netflow')
1327 def check_drl (self): return self._check_system_slice ('drl')
1329 # we have the slices up already here, so it should not take too long
1330 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1331 class CompleterTaskSystemSlice (CompleterTask):
1332 def __init__ (self, test_node, dry_run):
1333 self.test_node=test_node
1334 self.dry_run=dry_run
1335 def actual_run (self):
1336 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1338 return "System slice %s @ %s"%(slicename, self.test_node.name())
1339 def failure_epilogue (self):
1340 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1341 timeout = timedelta(minutes=timeout_minutes)
1342 silent = timedelta (0)
1343 period = timedelta (seconds=period_seconds)
1344 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1345 for test_node in self.all_nodes() ]
1346 return Completer (tasks) . run (timeout, silent, period)
1348 def plcsh_stress_test (self):
1349 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1350 # install the stress-test in the plc image
1351 location = "/usr/share/plc_api/plcsh_stress_test.py"
1352 remote="%s/%s"%(self.vm_root_in_host(),location)
1353 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1355 command += " -- --check"
1356 if self.options.size == 1:
1357 command += " --tiny"
1358 return ( self.run_in_guest(command) == 0)
1360 # populate runs the same utility without slightly different options
1361 # in particular runs with --preserve (dont cleanup) and without --check
1362 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1364 def sfa_install_all (self):
1365 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1366 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1368 def sfa_install_core(self):
1370 return self.yum_install ("sfa")
1372 def sfa_install_plc(self):
1373 "yum install sfa-plc"
1374 return self.yum_install("sfa-plc")
1376 def sfa_install_sfatables(self):
1377 "yum install sfa-sfatables"
1378 return self.yum_install ("sfa-sfatables")
1380 # for some very odd reason, this sometimes fails with the following symptom
1381 # # yum install sfa-client
1382 # Setting up Install Process
1384 # Downloading Packages:
1385 # Running rpm_check_debug
1386 # Running Transaction Test
1387 # Transaction Test Succeeded
1388 # Running Transaction
1389 # Transaction couldn't start:
1390 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1391 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1392 # even though in the same context I have
1393 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1394 # Filesystem Size Used Avail Use% Mounted on
1395 # /dev/hdv1 806G 264G 501G 35% /
1396 # none 16M 36K 16M 1% /tmp
1398 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1399 def sfa_install_client(self):
1400 "yum install sfa-client"
1401 first_try=self.yum_install("sfa-client")
1402 if first_try: return True
1403 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1404 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1405 utils.header("rpm_path=<<%s>>"%rpm_path)
1407 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1408 return self.yum_check_installed ("sfa-client")
1410 def sfa_dbclean(self):
1411 "thoroughly wipes off the SFA database"
1412 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1413 self.run_in_guest("sfa-nuke.py")==0 or \
1414 self.run_in_guest("sfa-nuke-plc.py")==0
1416 def sfa_fsclean(self):
1417 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1418 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1421 def sfa_plcclean(self):
1422 "cleans the PLC entries that were created as a side effect of running the script"
1424 sfa_spec=self.plc_spec['sfa']
1426 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1427 login_base=auth_sfa_spec['login_base']
1428 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1429 except: print "Site %s already absent from PLC db"%login_base
1431 for spec_name in ['pi_spec','user_spec']:
1432 user_spec=auth_sfa_spec[spec_name]
1433 username=user_spec['email']
1434 try: self.apiserver.DeletePerson(self.auth_root(),username)
1436 # this in fact is expected as sites delete their members
1437 #print "User %s already absent from PLC db"%username
1440 print "REMEMBER TO RUN sfa_import AGAIN"
1443 def sfa_uninstall(self):
1444 "uses rpm to uninstall sfa - ignore result"
1445 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1446 self.run_in_guest("rm -rf /var/lib/sfa")
1447 self.run_in_guest("rm -rf /etc/sfa")
1448 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1450 self.run_in_guest("rpm -e --noscripts sfa-plc")
1453 ### run unit tests for SFA
1454 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1455 # Running Transaction
1456 # Transaction couldn't start:
1457 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1458 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1459 # no matter how many Gbs are available on the testplc
1460 # could not figure out what's wrong, so...
1461 # if the yum install phase fails, consider the test is successful
1462 # other combinations will eventually run it hopefully
1463 def sfa_utest(self):
1464 "yum install sfa-tests and run SFA unittests"
1465 self.run_in_guest("yum -y install sfa-tests")
1466 # failed to install - forget it
1467 if self.run_in_guest("rpm -q sfa-tests")!=0:
1468 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1470 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1474 dirname="conf.%s"%self.plc_spec['name']
1475 if not os.path.isdir(dirname):
1476 utils.system("mkdir -p %s"%dirname)
1477 if not os.path.isdir(dirname):
1478 raise Exception,"Cannot create config dir for plc %s"%self.name()
1481 def conffile(self,filename):
1482 return "%s/%s"%(self.confdir(),filename)
1483 def confsubdir(self,dirname,clean,dry_run=False):
1484 subdirname="%s/%s"%(self.confdir(),dirname)
1486 utils.system("rm -rf %s"%subdirname)
1487 if not os.path.isdir(subdirname):
1488 utils.system("mkdir -p %s"%subdirname)
1489 if not dry_run and not os.path.isdir(subdirname):
1490 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1493 def conffile_clean (self,filename):
1494 filename=self.conffile(filename)
1495 return utils.system("rm -rf %s"%filename)==0
1498 def sfa_configure(self):
1499 "run sfa-config-tty"
1500 tmpname=self.conffile("sfa-config-tty")
1501 fileconf=open(tmpname,'w')
1502 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1503 fileconf.write ('e %s\n%s\n'%(var,value))
1504 # # the way plc_config handles booleans just sucks..
1507 # if self.plc_spec['sfa'][var]: val='true'
1508 # fileconf.write ('e %s\n%s\n'%(var,val))
1509 fileconf.write('w\n')
1510 fileconf.write('R\n')
1511 fileconf.write('q\n')
1513 utils.system('cat %s'%tmpname)
1514 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1517 def aggregate_xml_line(self):
1518 port=self.plc_spec['sfa']['neighbours-port']
1519 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1520 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1522 def registry_xml_line(self):
1523 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1524 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1527 # a cross step that takes all other plcs in argument
1528 def cross_sfa_configure(self, other_plcs):
1529 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1530 # of course with a single plc, other_plcs is an empty list
1533 agg_fname=self.conffile("agg.xml")
1534 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1535 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1536 utils.header ("(Over)wrote %s"%agg_fname)
1537 reg_fname=self.conffile("reg.xml")
1538 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1539 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1540 utils.header ("(Over)wrote %s"%reg_fname)
1541 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1542 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1544 def sfa_import(self):
1545 "use sfaadmin to import from plc"
1546 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1547 return self.run_in_guest('sfaadmin reg import_registry')==0
1549 def sfa_start(self):
1551 return self.start_service('sfa')
1554 def sfi_configure(self):
1555 "Create /root/sfi on the plc side for sfi client configuration"
1556 if self.options.dry_run:
1557 utils.header("DRY RUN - skipping step")
1559 sfa_spec=self.plc_spec['sfa']
1560 # cannot use auth_sfa_mapper to pass dir_name
1561 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1562 test_slice=TestAuthSfa(self,slice_spec)
1563 dir_basename=os.path.basename(test_slice.sfi_path())
1564 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1565 test_slice.sfi_configure(dir_name)
1566 # push into the remote /root/sfi area
1567 location = test_slice.sfi_path()
1568 remote="%s/%s"%(self.vm_root_in_host(),location)
1569 self.test_ssh.mkdir(remote,abs=True)
1570 # need to strip last level or remote otherwise we get an extra dir level
1571 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1575 def sfi_clean (self):
1576 "clean up /root/sfi on the plc side"
1577 self.run_in_guest("rm -rf /root/sfi")
1581 def sfa_register_site (self): pass
1583 def sfa_register_pi (self): pass
1585 def sfa_register_user(self): pass
1587 def sfa_update_user(self): pass
1589 def sfa_register_slice(self): pass
1591 def sfa_renew_slice(self): pass
1593 def sfa_get_expires(self): pass
1595 def sfa_discover(self): pass
1597 def sfa_create_slice(self): pass
1599 def sfa_check_slice_plc(self): pass
1601 def sfa_update_slice(self): pass
1603 def sfa_remove_user_from_slice(self): pass
1605 def sfa_insert_user_in_slice(self): pass
1607 def sfi_list(self): pass
1609 def sfi_show_site(self): pass
1611 def sfi_show_slice(self): pass
1613 def sfi_show_slice_researchers(self): pass
1615 def ssh_slice_sfa(self): pass
1617 def sfa_delete_user(self): pass
1619 def sfa_delete_slice(self): pass
1623 return self.stop_service ('sfa')
1625 def populate (self):
1626 "creates random entries in the PLCAPI"
1627 # install the stress-test in the plc image
1628 location = "/usr/share/plc_api/plcsh_stress_test.py"
1629 remote="%s/%s"%(self.vm_root_in_host(),location)
1630 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1632 command += " -- --preserve --short-names"
1633 local = (self.run_in_guest(command) == 0);
1634 # second run with --foreign
1635 command += ' --foreign'
1636 remote = (self.run_in_guest(command) == 0);
1637 return ( local and remote)
1639 def gather_logs (self):
1640 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1641 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1642 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1643 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1644 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1645 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1646 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1648 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1649 self.gather_var_logs ()
1651 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1652 self.gather_pgsql_logs ()
1654 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1655 self.gather_root_sfi ()
1657 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1658 for site_spec in self.plc_spec['sites']:
1659 test_site = TestSite (self,site_spec)
1660 for node_spec in site_spec['nodes']:
1661 test_node=TestNode(self,test_site,node_spec)
1662 test_node.gather_qemu_logs()
1664 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1665 self.gather_nodes_var_logs()
1667 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1668 self.gather_slivers_var_logs()
1671 def gather_slivers_var_logs(self):
1672 for test_sliver in self.all_sliver_objs():
1673 remote = test_sliver.tar_var_logs()
1674 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1675 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1676 utils.system(command)
1679 def gather_var_logs (self):
1680 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1681 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1682 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1683 utils.system(command)
1684 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1685 utils.system(command)
1687 def gather_pgsql_logs (self):
1688 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1689 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1690 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1691 utils.system(command)
1693 def gather_root_sfi (self):
1694 utils.system("mkdir -p logs/sfi.%s"%self.name())
1695 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1696 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1697 utils.system(command)
1699 def gather_nodes_var_logs (self):
1700 for site_spec in self.plc_spec['sites']:
1701 test_site = TestSite (self,site_spec)
1702 for node_spec in site_spec['nodes']:
1703 test_node=TestNode(self,test_site,node_spec)
1704 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1705 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1706 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1707 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1708 utils.system(command)
1711 # returns the filename to use for sql dump/restore, using options.dbname if set
1712 def dbfile (self, database):
1713 # uses options.dbname if it is found
1715 name=self.options.dbname
1716 if not isinstance(name,StringTypes):
1722 return "/root/%s-%s.sql"%(database,name)
1724 def plc_db_dump(self):
1725 'dump the planetlab5 DB in /root in the PLC - filename has time'
1726 dump=self.dbfile("planetab5")
1727 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1728 utils.header('Dumped planetlab5 database in %s'%dump)
1731 def plc_db_restore(self):
1732 'restore the planetlab5 DB - looks broken, but run -n might help'
1733 dump=self.dbfile("planetab5")
1734 ##stop httpd service
1735 self.run_in_guest('service httpd stop')
1736 # xxx - need another wrapper
1737 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1738 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1739 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1740 ##starting httpd service
1741 self.run_in_guest('service httpd start')
1743 utils.header('Database restored from ' + dump)
1746 def create_ignore_steps ():
1747 for step in TestPlc.default_steps + TestPlc.other_steps:
1748 # default step can have a plc qualifier
1749 if '@' in step: (step,qualifier)=step.split('@')
1750 # or be defined as forced or ignored by default
1751 for keyword in ['_ignore','_force']:
1752 if step.endswith (keyword): step=step.replace(keyword,'')
1753 if step == SEP or step == SEPSFA : continue
1754 method=getattr(TestPlc,step)
1756 wrapped=ignore_result(method)
1757 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1758 setattr(TestPlc, name, wrapped)
1761 # def ssh_slice_again_ignore (self): pass
1763 # def check_initscripts_ignore (self): pass
1765 def standby_1_through_20(self):
1766 """convenience function to wait for a specified number of minutes"""
1769 def standby_1(): pass
1771 def standby_2(): pass
1773 def standby_3(): pass
1775 def standby_4(): pass
1777 def standby_5(): pass
1779 def standby_6(): pass
1781 def standby_7(): pass
1783 def standby_8(): pass
1785 def standby_9(): pass
1787 def standby_10(): pass
1789 def standby_11(): pass
1791 def standby_12(): pass
1793 def standby_13(): pass
1795 def standby_14(): pass
1797 def standby_15(): pass
1799 def standby_16(): pass
1801 def standby_17(): pass
1803 def standby_18(): pass
1805 def standby_19(): pass
1807 def standby_20(): pass
1809 # convenience for debugging the test logic
1810 def yes (self): return True
1811 def no (self): return False