1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d minutes'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid waiting for all the slices
89 # esp. useful when a test fails, of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
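# typical use, as seen further down in this file:
#   @slice_mapper__tasks(20,10,15)
#   def ssh_slice(self): pass
# i.e. a 20-minute overall timeout, 10 silent minutes, and a 15-second polling period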
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
144 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
145 # keep this out of the way for now
146 'check_vsys_defaults_ignore', SEP,
147 # run this first off so it's easier to re-run on another qemu box
148 'qemu_kill_mine', SEP,
149 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
150 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
151 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
152 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
153 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
154 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
155 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
156 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
157 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
158 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
159 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
160 # but as the stress test might take a while, we sometimes missed the debug mode..
161 'probe_kvm_iptables',
162 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
163 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
164 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
165 'cross_check_tcp@1', 'check_system_slice', SEP,
166 # check slices are turned off properly
167 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
168 # check they are properly re-created with the same name
169 'fill_slices', 'ssh_slice_again', SEP,
170 'gather_logs_force', SEP,
173 'export', 'show_boxes', 'super_speed_up_slices', SEP,
174 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
175 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
176 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
177 'delete_leases', 'list_leases', SEP,
179 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
180 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
181 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
182 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
183 'sfa_get_expires', SEPSFA,
184 'plc_db_dump' , 'plc_db_restore', SEP,
185 'check_netflow','check_drl', SEP,
186 'debug_nodemanager', 'slice_fs_present', SEP,
187 'standby_1_through_20','yes','no',SEP,
191 def printable_steps (list):
192 single_line=" ".join(list)+" "
193 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
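# for illustration (SEP and SEPSFA are plain marker strings defined elsewhere in this file):
#   printable_steps(['plc_install', 'plc_configure', SEP, 'plc_start'])
# prints the steps on one line, with each SEP / SEPSFA turned into a backslash-newline break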
195 def valid_step (step):
196 return step != SEP and step != SEPSFA
198 # turn off the sfa-related steps when build has skipped SFA
199 # this was originally for centos5 but is still valid
200 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
202 def _has_sfa_cached (rpms_url):
203 if os.path.isfile(has_sfa_cache_filename):
204 cached=file(has_sfa_cache_filename).read()=="yes"
205 utils.header("build provides SFA (cached):%s"%cached)
207 # warning, we're now building 'sface' so let's be a bit more picky
208 # full builds are expected to return with 0 here
209 utils.header ("Checking if build provides SFA package...")
210 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
211 encoded='yes' if retcod else 'no'
212 file(has_sfa_cache_filename,'w').write(encoded)
216 def check_whether_build_has_sfa (rpms_url):
217 has_sfa=TestPlc._has_sfa_cached(rpms_url)
219 utils.header("build does provide SFA")
221 # move all steps containing 'sfa' from default_steps to other_steps
222 utils.header("SFA package not found - removing steps with sfa or sfi")
223 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
224 TestPlc.other_steps += sfa_steps
225 for step in sfa_steps: TestPlc.default_steps.remove(step)
227 def __init__ (self,plc_spec,options):
228 self.plc_spec=plc_spec
230 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
231 self.vserverip=plc_spec['vserverip']
232 self.vservername=plc_spec['vservername']
233 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
234 self.apiserver=TestApiserver(self.url,options.dry_run)
235 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
236 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
238 def has_addresses_api (self):
239 return self.apiserver.has_method('AddIpAddress')
242 name=self.plc_spec['name']
243 return "%s.%s"%(name,self.vservername)
246 return self.plc_spec['host_box']
249 return self.test_ssh.is_local()
251 # define the API methods on this object through xmlrpc
252 # would help, but not strictly necessary
256 def actual_command_in_guest (self,command, backslash=False):
257 raw1=self.host_to_guest(command)
258 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
261 def start_guest (self):
262 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
264 def stop_guest (self):
265 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
267 def run_in_guest (self,command,backslash=False):
268 raw=self.actual_command_in_guest(command,backslash)
269 return utils.system(raw)
271 def run_in_host (self,command):
272 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
274 # backslashing turned out so awful at some point that I've turned off auto-backslashing
275 # see e.g. plc_start esp. the version for f14
276 # command gets run in the plc's vm
277 def host_to_guest(self,command):
278 vservername=self.vservername
279 personality=self.options.personality
280 raw="%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s"%locals()
281 # f14 still needs some extra help
282 if self.options.fcdistro == 'f14':
283 raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" %locals()
285 raw +=" -- /usr/bin/env %(command)s"%locals()
288 # this /vservers thing is legacy...
289 def vm_root_in_host(self):
290 return "/vservers/%s/"%(self.vservername)
292 def vm_timestamp_path (self):
293 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
295 #start/stop the vserver
296 def start_guest_in_host(self):
297 return "virsh -c lxc:/// start %s"%(self.vservername)
299 def stop_guest_in_host(self):
300 return "virsh -c lxc:/// destroy %s"%(self.vservername)
303 def run_in_guest_piped (self,local,remote):
304 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
306 def yum_check_installed (self, rpms):
307 if isinstance (rpms, list):
309 return self.run_in_guest("rpm -q %s"%rpms)==0
311 # does a yum install in the vs, ignores the yum return code, and checks with rpm
312 def yum_install (self, rpms):
313 if isinstance (rpms, list):
315 self.run_in_guest("yum -y install %s"%rpms)
316 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
317 self.run_in_guest("yum-complete-transaction -y")
318 return self.yum_check_installed (rpms)
320 def auth_root (self):
321 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
322 'AuthMethod':'password',
323 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
324 'Role' : self.plc_spec['role']
326 def locate_site (self,sitename):
327 for site in self.plc_spec['sites']:
328 if site['site_fields']['name'] == sitename:
330 if site['site_fields']['login_base'] == sitename:
332 raise Exception,"Cannot locate site %s"%sitename
334 def locate_node (self,nodename):
335 for site in self.plc_spec['sites']:
336 for node in site['nodes']:
337 if node['name'] == nodename:
339 raise Exception,"Cannot locate node %s"%nodename
341 def locate_hostname (self,hostname):
342 for site in self.plc_spec['sites']:
343 for node in site['nodes']:
344 if node['node_fields']['hostname'] == hostname:
346 raise Exception,"Cannot locate hostname %s"%hostname
348 def locate_key (self,key_name):
349 for key in self.plc_spec['keys']:
350 if key['key_name'] == key_name:
352 raise Exception,"Cannot locate key %s"%key_name
354 def locate_private_key_from_key_names (self, key_names):
355 # locate the first avail. key
357 for key_name in key_names:
358 key_spec=self.locate_key(key_name)
359 test_key=TestKey(self,key_spec)
360 publickey=test_key.publicpath()
361 privatekey=test_key.privatepath()
362 if os.path.isfile(publickey) and os.path.isfile(privatekey):
364 if found: return privatekey
367 def locate_slice (self, slicename):
368 for slice in self.plc_spec['slices']:
369 if slice['slice_fields']['name'] == slicename:
371 raise Exception,"Cannot locate slice %s"%slicename
373 def all_sliver_objs (self):
375 for slice_spec in self.plc_spec['slices']:
376 slicename = slice_spec['slice_fields']['name']
377 for nodename in slice_spec['nodenames']:
378 result.append(self.locate_sliver_obj (nodename,slicename))
381 def locate_sliver_obj (self,nodename,slicename):
382 (site,node) = self.locate_node(nodename)
383 slice = self.locate_slice (slicename)
385 test_site = TestSite (self, site)
386 test_node = TestNode (self, test_site,node)
387 # xxx the slice site is assumed to be the node site - mhh - probably harmless
388 test_slice = TestSlice (self, test_site, slice)
389 return TestSliver (self, test_node, test_slice)
391 def locate_first_node(self):
392 nodename=self.plc_spec['slices'][0]['nodenames'][0]
393 (site,node) = self.locate_node(nodename)
394 test_site = TestSite (self, site)
395 test_node = TestNode (self, test_site,node)
398 def locate_first_sliver (self):
399 slice_spec=self.plc_spec['slices'][0]
400 slicename=slice_spec['slice_fields']['name']
401 nodename=slice_spec['nodenames'][0]
402 return self.locate_sliver_obj(nodename,slicename)
404 # all different hostboxes used in this plc
405 def get_BoxNodes(self):
406 # maps on sites and nodes, return [ (host_box,test_node) ]
408 for site_spec in self.plc_spec['sites']:
409 test_site = TestSite (self,site_spec)
410 for node_spec in site_spec['nodes']:
411 test_node = TestNode (self, test_site, node_spec)
412 if not test_node.is_real():
413 tuples.append( (test_node.host_box(),test_node) )
414 # transform into a dict { 'host_box' -> [ test_node .. ] }
416 for (box,node) in tuples:
417 if not result.has_key(box):
420 result[box].append(node)
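# the returned dict maps each qemu host box to its TestNode objects, e.g. (hypothetical names)
#   { 'testbox1.example.org' : [ <TestNode node1>, <TestNode node2> ] }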
423 # a step for checking this stuff
424 def show_boxes (self):
425 'print summary of nodes location'
426 for (box,nodes) in self.get_BoxNodes().iteritems():
427 print box,":"," + ".join( [ node.name() for node in nodes ] )
430 # make this a valid step
431 def qemu_kill_all(self):
432 'kill all qemu instances on the qemu boxes involved by this setup'
433 # this is the brute force version, kill all qemus on that host box
434 for (box,nodes) in self.get_BoxNodes().iteritems():
435 # pass the first nodename, as we don't push template-qemu on testboxes
436 nodedir=nodes[0].nodedir()
437 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
440 # make this a valid step
441 def qemu_list_all(self):
442 'list all qemu instances on the qemu boxes involved by this setup'
443 for (box,nodes) in self.get_BoxNodes().iteritems():
444 # this is the brute force version, kill all qemus on that host box
445 TestBoxQemu(box,self.options.buildname).qemu_list_all()
448 # kill only the qemus related to this test
449 def qemu_list_mine(self):
450 'list qemu instances for our nodes'
451 for (box,nodes) in self.get_BoxNodes().iteritems():
452 # the fine-grain version
457 # kill only the qemus related to this test
458 def qemu_clean_mine(self):
459 'cleanup (rm -rf) qemu instances for our nodes'
460 for (box,nodes) in self.get_BoxNodes().iteritems():
461 # the fine-grain version
466 # kill only the right qemus
467 def qemu_kill_mine(self):
468 'kill the qemu instances for our nodes'
469 for (box,nodes) in self.get_BoxNodes().iteritems():
470 # the fine-grain version
475 #################### display config
477 "show test configuration after localization"
482 # ugly hack to make sure 'run export' only reports about the 1st plc
483 # to avoid confusion - also we use 'inri_slice1' in various aliases..
486 "print cut'n paste-able stuff to export env variables to your shell"
487 # guess local domain from hostname
488 if TestPlc.exported_id>1:
489 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
491 TestPlc.exported_id+=1
492 domain=socket.gethostname().split('.',1)[1]
493 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
494 print "export BUILD=%s"%self.options.buildname
495 print "export PLCHOSTLXC=%s"%fqdn
496 print "export GUESTNAME=%s"%self.plc_spec['vservername']
497 vplcname=self.plc_spec['vservername'].split('-')[-1]
498 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
499 # find hostname of first node
500 (hostname,qemubox) = self.all_node_infos()[0]
501 print "export KVMHOST=%s.%s"%(qemubox,domain)
502 print "export NODE=%s"%(hostname)
506 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
507 def show_pass (self,passno):
508 for (key,val) in self.plc_spec.iteritems():
509 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
513 self.display_site_spec(site)
514 for node in site['nodes']:
515 self.display_node_spec(node)
516 elif key=='initscripts':
517 for initscript in val:
518 self.display_initscript_spec (initscript)
521 self.display_slice_spec (slice)
524 self.display_key_spec (key)
526 if key not in ['sites','initscripts','slices','keys']:
527 print '+ ',key,':',val
529 def display_site_spec (self,site):
530 print '+ ======== site',site['site_fields']['name']
531 for (k,v) in site.iteritems():
532 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
535 print '+ ','nodes : ',
537 print node['node_fields']['hostname'],'',
543 print user['name'],'',
545 elif k == 'site_fields':
546 print '+ login_base',':',v['login_base']
547 elif k == 'address_fields':
553 def display_initscript_spec (self,initscript):
554 print '+ ======== initscript',initscript['initscript_fields']['name']
556 def display_key_spec (self,key):
557 print '+ ======== key',key['key_name']
559 def display_slice_spec (self,slice):
560 print '+ ======== slice',slice['slice_fields']['name']
561 for (k,v) in slice.iteritems():
574 elif k=='slice_fields':
575 print '+ fields',':',
576 print 'max_nodes=',v['max_nodes'],
581 def display_node_spec (self,node):
582 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
583 print "hostname=",node['node_fields']['hostname'],
584 print "ip=",node['interface_fields']['ip']
585 if self.options.verbose:
586 utils.pprint("node details",node,depth=3)
588 # another entry point for just showing the boxes involved
589 def display_mapping (self):
590 TestPlc.display_mapping_plc(self.plc_spec)
594 def display_mapping_plc (plc_spec):
595 print '+ MyPLC',plc_spec['name']
596 # WARNING this would not be right for lxc-based PLC's - should be harmless though
597 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
598 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
599 for site_spec in plc_spec['sites']:
600 for node_spec in site_spec['nodes']:
601 TestPlc.display_mapping_node(node_spec)
604 def display_mapping_node (node_spec):
605 print '+ NODE %s'%(node_spec['name'])
606 print '+\tqemu box %s'%node_spec['host_box']
607 print '+\thostname=%s'%node_spec['node_fields']['hostname']
609 # write a timestamp in /vservers/<>.timestamp
610 # cannot be inside the vserver, as that causes the vserver build to cough
611 def plcvm_timestamp (self):
612 "Create a timestamp to remember creation date for this plc"
614 # TODO-lxc check this one
615 # a first approx. is to store the timestamp close to the VM root like vs does
616 stamp_path=self.vm_timestamp_path ()
617 stamp_dir = os.path.dirname (stamp_path)
618 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
619 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
621 # this is called unconditionally at the beginning of the test sequence
622 # just in case this is a rerun, so if the vm is not running it's fine
623 def plcvm_delete(self):
624 "vserver delete the test myplc"
625 stamp_path=self.vm_timestamp_path()
626 self.run_in_host("rm -f %s"%stamp_path)
627 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
628 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
629 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
633 # historically the build was being fetched by the tests
634 # now the build pushes itself as a subdir of the tests workdir
635 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
636 def plcvm_create (self):
637 "vserver creation (no install done)"
638 # push the local build/ dir to the testplc box
640 # a full path for the local calls
641 build_dir=os.path.dirname(sys.argv[0])
642 # sometimes this is empty - set to "." in such a case
643 if not build_dir: build_dir="."
644 build_dir += "/build"
646 # use a standard name - will be relative to remote buildname
648 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
649 self.test_ssh.rmdir(build_dir)
650 self.test_ssh.copy(build_dir,recursive=True)
651 # the repo url is taken from arch-rpms-url
652 # with the last step (i386) removed
653 repo_url = self.options.arch_rpms_url
654 for level in [ 'arch' ]:
655 repo_url = os.path.dirname(repo_url)
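# e.g. a (hypothetical) arch-rpms-url like .../2015.03--f21/RPMS/x86_64
# gets trimmed to .../2015.03--f21/RPMS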
657 # invoke initvm (drop support for vs)
658 script="lbuild-initvm.sh"
660 # pass the vbuild-nightly options to [lv]test-initvm
661 script_options += " -p %s"%self.options.personality
662 script_options += " -d %s"%self.options.pldistro
663 script_options += " -f %s"%self.options.fcdistro
664 script_options += " -r %s"%repo_url
665 vserver_name = self.vservername
667 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
668 script_options += " -n %s"%vserver_hostname
670 print "Cannot reverse lookup %s"%self.vserverip
671 print "This is considered fatal, as this might pollute the test results"
673 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
674 return self.run_in_host(create_vserver) == 0
677 def plc_install(self):
678 "yum install myplc, noderepo, and the plain bootstrapfs"
680 # workaround for getting pgsql8.2 on centos5
681 if self.options.fcdistro == "centos5":
682 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
685 if self.options.personality == "linux32":
687 elif self.options.personality == "linux64":
690 raise Exception, "Unsupported personality %r"%self.options.personality
691 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
694 pkgs_list.append ("slicerepo-%s"%nodefamily)
695 pkgs_list.append ("myplc")
696 pkgs_list.append ("noderepo-%s"%nodefamily)
697 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
698 pkgs_string=" ".join(pkgs_list)
699 return self.yum_install (pkgs_list)
702 def mod_python(self):
703 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
704 return self.yum_install ( [ 'mod_python' ] )
707 def plc_configure(self):
709 tmpname='%s.plc-config-tty'%(self.name())
710 fileconf=open(tmpname,'w')
711 for (var,value) in self.plc_spec['settings'].iteritems():
712 fileconf.write ('e %s\n%s\n'%(var,value))
713 fileconf.write('w\n')
714 fileconf.write('q\n')
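# the generated file feeds plc-config-tty one command per line, roughly:
#   e PLC_NAME        <- 'e <variable>' then the value, for each entry in settings
#   My test PLC          (this value is just an illustration)
#   w                 <- write the config
#   q                 <- quit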
716 utils.system('cat %s'%tmpname)
717 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
718 utils.system('rm %s'%tmpname)
721 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
722 # however using a vplc guest under f20 requires this trick
723 # the symptom is this: service plc start
724 # Starting plc (via systemctl): Failed to get D-Bus connection: \
725 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
726 # weird thing is the doc says f14 uses upstart by default and not systemd
727 # so this sounds kind of harmless
728 def start_service (self,service): return self.start_stop_service (service,'start')
729 def stop_service (self,service): return self.start_stop_service (service,'stop')
731 def start_stop_service (self, service,start_or_stop):
732 "utility to start/stop a service with the special trick for f14"
733 if self.options.fcdistro != 'f14':
734 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
736 # patch /sbin/service so it does not reset environment
737 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
738 # this is because our own scripts in turn call service
739 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
743 return self.start_service ('plc')
747 return self.stop_service ('plc')
749 def plcvm_start (self):
750 "start the PLC vserver"
754 def plcvm_stop (self):
755 "stop the PLC vserver"
759 # stores the keys from the config for further use
760 def keys_store(self):
761 "stores test users ssh keys in keys/"
762 for key_spec in self.plc_spec['keys']:
763 TestKey(self,key_spec).store_key()
766 def keys_clean(self):
767 "removes keys cached in keys/"
768 utils.system("rm -rf ./keys")
771 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
772 # for later direct access to the nodes
773 def keys_fetch(self):
774 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
776 if not os.path.isdir(dir):
778 vservername=self.vservername
779 vm_root=self.vm_root_in_host()
781 prefix = 'debug_ssh_key'
782 for ext in [ 'pub', 'rsa' ] :
783 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
784 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
785 if self.test_ssh.fetch(src,dst) != 0: overall=False
789 "create sites with PLCAPI"
790 return self.do_sites()
792 def delete_sites (self):
793 "delete sites with PLCAPI"
794 return self.do_sites(action="delete")
796 def do_sites (self,action="add"):
797 for site_spec in self.plc_spec['sites']:
798 test_site = TestSite (self,site_spec)
799 if (action != "add"):
800 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
801 test_site.delete_site()
802 # deleted with the site
803 #test_site.delete_users()
806 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
807 test_site.create_site()
808 test_site.create_users()
811 def delete_all_sites (self):
812 "Delete all sites in PLC, and related objects"
813 print 'auth_root',self.auth_root()
814 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
816 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would not be valid anymore
817 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
818 site_id=site['site_id']
819 print 'Deleting site_id',site_id
820 self.apiserver.DeleteSite(self.auth_root(),site_id)
824 "create nodes with PLCAPI"
825 return self.do_nodes()
826 def delete_nodes (self):
827 "delete nodes with PLCAPI"
828 return self.do_nodes(action="delete")
830 def do_nodes (self,action="add"):
831 for site_spec in self.plc_spec['sites']:
832 test_site = TestSite (self,site_spec)
834 utils.header("Deleting nodes in site %s"%test_site.name())
835 for node_spec in site_spec['nodes']:
836 test_node=TestNode(self,test_site,node_spec)
837 utils.header("Deleting %s"%test_node.name())
838 test_node.delete_node()
840 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
841 for node_spec in site_spec['nodes']:
842 utils.pprint('Creating node %s'%node_spec,node_spec)
843 test_node = TestNode (self,test_site,node_spec)
844 test_node.create_node ()
847 def nodegroups (self):
848 "create nodegroups with PLCAPI"
849 return self.do_nodegroups("add")
850 def delete_nodegroups (self):
851 "delete nodegroups with PLCAPI"
852 return self.do_nodegroups("delete")
856 def translate_timestamp (start,grain,timestamp):
857 if timestamp < TestPlc.YEAR: return start+timestamp*grain
858 else: return timestamp
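# e.g. with start=1400000000 and grain=1800, a spec value of 2 (clearly below YEAR)
# is read as start + 2*grain = 1400003600, whereas a full epoch timestamp is kept as-is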
861 def timestamp_printable (timestamp):
862 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
865 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
867 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
868 print 'API answered grain=',grain
869 start=(now/grain)*grain
871 # find out all nodes that are reservable
872 nodes=self.all_reservable_nodenames()
874 utils.header ("No reservable node found - proceeding without leases")
877 # attach them to the leases as specified in plc_specs
878 # this is where the 'leases' field gets interpreted as relative or absolute
879 for lease_spec in self.plc_spec['leases']:
880 # skip the ones that come with a null slice id
881 if not lease_spec['slice']: continue
882 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
883 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
884 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
885 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
886 if lease_addition['errors']:
887 utils.header("Cannot create leases, %s"%lease_addition['errors'])
890 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
891 (nodes,lease_spec['slice'],
892 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
893 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
897 def delete_leases (self):
898 "remove all leases in the myplc side"
899 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
900 utils.header("Cleaning leases %r"%lease_ids)
901 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
904 def list_leases (self):
905 "list all leases known to the myplc"
906 leases = self.apiserver.GetLeases(self.auth_root())
909 current=l['t_until']>=now
910 if self.options.verbose or current:
911 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
912 TestPlc.timestamp_printable(l['t_from']),
913 TestPlc.timestamp_printable(l['t_until'])))
916 # create nodegroups if needed, and populate
917 def do_nodegroups (self, action="add"):
918 # 1st pass to scan contents
920 for site_spec in self.plc_spec['sites']:
921 test_site = TestSite (self,site_spec)
922 for node_spec in site_spec['nodes']:
923 test_node=TestNode (self,test_site,node_spec)
924 if node_spec.has_key('nodegroups'):
925 nodegroupnames=node_spec['nodegroups']
926 if isinstance(nodegroupnames,StringTypes):
927 nodegroupnames = [ nodegroupnames ]
928 for nodegroupname in nodegroupnames:
929 if not groups_dict.has_key(nodegroupname):
930 groups_dict[nodegroupname]=[]
931 groups_dict[nodegroupname].append(test_node.name())
932 auth=self.auth_root()
934 for (nodegroupname,group_nodes) in groups_dict.iteritems():
936 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
937 # first, check if the nodetagtype is here
938 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
940 tag_type_id = tag_types[0]['tag_type_id']
942 tag_type_id = self.apiserver.AddTagType(auth,
943 {'tagname':nodegroupname,
944 'description': 'for nodegroup %s'%nodegroupname,
946 print 'located tag (type)',nodegroupname,'as',tag_type_id
948 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
950 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
951 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
952 # set node tag on all nodes, value='yes'
953 for nodename in group_nodes:
955 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
957 traceback.print_exc()
958 print 'node',nodename,'seems to already have tag',nodegroupname
961 expect_yes = self.apiserver.GetNodeTags(auth,
962 {'hostname':nodename,
963 'tagname':nodegroupname},
964 ['value'])[0]['value']
965 if expect_yes != "yes":
966 print 'Mismatch node tag on node',nodename,'got',expect_yes
969 if not self.options.dry_run:
970 print 'Cannot find tag',nodegroupname,'on node',nodename
974 print 'cleaning nodegroup',nodegroupname
975 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
977 traceback.print_exc()
981 # a list of TestNode objs
982 def all_nodes (self):
984 for site_spec in self.plc_spec['sites']:
985 test_site = TestSite (self,site_spec)
986 for node_spec in site_spec['nodes']:
987 nodes.append(TestNode (self,test_site,node_spec))
990 # return a list of tuples (nodename,qemuname)
991 def all_node_infos (self) :
993 for site_spec in self.plc_spec['sites']:
994 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
995 for node_spec in site_spec['nodes'] ]
998 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
999 def all_reservable_nodenames (self):
1001 for site_spec in self.plc_spec['sites']:
1002 for node_spec in site_spec['nodes']:
1003 node_fields=node_spec['node_fields']
1004 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1005 res.append(node_fields['hostname'])
1008 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1009 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1010 if self.options.dry_run:
1014 class CompleterTaskBootState (CompleterTask):
1015 def __init__ (self, test_plc,hostname):
1016 self.test_plc=test_plc
1017 self.hostname=hostname
1018 self.last_boot_state='undef'
1019 def actual_run (self):
1021 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1023 self.last_boot_state = node['boot_state']
1024 return self.last_boot_state == target_boot_state
1028 return "CompleterTaskBootState with node %s"%self.hostname
1029 def failure_epilogue (self):
1030 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1032 timeout = timedelta(minutes=timeout_minutes)
1033 graceout = timedelta(minutes=silent_minutes)
1034 period = timedelta(seconds=period_seconds)
1035 # the nodes that haven't checked yet - start with a full list and shrink over time
1036 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1037 tasks = [ CompleterTaskBootState (self,hostname) \
1038 for (hostname,_) in self.all_node_infos() ]
1039 return Completer (tasks).run (timeout, graceout, period)
1041 def nodes_booted(self):
1042 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1044 def probe_kvm_iptables (self):
1045 (_,kvmbox) = self.all_node_infos()[0]
1046 TestSsh(kvmbox).run("iptables-save")
1050 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1051 class CompleterTaskPingNode (CompleterTask):
1052 def __init__ (self, hostname):
1053 self.hostname=hostname
1054 def run(self,silent):
1055 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1056 return utils.system (command, silent=silent)==0
1057 def failure_epilogue (self):
1058 print "Cannot ping node with name %s"%self.hostname
1059 timeout=timedelta (seconds=timeout_seconds)
1061 period=timedelta (seconds=period_seconds)
1062 node_infos = self.all_node_infos()
1063 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1064 return Completer (tasks).run (timeout, graceout, period)
1066 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1067 def ping_node (self):
1069 return self.check_nodes_ping ()
1071 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1073 timeout = timedelta(minutes=timeout_minutes)
1074 graceout = timedelta(minutes=silent_minutes)
1075 period = timedelta(seconds=period_seconds)
1076 vservername=self.vservername
1079 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1082 local_key = "keys/key_admin.rsa"
1083 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1084 node_infos = self.all_node_infos()
1085 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
1086 for (nodename,qemuname) in node_infos ]
1087 return Completer (tasks).run (timeout, graceout, period)
1089 def ssh_node_debug(self):
1090 "Tries to ssh into nodes in debug mode with the debug ssh key"
1091 return self.check_nodes_ssh(debug=True,
1092 timeout_minutes=self.ssh_node_debug_timeout,
1093 silent_minutes=self.ssh_node_debug_silent)
1095 def ssh_node_boot(self):
1096 "Tries to ssh into nodes in production mode with the root ssh key"
1097 return self.check_nodes_ssh(debug=False,
1098 timeout_minutes=self.ssh_node_boot_timeout,
1099 silent_minutes=self.ssh_node_boot_silent)
1101 def node_bmlogs(self):
1102 "Checks that there's a non-empty dir. /var/log/bm/raw"
1103 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1106 def qemu_local_init (self): pass
1108 def bootcd (self): pass
1110 def qemu_local_config (self): pass
1112 def nodestate_reinstall (self): pass
1114 def nodestate_safeboot (self): pass
1116 def nodestate_boot (self): pass
1118 def nodestate_show (self): pass
1120 def qemu_export (self): pass
1122 ### check hooks : invoke scripts from hooks/{node,slice}
1123 def check_hooks_node (self):
1124 return self.locate_first_node().check_hooks()
1125 def check_hooks_sliver (self) :
1126 return self.locate_first_sliver().check_hooks()
1128 def check_hooks (self):
1129 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1130 return self.check_hooks_node() and self.check_hooks_sliver()
1133 def do_check_initscripts(self):
1134 class CompleterTaskInitscript (CompleterTask):
1135 def __init__ (self, test_sliver, stamp):
1136 self.test_sliver=test_sliver
1138 def actual_run (self):
1139 return self.test_sliver.check_initscript_stamp (self.stamp)
1141 return "initscript checker for %s"%self.test_sliver.name()
1142 def failure_epilogue (self):
1143 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1146 for slice_spec in self.plc_spec['slices']:
1147 if not slice_spec.has_key('initscriptstamp'):
1149 stamp=slice_spec['initscriptstamp']
1150 slicename=slice_spec['slice_fields']['name']
1151 for nodename in slice_spec['nodenames']:
1152 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1153 (site,node) = self.locate_node (nodename)
1154 # xxx - passing the wrong site - probably harmless
1155 test_site = TestSite (self,site)
1156 test_slice = TestSlice (self,test_site,slice_spec)
1157 test_node = TestNode (self,test_site,node)
1158 test_sliver = TestSliver (self, test_node, test_slice)
1159 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1160 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1162 def check_initscripts(self):
1163 "check that the initscripts have triggered"
1164 return self.do_check_initscripts()
1166 def initscripts (self):
1167 "create initscripts with PLCAPI"
1168 for initscript in self.plc_spec['initscripts']:
1169 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1170 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1173 def delete_initscripts (self):
1174 "delete initscripts with PLCAPI"
1175 for initscript in self.plc_spec['initscripts']:
1176 initscript_name = initscript['initscript_fields']['name']
1177 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1179 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1180 print initscript_name,'deleted'
1182 print 'deletion went wrong - probably did not exist'
1187 "create slices with PLCAPI"
1188 return self.do_slices(action="add")
1190 def delete_slices (self):
1191 "delete slices with PLCAPI"
1192 return self.do_slices(action="delete")
1194 def fill_slices (self):
1195 "add nodes in slices with PLCAPI"
1196 return self.do_slices(action="fill")
1198 def empty_slices (self):
1199 "remove nodes from slices with PLCAPI"
1200 return self.do_slices(action="empty")
1202 def do_slices (self, action="add"):
1203 for slice in self.plc_spec['slices']:
1204 site_spec = self.locate_site (slice['sitename'])
1205 test_site = TestSite(self,site_spec)
1206 test_slice=TestSlice(self,test_site,slice)
1207 if action == "delete":
1208 test_slice.delete_slice()
1209 elif action=="fill":
1210 test_slice.add_nodes()
1211 elif action=="empty":
1212 test_slice.delete_nodes()
1214 test_slice.create_slice()
1217 @slice_mapper__tasks(20,10,15)
1218 def ssh_slice(self): pass
1219 @slice_mapper__tasks(20,19,15)
1220 def ssh_slice_off (self): pass
1221 @slice_mapper__tasks(1,1,15)
1222 def slice_fs_present(self): pass
1223 @slice_mapper__tasks(1,1,15)
1224 def slice_fs_deleted(self): pass
1226 # use another name so we can exclude/ignore it from the tests on the nightly command line
1227 def ssh_slice_again(self): return self.ssh_slice()
1228 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1229 # but for some reason the ignore-wrapping thing would not
1232 def ssh_slice_basics(self): pass
1234 def check_vsys_defaults(self): pass
1237 def keys_clear_known_hosts (self): pass
1239 def plcapi_urls (self):
1240 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1242 def speed_up_slices (self):
1243 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1244 return self._speed_up_slices (30,10)
1245 def super_speed_up_slices (self):
1246 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1247 return self._speed_up_slices (5,1)
1249 def _speed_up_slices (self, p, r):
1250 # create the template on the server-side
1251 template="%s.nodemanager"%self.name()
1252 template_file = open (template,"w")
1253 template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
1254 template_file.close()
1255 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1256 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1257 self.test_ssh.copy_abs(template,remote)
1259 if not self.apiserver.GetConfFiles (self.auth_root(),
1260 {'dest':'/etc/sysconfig/nodemanager'}):
1261 self.apiserver.AddConfFile (self.auth_root(),
1262 {'dest':'/etc/sysconfig/nodemanager',
1263 'source':'PlanetLabConf/nodemanager',
1264 'postinstall_cmd':'service nm restart',})
1267 def debug_nodemanager (self):
1268 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1269 template="%s.nodemanager"%self.name()
1270 template_file = open (template,"w")
1271 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1272 template_file.close()
1273 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1274 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1275 self.test_ssh.copy_abs(template,remote)
1279 def qemu_start (self) : pass
1282 def qemu_timestamp (self) : pass
1284 # when a spec refers to a node possibly on another plc
1285 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1286 for plc in [ self ] + other_plcs:
1288 return plc.locate_sliver_obj (nodename, slicename)
1291 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1293 # implement this one as a cross step so that we can take advantage of different nodes
1294 # in multi-plcs mode
1295 def cross_check_tcp (self, other_plcs):
1296 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1297 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1298 utils.header ("check_tcp: no/empty config found")
1300 specs = self.plc_spec['tcp_specs']
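# each spec is expected to look roughly like this (hypothetical values, and assuming a
# 'port' entry as used below):
#   { 'server_node':'node1', 'server_slice':'inri_slice1',
#     'client_node':'node2', 'client_slice':'inri_slice1', 'port':2000 }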
1305 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1306 if not s_test_sliver.run_tcp_server(port,timeout=20):
1310 # idem for the client side
1311 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1312 # use nodename from the located sliver, unless 'client_connect' is set
1313 if 'client_connect' in spec:
1314 destination = spec['client_connect']
1316 destination=s_test_sliver.test_node.name()
1317 if not c_test_sliver.run_tcp_client(destination,port):
1321 # painfully enough, we need to allow for some time as netflow might show up last
1322 def check_system_slice (self):
1323 "all nodes: check that a system slice is alive"
1324 # netflow currently not working in the lxc distro
1325 # drl not built at all in the wtx distro
1326 # if we find either of them we're happy
1327 return self.check_netflow() or self.check_drl()
1330 def check_netflow (self): return self._check_system_slice ('netflow')
1331 def check_drl (self): return self._check_system_slice ('drl')
1333 # we have the slices up already here, so it should not take too long
1334 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1335 class CompleterTaskSystemSlice (CompleterTask):
1336 def __init__ (self, test_node, dry_run):
1337 self.test_node=test_node
1338 self.dry_run=dry_run
1339 def actual_run (self):
1340 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1342 return "System slice %s @ %s"%(slicename, self.test_node.name())
1343 def failure_epilogue (self):
1344 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1345 timeout = timedelta(minutes=timeout_minutes)
1346 silent = timedelta (0)
1347 period = timedelta (seconds=period_seconds)
1348 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1349 for test_node in self.all_nodes() ]
1350 return Completer (tasks) . run (timeout, silent, period)
1352 def plcsh_stress_test (self):
1353 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1354 # install the stress-test in the plc image
1355 location = "/usr/share/plc_api/plcsh_stress_test.py"
1356 remote="%s/%s"%(self.vm_root_in_host(),location)
1357 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1359 command += " -- --check"
1360 if self.options.size == 1:
1361 command += " --tiny"
1362 return ( self.run_in_guest(command) == 0)
1364 # populate runs the same utility with slightly different options
1365 # in particular it runs with --preserve (don't cleanup) and without --check
1366 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1368 def sfa_install_all (self):
1369 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1370 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1372 def sfa_install_core(self):
1374 return self.yum_install ("sfa")
1376 def sfa_install_plc(self):
1377 "yum install sfa-plc"
1378 return self.yum_install("sfa-plc")
1380 def sfa_install_sfatables(self):
1381 "yum install sfa-sfatables"
1382 return self.yum_install ("sfa-sfatables")
1384 # for some very odd reason, this sometimes fails with the following symptom
1385 # # yum install sfa-client
1386 # Setting up Install Process
1388 # Downloading Packages:
1389 # Running rpm_check_debug
1390 # Running Transaction Test
1391 # Transaction Test Succeeded
1392 # Running Transaction
1393 # Transaction couldn't start:
1394 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1395 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1396 # even though in the same context I have
1397 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1398 # Filesystem Size Used Avail Use% Mounted on
1399 # /dev/hdv1 806G 264G 501G 35% /
1400 # none 16M 36K 16M 1% /tmp
1402 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1403 def sfa_install_client(self):
1404 "yum install sfa-client"
1405 first_try=self.yum_install("sfa-client")
1406 if first_try: return True
1407 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1408 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1409 utils.header("rpm_path=<<%s>>"%rpm_path)
1411 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1412 return self.yum_check_installed ("sfa-client")
1414 def sfa_dbclean(self):
1415 "thoroughly wipes off the SFA database"
1416 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1417 self.run_in_guest("sfa-nuke.py")==0 or \
1418 self.run_in_guest("sfa-nuke-plc.py")==0
1420 def sfa_fsclean(self):
1421 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1422 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1425 def sfa_plcclean(self):
1426 "cleans the PLC entries that were created as a side effect of running the script"
1428 sfa_spec=self.plc_spec['sfa']
1430 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1431 login_base=auth_sfa_spec['login_base']
1432 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1433 except: print "Site %s already absent from PLC db"%login_base
1435 for spec_name in ['pi_spec','user_spec']:
1436 user_spec=auth_sfa_spec[spec_name]
1437 username=user_spec['email']
1438 try: self.apiserver.DeletePerson(self.auth_root(),username)
1440 # this in fact is expected as sites delete their members
1441 #print "User %s already absent from PLC db"%username
1444 print "REMEMBER TO RUN sfa_import AGAIN"
1447 def sfa_uninstall(self):
1448 "uses rpm to uninstall sfa - ignore result"
1449 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1450 self.run_in_guest("rm -rf /var/lib/sfa")
1451 self.run_in_guest("rm -rf /etc/sfa")
1452 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1454 self.run_in_guest("rpm -e --noscripts sfa-plc")
1457 ### run unit tests for SFA
1458 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1459 # Running Transaction
1460 # Transaction couldn't start:
1461 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1462 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1463 # no matter how many Gbs are available on the testplc
1464 # could not figure out what's wrong, so...
1465 # if the yum install phase fails, consider the test is successful
1466 # other combinations will eventually run it hopefully
1467 def sfa_utest(self):
1468 "yum install sfa-tests and run SFA unittests"
1469 self.run_in_guest("yum -y install sfa-tests")
1470 # failed to install - forget it
1471 if self.run_in_guest("rpm -q sfa-tests")!=0:
1472 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1474 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1478 dirname="conf.%s"%self.plc_spec['name']
1479 if not os.path.isdir(dirname):
1480 utils.system("mkdir -p %s"%dirname)
1481 if not os.path.isdir(dirname):
1482 raise Exception,"Cannot create config dir for plc %s"%self.name()
1485 def conffile(self,filename):
1486 return "%s/%s"%(self.confdir(),filename)
1487 def confsubdir(self,dirname,clean,dry_run=False):
1488 subdirname="%s/%s"%(self.confdir(),dirname)
1490 utils.system("rm -rf %s"%subdirname)
1491 if not os.path.isdir(subdirname):
1492 utils.system("mkdir -p %s"%subdirname)
1493 if not dry_run and not os.path.isdir(subdirname):
1494 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1497 def conffile_clean (self,filename):
1498 filename=self.conffile(filename)
1499 return utils.system("rm -rf %s"%filename)==0
1502 def sfa_configure(self):
1503 "run sfa-config-tty"
1504 tmpname=self.conffile("sfa-config-tty")
1505 fileconf=open(tmpname,'w')
1506 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1507 fileconf.write ('e %s\n%s\n'%(var,value))
1508 # # the way plc_config handles booleans just sucks..
1511 # if self.plc_spec['sfa'][var]: val='true'
1512 # fileconf.write ('e %s\n%s\n'%(var,val))
1513 fileconf.write('w\n')
1514 fileconf.write('R\n')
1515 fileconf.write('q\n')
1517 utils.system('cat %s'%tmpname)
1518 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1521 def aggregate_xml_line(self):
1522 port=self.plc_spec['sfa']['neighbours-port']
1523 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1524 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1526 def registry_xml_line(self):
1527 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1528 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
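# for illustration (hypothetical addr and hrn, the registry port is hard-coded to 12345
# while the aggregate port comes from 'neighbours-port'), these helpers produce lines like
#   <aggregate addr="192.0.2.10" hrn="onelab" port="12346"/>
#   <registry addr="192.0.2.10" hrn="onelab" port="12345"/>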
1531 # a cross step that takes all other plcs in argument
1532 def cross_sfa_configure(self, other_plcs):
1533 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1534 # of course with a single plc, other_plcs is an empty list
1537 agg_fname=self.conffile("agg.xml")
1538 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1539 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1540 utils.header ("(Over)wrote %s"%agg_fname)
1541 reg_fname=self.conffile("reg.xml")
1542 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1543 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1544 utils.header ("(Over)wrote %s"%reg_fname)
1545 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1546 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1548 def sfa_import(self):
1549 "use sfaadmin to import from plc"
1550 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1551 return self.run_in_guest('sfaadmin reg import_registry')==0
1553 def sfa_start(self):
1555 return self.start_service('sfa')
1558 def sfi_configure(self):
1559 "Create /root/sfi on the plc side for sfi client configuration"
1560 if self.options.dry_run:
1561 utils.header("DRY RUN - skipping step")
1563 sfa_spec=self.plc_spec['sfa']
1564 # cannot use auth_sfa_mapper to pass dir_name
1565 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1566 test_slice=TestAuthSfa(self,slice_spec)
1567 dir_basename=os.path.basename(test_slice.sfi_path())
1568 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1569 test_slice.sfi_configure(dir_name)
1570 # push into the remote /root/sfi area
1571 location = test_slice.sfi_path()
1572 remote="%s/%s"%(self.vm_root_in_host(),location)
1573 self.test_ssh.mkdir(remote,abs=True)
1574 # need to strip the last level of remote, otherwise we get an extra dir level
1575 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1579 def sfi_clean (self):
1580 "clean up /root/sfi on the plc side"
1581 self.run_in_guest("rm -rf /root/sfi")
1585 def sfa_register_site (self): pass
1587 def sfa_register_pi (self): pass
1589 def sfa_register_user(self): pass
1591 def sfa_update_user(self): pass
1593 def sfa_register_slice(self): pass
1595 def sfa_renew_slice(self): pass
1597 def sfa_get_expires(self): pass
1599 def sfa_discover(self): pass
1601 def sfa_rspec(self): pass
1603 def sfa_allocate(self): pass
1605 def sfa_provision(self): pass
1607 def sfa_check_slice_plc(self): pass
1609 def sfa_update_slice(self): pass
1611 def sfa_remove_user_from_slice(self): pass
1613 def sfa_insert_user_in_slice(self): pass
1615 def sfi_list(self): pass
1617 def sfi_show_site(self): pass
1619 def sfi_show_slice(self): pass
1621 def sfi_show_slice_researchers(self): pass
1623 def ssh_slice_sfa(self): pass
1625 def sfa_delete_user(self): pass
1627 def sfa_delete_slice(self): pass
1631 return self.stop_service ('sfa')
1633 def populate (self):
1634 "creates random entries in the PLCAPI"
1635 # install the stress-test in the plc image
1636 location = "/usr/share/plc_api/plcsh_stress_test.py"
1637 remote="%s/%s"%(self.vm_root_in_host(),location)
1638 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1640 command += " -- --preserve --short-names"
1641 local = (self.run_in_guest(command) == 0)
1642 # second run with --foreign
1643 command += ' --foreign'
1644 remote = (self.run_in_guest(command) == 0)
1645 return (local and remote)
1647 def gather_logs (self):
1648 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1649 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1650 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1651 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1652 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1653 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1654 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1656 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1657 self.gather_var_logs ()
1659 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1660 self.gather_pgsql_logs ()
1662 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1663 self.gather_root_sfi ()
1665 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1666 for site_spec in self.plc_spec['sites']:
1667 test_site = TestSite (self,site_spec)
1668 for node_spec in site_spec['nodes']:
1669 test_node=TestNode(self,test_site,node_spec)
1670 test_node.gather_qemu_logs()
1672 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1673 self.gather_nodes_var_logs()
1675 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1676 self.gather_slivers_var_logs()
1679 def gather_slivers_var_logs(self):
1680 for test_sliver in self.all_sliver_objs():
1681 remote = test_sliver.tar_var_logs()
1682 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1683 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1684 utils.system(command)
1687 def gather_var_logs (self):
1688 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1689 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1690 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1691 utils.system(command)
1692 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1693 utils.system(command)
1695 def gather_pgsql_logs (self):
1696 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1697 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1698 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1699 utils.system(command)
1701 def gather_root_sfi (self):
1702 utils.system("mkdir -p logs/sfi.%s"%self.name())
1703 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1704 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1705 utils.system(command)
1707 def gather_nodes_var_logs (self):
1708 for site_spec in self.plc_spec['sites']:
1709 test_site = TestSite (self,site_spec)
1710 for node_spec in site_spec['nodes']:
1711 test_node=TestNode(self,test_site,node_spec)
1712 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1713 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1714 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1715 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1716 utils.system(command)
1719 # returns the filename to use for sql dump/restore, using options.dbname if set
1720 def dbfile (self, database):
1721 # uses options.dbname if it is found
1723 name=self.options.dbname
1724 if not isinstance(name,StringTypes):
1730 return "/root/%s-%s.sql"%(database,name)
1732 def plc_db_dump(self):
1733 'dump the planetlab5 DB in /root in the PLC - filename has time'
1734 dump=self.dbfile("planetab5")
1735 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1736 utils.header('Dumped planetlab5 database in %s'%dump)
1739 def plc_db_restore(self):
1740 'restore the planetlab5 DB - looks broken, but run -n might help'
1741 dump=self.dbfile("planetab5")
1742 ##stop httpd service
1743 self.run_in_guest('service httpd stop')
1744 # xxx - need another wrapper
1745 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1746 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1747 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1748 ##starting httpd service
1749 self.run_in_guest('service httpd start')
1751 utils.header('Database restored from ' + dump)
1754 def create_ignore_steps ():
1755 for step in TestPlc.default_steps + TestPlc.other_steps:
1756 # default step can have a plc qualifier
1757 if '@' in step: (step,qualifier)=step.split('@')
1758 # or be defined as forced or ignored by default
1759 for keyword in ['_ignore','_force']:
1760 if step.endswith (keyword): step=step.replace(keyword,'')
1761 if step == SEP or step == SEPSFA : continue
1762 method=getattr(TestPlc,step)
1764 wrapped=ignore_result(method)
1765 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1766 setattr(TestPlc, name, wrapped)
1769 # def ssh_slice_again_ignore (self): pass
1771 # def check_initscripts_ignore (self): pass
1773 def standby_1_through_20(self):
1774 """convenience function to wait for a specified number of minutes"""
1777 def standby_1(): pass
1779 def standby_2(): pass
1781 def standby_3(): pass
1783 def standby_4(): pass
1785 def standby_5(): pass
1787 def standby_6(): pass
1789 def standby_7(): pass
1791 def standby_8(): pass
1793 def standby_9(): pass
1795 def standby_10(): pass
1797 def standby_11(): pass
1799 def standby_12(): pass
1801 def standby_13(): pass
1803 def standby_14(): pass
1805 def standby_15(): pass
1807 def standby_16(): pass
1809 def standby_17(): pass
1811 def standby_18(): pass
1813 def standby_19(): pass
1815 def standby_20(): pass
1817 # convenience for debugging the test logic
1818 def yes (self): return True
1819 def no (self): return False