1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
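# as a reminder, a minimal sketch of what such a step could look like (hypothetical name,
# not part of the actual step lists):
#     def my_step (self):
#         "one-line doc string, shown when listing steps"
#         return self.run_in_guest("some-command") == 0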
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d mn'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
42 def node_mapper (method):
43 def actual(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # restore the doc text
50 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
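# typical use of node_mapper (sketch): a TestPlc step of a given name simply delegates to
# the TestNode method of the same name on every node - e.g. the qemu_* steps further down,
# whose (elided) decorator is assumed to be @node_mapper:
#     @node_mapper
#     def qemu_local_init (self): pass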
53 def slice_mapper (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
67 # run a step but return True so that we can go on
68 def ignore_result (method):
70 # ssh_slice_ignore->ssh_slice
71 ref_name=method.__name__.replace('_ignore','').replace('ignore_','')
72 ref_method=TestPlc.__dict__[ref_name]
73 result=ref_method(self)
74 print "Actual - but ignored - result for %(ref_name)s is %(result)s"%locals()
75 return Ignored (result)
76 wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
79 # a variant that expects the TestSlice method to return a list of CompleterTasks that
80 # are then merged into a single Completer run to avoid waiting for all the slices
81 # especially useful, of course, when a test fails
82 # because we need to pass arguments we use a class instead..
83 class slice_mapper__tasks (object):
84 # could not get this to work with named arguments
85 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
86 print "timeout_minutes,silent_minutes,period_seconds",timeout_minutes,silent_minutes,period_seconds
87 self.timeout=timedelta(minutes=timeout_minutes)
88 self.silent=timedelta(minutes=silent_minutes)
89 self.period=timedelta(seconds=period_seconds)
90 def __call__ (self, method):
92 # compute augmented method name
93 method_name = method.__name__ + "__tasks"
95 slice_method = TestSlice.__dict__[ method_name ]
98 for slice_spec in self.plc_spec['slices']:
99 site_spec = self.locate_site (slice_spec['sitename'])
100 test_site = TestSite(self,site_spec)
101 test_slice=TestSlice(self,test_site,slice_spec)
102 tasks += slice_method (test_slice, self.options)
103 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
104 # restore the doc text from the TestSlice method even if a bit odd
105 wrappee.__doc__ = slice_method.__doc__
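# typical use, as seen further down with ssh_slice - i.e. timeout=20mn, silent=10mn,
# period=15s, the tasks being collected from TestSlice.ssh_slice__tasks:
#     @slice_mapper__tasks(20,10,15)
#     def ssh_slice(self): pass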
108 def auth_sfa_mapper (method):
111 auth_method = TestAuthSfa.__dict__[method.__name__]
112 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
113 test_auth=TestAuthSfa(self,auth_spec)
114 if not auth_method(test_auth,self.options): overall=False
116 # restore the doc text
117 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
121 def __init__ (self,result):
131 'vs_delete','timestamp_vs','vs_create', SEP,
132 # 'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
133 'plc_install', 'plc_configure', 'plc_start', SEP,
134 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
135 'plcapi_urls','speed_up_slices', SEP,
136 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
137 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
138 # keep this out of the way for now
139 # 'check_vsys_defaults', SEP,
140 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
141 'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
142 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
143 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
144 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
145 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
146 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
147 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
148 # but as the stress test might take a while, we sometimes missed the debug mode..
149 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
150 'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
151 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
152 'cross_check_tcp@1', 'check_system_slice', SEP,
153 # check slices are turned off properly
154 'empty_slices', 'ssh_slice_off', SEP,
155 # check they are properly re-created with the same name
156 'fill_slices', 'ssh_slice_again_ignore', SEP,
157 'gather_logs_force', SEP,
160 'export', 'show_boxes', SEP,
161 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
162 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
163 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
164 'delete_leases', 'list_leases', SEP,
166 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
167 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
168 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
169 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
170 'plc_db_dump' , 'plc_db_restore', SEP,
171 'check_netflow','check_drl', SEP,
172 'debug_nodemanager', SEP,
173 'standby_1_through_20',SEP,
177 def printable_steps (list):
178 single_line=" ".join(list)+" "
179 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
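# so a step list like e.g. ['plc_install','plc_configure',SEP,'keys_fetch'] prints as
# a backslash-continued, cut'n paste-able block (sketch):
#     plc_install plc_configure \
#     keys_fetch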
181 def valid_step (step):
182 return step != SEP and step != SEPSFA
184 # turn off the sfa-related steps when build has skipped SFA
185 # this was originally for centos5 but is still valid
186 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
188 def check_whether_build_has_sfa (rpms_url):
189 utils.header ("Checking if build provides SFA package...")
190 # warning, we're now building 'sface' so let's be a bit more picky
191 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
192 # full builds are expected to return with 0 here
194 utils.header("build does provide SFA")
196 # move all steps containing 'sfa' from default_steps to other_steps
197 utils.header("SFA package not found - removing steps with sfa or sfi")
198 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
199 TestPlc.other_steps += sfa_steps
200 for step in sfa_steps: TestPlc.default_steps.remove(step)
202 def __init__ (self,plc_spec,options):
203 self.plc_spec=plc_spec
205 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
206 self.vserverip=plc_spec['vserverip']
207 self.vservername=plc_spec['vservername']
208 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
209 self.apiserver=TestApiserver(self.url,options.dry_run)
210 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
211 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
213 def has_addresses_api (self):
214 return self.apiserver.has_method('AddIpAddress')
217 name=self.plc_spec['name']
218 return "%s.%s"%(name,self.vservername)
221 return self.plc_spec['host_box']
224 return self.test_ssh.is_local()
226 # define the API methods on this object through xmlrpc
227 # would help, but not strictly necessary
231 def actual_command_in_guest (self,command):
232 return self.test_ssh.actual_command(self.host_to_guest(command),dry_run=self.options.dry_run)
234 def start_guest (self):
235 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
237 def stop_guest (self):
238 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
240 def run_in_guest (self,command):
241 return utils.system(self.actual_command_in_guest(command))
243 def run_in_host (self,command):
244 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
246 #command gets run in the plc's vm
247 def host_to_guest(self,command):
248 if self.options.plcs_use_lxc:
249 return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
251 return "vserver %s exec %s"%(self.vservername,command)
253 def vm_root_in_host(self):
254 if self.options.plcs_use_lxc:
255 return "/vservers/%s/rootfs/"%(self.vservername)
257 return "/vservers/%s"%(self.vservername)
259 def vm_timestamp_path (self):
260 if self.options.plcs_use_lxc:
261 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
263 return "/vservers/%s.timestamp"%(self.vservername)
265 #start/stop the vserver
266 def start_guest_in_host(self):
267 if self.options.plcs_use_lxc:
268 return "virsh -c lxc:// start %s"%(self.vservername)
270 return "vserver %s start"%(self.vservername)
272 def stop_guest_in_host(self):
273 if self.options.plcs_use_lxc:
274 return "virsh -c lxc:// destroy %s"%(self.vservername)
276 return "vserver %s stop"%(self.vservername)
279 def run_in_guest_piped (self,local,remote):
280 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
282 def yum_check_installed (self, rpms):
283 if isinstance (rpms, list):
285 return self.run_in_guest("rpm -q %s"%rpms)==0
287 # does a yum install in the vs, ignores the yum return code, and checks the result with rpm
288 def yum_install (self, rpms):
289 if isinstance (rpms, list):
291 self.run_in_guest("yum -y install %s"%rpms)
292 # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
293 self.run_in_guest("yum-complete-transaction -y")
294 return self.yum_check_installed (rpms)
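# typical use (see plc_install below): self.yum_install(pkgs_list) with a list holding
# myplc, noderepo-<nodefamily> and nodeimage-<nodefamily>-plain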
296 def auth_root (self):
297 return {'Username':self.plc_spec['PLC_ROOT_USER'],
298 'AuthMethod':'password',
299 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
300 'Role' : self.plc_spec['role']
302 def locate_site (self,sitename):
303 for site in self.plc_spec['sites']:
304 if site['site_fields']['name'] == sitename:
306 if site['site_fields']['login_base'] == sitename:
308 raise Exception,"Cannot locate site %s"%sitename
310 def locate_node (self,nodename):
311 for site in self.plc_spec['sites']:
312 for node in site['nodes']:
313 if node['name'] == nodename:
315 raise Exception,"Cannot locate node %s"%nodename
317 def locate_hostname (self,hostname):
318 for site in self.plc_spec['sites']:
319 for node in site['nodes']:
320 if node['node_fields']['hostname'] == hostname:
322 raise Exception,"Cannot locate hostname %s"%hostname
324 def locate_key (self,key_name):
325 for key in self.plc_spec['keys']:
326 if key['key_name'] == key_name:
328 raise Exception,"Cannot locate key %s"%key_name
330 def locate_private_key_from_key_names (self, key_names):
331 # locate the first available key
333 for key_name in key_names:
334 key_spec=self.locate_key(key_name)
335 test_key=TestKey(self,key_spec)
336 publickey=test_key.publicpath()
337 privatekey=test_key.privatepath()
338 if os.path.isfile(publickey) and os.path.isfile(privatekey):
340 if found: return privatekey
343 def locate_slice (self, slicename):
344 for slice in self.plc_spec['slices']:
345 if slice['slice_fields']['name'] == slicename:
347 raise Exception,"Cannot locate slice %s"%slicename
349 def all_sliver_objs (self):
351 for slice_spec in self.plc_spec['slices']:
352 slicename = slice_spec['slice_fields']['name']
353 for nodename in slice_spec['nodenames']:
354 result.append(self.locate_sliver_obj (nodename,slicename))
357 def locate_sliver_obj (self,nodename,slicename):
358 (site,node) = self.locate_node(nodename)
359 slice = self.locate_slice (slicename)
361 test_site = TestSite (self, site)
362 test_node = TestNode (self, test_site,node)
363 # xxx the slice site is assumed to be the node site - mhh - probably harmless
364 test_slice = TestSlice (self, test_site, slice)
365 return TestSliver (self, test_node, test_slice)
367 def locate_first_node(self):
368 nodename=self.plc_spec['slices'][0]['nodenames'][0]
369 (site,node) = self.locate_node(nodename)
370 test_site = TestSite (self, site)
371 test_node = TestNode (self, test_site,node)
374 def locate_first_sliver (self):
375 slice_spec=self.plc_spec['slices'][0]
376 slicename=slice_spec['slice_fields']['name']
377 nodename=slice_spec['nodenames'][0]
378 return self.locate_sliver_obj(nodename,slicename)
380 # all different hostboxes used in this plc
381 def get_BoxNodes(self):
382 # maps on sites and nodes, return [ (host_box,test_node) ]
384 for site_spec in self.plc_spec['sites']:
385 test_site = TestSite (self,site_spec)
386 for node_spec in site_spec['nodes']:
387 test_node = TestNode (self, test_site, node_spec)
388 if not test_node.is_real():
389 tuples.append( (test_node.host_box(),test_node) )
390 # transform into a dict { 'host_box' -> [ test_node .. ] }
392 for (box,node) in tuples:
393 if not result.has_key(box):
396 result[box].append(node)
399 # a step for checking this stuff
400 def show_boxes (self):
401 'print summary of nodes location'
402 for (box,nodes) in self.get_BoxNodes().iteritems():
403 print box,":"," + ".join( [ node.name() for node in nodes ] )
406 # make this a valid step
407 def qemu_kill_all(self):
408 'kill all qemu instances on the qemu boxes involved by this setup'
409 # this is the brute force version, kill all qemus on that host box
410 for (box,nodes) in self.get_BoxNodes().iteritems():
411 # pass the first nodename, as we don't push template-qemu on testboxes
412 nodedir=nodes[0].nodedir()
413 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
416 # make this a valid step
417 def qemu_list_all(self):
418 'list all qemu instances on the qemu boxes involved by this setup'
419 for (box,nodes) in self.get_BoxNodes().iteritems():
420 # this is the brute force version, kill all qemus on that host box
421 TestBoxQemu(box,self.options.buildname).qemu_list_all()
424 # kill only the qemus related to this test
425 def qemu_list_mine(self):
426 'list qemu instances for our nodes'
427 for (box,nodes) in self.get_BoxNodes().iteritems():
428 # the fine-grain version
433 # kill only the qemus related to this test
434 def qemu_clean_mine(self):
435 'cleanup (rm -rf) qemu instances for our nodes'
436 for (box,nodes) in self.get_BoxNodes().iteritems():
437 # the fine-grain version
442 # kill only the right qemus
443 def qemu_kill_mine(self):
444 'kill the qemu instances for our nodes'
445 for (box,nodes) in self.get_BoxNodes().iteritems():
446 # the fine-grain version
451 #################### display config
453 "show test configuration after localization"
458 # ugly hack to make sure 'run export' only reports about the 1st plc
459 # to avoid confusion - also we use 'inri_slice1' in various aliases..
462 "print cut'n paste-able stuff to export env variables to your shell"
463 # guess local domain from hostname
464 if TestPlc.exported_id>1:
465 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
467 TestPlc.exported_id+=1
468 domain=socket.gethostname().split('.',1)[1]
469 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
470 print "export BUILD=%s"%self.options.buildname
471 if self.options.plcs_use_lxc:
472 print "export PLCHOSTLXC=%s"%fqdn
474 print "export PLCHOSTVS=%s"%fqdn
475 print "export GUESTNAME=%s"%self.plc_spec['vservername']
476 vplcname=self.plc_spec['vservername'].split('-')[-1]
477 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
478 # find hostname of first node
479 (hostname,qemubox) = self.all_node_infos()[0]
480 print "export KVMHOST=%s.%s"%(qemubox,domain)
481 print "export NODE=%s"%(hostname)
485 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
486 def show_pass (self,passno):
487 for (key,val) in self.plc_spec.iteritems():
488 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
492 self.display_site_spec(site)
493 for node in site['nodes']:
494 self.display_node_spec(node)
495 elif key=='initscripts':
496 for initscript in val:
497 self.display_initscript_spec (initscript)
500 self.display_slice_spec (slice)
503 self.display_key_spec (key)
505 if key not in ['sites','initscripts','slices','keys', 'sfa']:
506 print '+ ',key,':',val
508 def display_site_spec (self,site):
509 print '+ ======== site',site['site_fields']['name']
510 for (k,v) in site.iteritems():
511 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
514 print '+ ','nodes : ',
516 print node['node_fields']['hostname'],'',
522 print user['name'],'',
524 elif k == 'site_fields':
525 print '+ login_base',':',v['login_base']
526 elif k == 'address_fields':
532 def display_initscript_spec (self,initscript):
533 print '+ ======== initscript',initscript['initscript_fields']['name']
535 def display_key_spec (self,key):
536 print '+ ======== key',key['key_name']
538 def display_slice_spec (self,slice):
539 print '+ ======== slice',slice['slice_fields']['name']
540 for (k,v) in slice.iteritems():
553 elif k=='slice_fields':
554 print '+ fields',':',
555 print 'max_nodes=',v['max_nodes'],
560 def display_node_spec (self,node):
561 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
562 print "hostname=",node['node_fields']['hostname'],
563 print "ip=",node['interface_fields']['ip']
564 if self.options.verbose:
565 utils.pprint("node details",node,depth=3)
567 # another entry point for just showing the boxes involved
568 def display_mapping (self):
569 TestPlc.display_mapping_plc(self.plc_spec)
573 def display_mapping_plc (plc_spec):
574 print '+ MyPLC',plc_spec['name']
575 # WARNING this would not be right for lxc-based PLC's - should be harmless though
576 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
577 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
578 for site_spec in plc_spec['sites']:
579 for node_spec in site_spec['nodes']:
580 TestPlc.display_mapping_node(node_spec)
583 def display_mapping_node (node_spec):
584 print '+ NODE %s'%(node_spec['name'])
585 print '+\tqemu box %s'%node_spec['host_box']
586 print '+\thostname=%s'%node_spec['node_fields']['hostname']
588 # write a timestamp in /vservers/<>.timestamp
589 # cannot be inside the vserver, as that causes vserver .. build to cough
590 def timestamp_vs (self):
591 "Create a timestamp to remember creation date for this plc"
593 # TODO-lxc check this one
594 # a first approx. is to store the timestamp close to the VM root like vs does
595 stamp_path=self.vm_timestamp_path ()
596 stamp_dir = os.path.dirname (stamp_path)
597 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
598 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
600 # this is called unconditionally at the beginning of the test sequence
601 # just in case this is a rerun, so if the vm is not running it's fine
603 "vserver delete the test myplc"
604 stamp_path=self.vm_timestamp_path()
605 self.run_in_host("rm -f %s"%stamp_path)
606 if self.options.plcs_use_lxc:
607 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
608 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
609 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
612 self.run_in_host("vserver --silent %s delete"%self.vservername)
616 # historically the build was being fetched by the tests
617 # now the build pushes itself as a subdir of the tests workdir
618 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
619 def vs_create (self):
620 "vserver creation (no install done)"
621 # push the local build/ dir to the testplc box
623 # a full path for the local calls
624 build_dir=os.path.dirname(sys.argv[0])
625 # sometimes this is empty - set to "." in such a case
626 if not build_dir: build_dir="."
627 build_dir += "/build"
629 # use a standard name - will be relative to remote buildname
631 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
632 self.test_ssh.rmdir(build_dir)
633 self.test_ssh.copy(build_dir,recursive=True)
634 # the repo url is taken from arch-rpms-url
635 # with the last step (i386) removed
636 repo_url = self.options.arch_rpms_url
637 for level in [ 'arch' ]:
638 repo_url = os.path.dirname(repo_url)
640 # invoke initvm (drop support for vs)
641 script="ltest-initvm.sh"
643 # pass the vbuild-nightly options to [lv]test-initvm
644 script_options += " -p %s"%self.options.personality
645 script_options += " -d %s"%self.options.pldistro
646 script_options += " -f %s"%self.options.fcdistro
647 script_options += " -r %s"%repo_url
648 vserver_name = self.vservername
650 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
651 script_options += " -n %s"%vserver_hostname
653 print "Cannot reverse lookup %s"%self.vserverip
654 print "This is considered fatal, as this might pollute the test results"
656 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
657 return self.run_in_host(create_vserver) == 0
660 def plc_install(self):
661 "yum install myplc, noderepo, and the plain bootstrapfs"
663 # workaround for getting pgsql8.2 on centos5
664 if self.options.fcdistro == "centos5":
665 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
668 if self.options.personality == "linux32":
670 elif self.options.personality == "linux64":
673 raise Exception, "Unsupported personality %r"%self.options.personality
674 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
677 pkgs_list.append ("slicerepo-%s"%nodefamily)
678 pkgs_list.append ("myplc")
679 pkgs_list.append ("noderepo-%s"%nodefamily)
680 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
681 pkgs_string=" ".join(pkgs_list)
682 return self.yum_install (pkgs_list)
685 def mod_python(self):
686 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
687 return self.yum_install ( [ 'mod_python' ] )
690 def plc_configure(self):
692 tmpname='%s.plc-config-tty'%(self.name())
693 fileconf=open(tmpname,'w')
694 for var in [ 'PLC_NAME',
699 'PLC_MAIL_SUPPORT_ADDRESS',
702 # Above line was added for integrating SFA Testing
708 'PLC_RESERVATION_GRANULARITY',
710 'PLC_OMF_XMPP_SERVER',
713 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
714 fileconf.write('w\n')
715 fileconf.write('q\n')
717 utils.system('cat %s'%tmpname)
718 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
719 utils.system('rm %s'%tmpname)
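# for the record, the temporary file fed to plc-config-tty is a sequence of
# 'e <variable>' / '<value>' pairs followed by 'w' (write) and 'q' (quit) - a sketch,
# assuming PLC_NAME is defined in plc_spec:
#     e PLC_NAME
#     <value of plc_spec['PLC_NAME']>
#     w
#     q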
724 self.run_in_guest('service plc start')
729 self.run_in_guest('service plc stop')
733 "start the PLC vserver"
738 "stop the PLC vserver"
742 # stores the keys from the config for further use
743 def keys_store(self):
744 "stores test users ssh keys in keys/"
745 for key_spec in self.plc_spec['keys']:
746 TestKey(self,key_spec).store_key()
749 def keys_clean(self):
750 "removes keys cached in keys/"
751 utils.system("rm -rf ./keys")
754 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
755 # for later direct access to the nodes
756 def keys_fetch(self):
757 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
759 if not os.path.isdir(dir):
761 vservername=self.vservername
762 vm_root=self.vm_root_in_host()
764 prefix = 'debug_ssh_key'
765 for ext in [ 'pub', 'rsa' ] :
766 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
767 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
768 if self.test_ssh.fetch(src,dst) != 0: overall=False
772 "create sites with PLCAPI"
773 return self.do_sites()
775 def delete_sites (self):
776 "delete sites with PLCAPI"
777 return self.do_sites(action="delete")
779 def do_sites (self,action="add"):
780 for site_spec in self.plc_spec['sites']:
781 test_site = TestSite (self,site_spec)
782 if (action != "add"):
783 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
784 test_site.delete_site()
785 # deleted with the site
786 #test_site.delete_users()
789 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
790 test_site.create_site()
791 test_site.create_users()
794 def delete_all_sites (self):
795 "Delete all sites in PLC, and related objects"
796 print 'auth_root',self.auth_root()
797 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
799 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
800 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
801 site_id=site['site_id']
802 print 'Deleting site_id',site_id
803 self.apiserver.DeleteSite(self.auth_root(),site_id)
807 "create nodes with PLCAPI"
808 return self.do_nodes()
809 def delete_nodes (self):
810 "delete nodes with PLCAPI"
811 return self.do_nodes(action="delete")
813 def do_nodes (self,action="add"):
814 for site_spec in self.plc_spec['sites']:
815 test_site = TestSite (self,site_spec)
817 utils.header("Deleting nodes in site %s"%test_site.name())
818 for node_spec in site_spec['nodes']:
819 test_node=TestNode(self,test_site,node_spec)
820 utils.header("Deleting %s"%test_node.name())
821 test_node.delete_node()
823 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
824 for node_spec in site_spec['nodes']:
825 utils.pprint('Creating node %s'%node_spec,node_spec)
826 test_node = TestNode (self,test_site,node_spec)
827 test_node.create_node ()
830 def nodegroups (self):
831 "create nodegroups with PLCAPI"
832 return self.do_nodegroups("add")
833 def delete_nodegroups (self):
834 "delete nodegroups with PLCAPI"
835 return self.do_nodegroups("delete")
839 def translate_timestamp (start,grain,timestamp):
840 if timestamp < TestPlc.YEAR: return start+timestamp*grain
841 else: return timestamp
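# i.e. small values are relative offsets: with e.g. grain=1800, translate_timestamp(start,grain,2)
# returns start+3600, while a full epoch timestamp (at least one YEAR's worth of seconds)
# is returned unchanged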
844 def timestamp_printable (timestamp):
845 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
848 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
850 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
851 print 'API answered grain=',grain
852 start=(now/grain)*grain
854 # find out all nodes that are reservable
855 nodes=self.all_reservable_nodenames()
857 utils.header ("No reservable node found - proceeding without leases")
860 # attach them to the leases as specified in plc_specs
861 # this is where the 'leases' field gets interpreted as relative or absolute
862 for lease_spec in self.plc_spec['leases']:
863 # skip the ones that come with a null slice id
864 if not lease_spec['slice']: continue
865 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
866 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
867 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
868 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
869 if lease_addition['errors']:
870 utils.header("Cannot create leases, %s"%lease_addition['errors'])
873 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
874 (nodes,lease_spec['slice'],
875 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
876 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
880 def delete_leases (self):
881 "remove all leases in the myplc side"
882 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
883 utils.header("Cleaning leases %r"%lease_ids)
884 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
887 def list_leases (self):
888 "list all leases known to the myplc"
889 leases = self.apiserver.GetLeases(self.auth_root())
892 current=l['t_until']>=now
893 if self.options.verbose or current:
894 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
895 TestPlc.timestamp_printable(l['t_from']),
896 TestPlc.timestamp_printable(l['t_until'])))
899 # create nodegroups if needed, and populate
900 def do_nodegroups (self, action="add"):
901 # 1st pass to scan contents
903 for site_spec in self.plc_spec['sites']:
904 test_site = TestSite (self,site_spec)
905 for node_spec in site_spec['nodes']:
906 test_node=TestNode (self,test_site,node_spec)
907 if node_spec.has_key('nodegroups'):
908 nodegroupnames=node_spec['nodegroups']
909 if isinstance(nodegroupnames,StringTypes):
910 nodegroupnames = [ nodegroupnames ]
911 for nodegroupname in nodegroupnames:
912 if not groups_dict.has_key(nodegroupname):
913 groups_dict[nodegroupname]=[]
914 groups_dict[nodegroupname].append(test_node.name())
915 auth=self.auth_root()
917 for (nodegroupname,group_nodes) in groups_dict.iteritems():
919 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
920 # first, check if the nodetagtype is here
921 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
923 tag_type_id = tag_types[0]['tag_type_id']
925 tag_type_id = self.apiserver.AddTagType(auth,
926 {'tagname':nodegroupname,
927 'description': 'for nodegroup %s'%nodegroupname,
929 print 'located tag (type)',nodegroupname,'as',tag_type_id
931 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
933 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
934 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
935 # set node tag on all nodes, value='yes'
936 for nodename in group_nodes:
938 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
940 traceback.print_exc()
941 print 'node',nodename,'seems to already have tag',nodegroupname
944 expect_yes = self.apiserver.GetNodeTags(auth,
945 {'hostname':nodename,
946 'tagname':nodegroupname},
947 ['value'])[0]['value']
948 if expect_yes != "yes":
949 print 'Mismatch node tag on node',nodename,'got',expect_yes
952 if not self.options.dry_run:
953 print 'Cannot find tag',nodegroupname,'on node',nodename
957 print 'cleaning nodegroup',nodegroupname
958 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
960 traceback.print_exc()
964 # a list of TestNode objs
965 def all_nodes (self):
967 for site_spec in self.plc_spec['sites']:
968 test_site = TestSite (self,site_spec)
969 for node_spec in site_spec['nodes']:
970 nodes.append(TestNode (self,test_site,node_spec))
973 # return a list of tuples (nodename,qemuname)
974 def all_node_infos (self) :
976 for site_spec in self.plc_spec['sites']:
977 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
978 for node_spec in site_spec['nodes'] ]
981 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
982 def all_reservable_nodenames (self):
984 for site_spec in self.plc_spec['sites']:
985 for node_spec in site_spec['nodes']:
986 node_fields=node_spec['node_fields']
987 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
988 res.append(node_fields['hostname'])
991 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
992 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
993 if self.options.dry_run:
997 class CompleterTaskBootState (CompleterTask):
998 def __init__ (self, test_plc,hostname):
999 self.test_plc=test_plc
1000 self.hostname=hostname
1001 self.last_boot_state='undef'
1002 def actual_run (self):
1004 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1006 self.last_boot_state = node['boot_state']
1007 return self.last_boot_state == target_boot_state
1011 return "CompleterTaskBootState with node %s"%self.hostname
1012 def failure_message (self):
1013 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1015 timeout = timedelta(minutes=timeout_minutes)
1016 graceout = timedelta(minutes=silent_minutes)
1017 period = timedelta(seconds=period_seconds)
1018 # the nodes that haven't checked yet - start with a full list and shrink over time
1019 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1020 tasks = [ CompleterTaskBootState (self,hostname) \
1021 for (hostname,_) in self.all_node_infos() ]
1022 return Completer (tasks).run (timeout, graceout, period)
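# note: the Completer is expected to poll each remaining task every <period> until <timeout>,
# and to keep quiet during the first <graceout> (see silent_minutes above)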
1024 def nodes_booted(self):
1025 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1027 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1028 class CompleterTaskNodeSsh (CompleterTask):
1029 def __init__ (self, hostname, qemuname, boot_state, local_key):
1030 self.hostname=hostname
1031 self.qemuname=qemuname
1032 self.boot_state=boot_state
1033 self.local_key=local_key
1034 def run (self, silent):
1035 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1036 return utils.system (command, silent=silent)==0
1037 def failure_message (self):
1038 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1041 timeout = timedelta(minutes=timeout_minutes)
1042 graceout = timedelta(minutes=silent_minutes)
1043 period = timedelta(seconds=period_seconds)
1044 vservername=self.vservername
1047 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1050 local_key = "keys/key_admin.rsa"
1051 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1052 node_infos = self.all_node_infos()
1053 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1054 for (nodename,qemuname) in node_infos ]
1055 return Completer (tasks).run (timeout, graceout, period)
1057 def ssh_node_debug(self):
1058 "Tries to ssh into nodes in debug mode with the debug ssh key"
1059 return self.check_nodes_ssh(debug=True,
1060 timeout_minutes=self.ssh_node_debug_timeout,
1061 silent_minutes=self.ssh_node_debug_silent)
1063 def ssh_node_boot(self):
1064 "Tries to ssh into nodes in production mode with the root ssh key"
1065 return self.check_nodes_ssh(debug=False,
1066 timeout_minutes=self.ssh_node_boot_timeout,
1067 silent_minutes=self.ssh_node_boot_silent)
1069 def node_bmlogs(self):
1070 "Checks that there's a non-empty dir. /var/log/bm/raw"
1071 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1074 def qemu_local_init (self): pass
1076 def bootcd (self): pass
1078 def qemu_local_config (self): pass
1080 def nodestate_reinstall (self): pass
1082 def nodestate_safeboot (self): pass
1084 def nodestate_boot (self): pass
1086 def nodestate_show (self): pass
1088 def qemu_export (self): pass
1090 ### check hooks : invoke scripts from hooks/{node,slice}
1091 def check_hooks_node (self):
1092 return self.locate_first_node().check_hooks()
1093 def check_hooks_sliver (self) :
1094 return self.locate_first_sliver().check_hooks()
1096 def check_hooks (self):
1097 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1098 return self.check_hooks_node() and self.check_hooks_sliver()
1101 def do_check_initscripts(self):
1102 class CompleterTaskInitscript (CompleterTask):
1103 def __init__ (self, test_sliver, stamp):
1104 self.test_sliver=test_sliver
1106 def actual_run (self):
1107 return self.test_sliver.check_initscript_stamp (self.stamp)
1109 return "initscript checker for %s"%self.test_sliver.name()
1110 def failure_message (self):
1111 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1114 for slice_spec in self.plc_spec['slices']:
1115 if not slice_spec.has_key('initscriptstamp'):
1117 stamp=slice_spec['initscriptstamp']
1118 slicename=slice_spec['slice_fields']['name']
1119 for nodename in slice_spec['nodenames']:
1120 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1121 (site,node) = self.locate_node (nodename)
1122 # xxx - passing the wrong site - probably harmless
1123 test_site = TestSite (self,site)
1124 test_slice = TestSlice (self,test_site,slice_spec)
1125 test_node = TestNode (self,test_site,node)
1126 test_sliver = TestSliver (self, test_node, test_slice)
1127 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1128 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1130 def check_initscripts(self):
1131 "check that the initscripts have triggered"
1132 return self.do_check_initscripts()
1134 def initscripts (self):
1135 "create initscripts with PLCAPI"
1136 for initscript in self.plc_spec['initscripts']:
1137 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1138 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1141 def delete_initscripts (self):
1142 "delete initscripts with PLCAPI"
1143 for initscript in self.plc_spec['initscripts']:
1144 initscript_name = initscript['initscript_fields']['name']
1145 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1147 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1148 print initscript_name,'deleted'
1150 print 'deletion went wrong - probably did not exist'
1155 "create slices with PLCAPI"
1156 return self.do_slices(action="add")
1158 def delete_slices (self):
1159 "delete slices with PLCAPI"
1160 return self.do_slices(action="delete")
1162 def fill_slices (self):
1163 "add nodes in slices with PLCAPI"
1164 return self.do_slices(action="fill")
1166 def empty_slices (self):
1167 "remove nodes from slices with PLCAPI"
1168 return self.do_slices(action="empty")
1170 def do_slices (self, action="add"):
1171 for slice in self.plc_spec['slices']:
1172 site_spec = self.locate_site (slice['sitename'])
1173 test_site = TestSite(self,site_spec)
1174 test_slice=TestSlice(self,test_site,slice)
1175 if action == "delete":
1176 test_slice.delete_slice()
1177 elif action=="fill":
1178 test_slice.add_nodes()
1179 elif action=="empty":
1180 test_slice.delete_nodes()
1182 test_slice.create_slice()
1185 @slice_mapper__tasks(20,10,15)
1186 def ssh_slice(self): pass
1187 @slice_mapper__tasks(20,19,15)
1188 def ssh_slice_off (self): pass
1190 # this is semantically just equivalent to ssh_slice
1191 # but we use another name so we can exclude it from the tests on the nightly command line
1192 ssh_slice_again=ssh_slice
1195 def ssh_slice_basics(self): pass
1198 def check_vsys_defaults(self): pass
1201 def keys_clear_known_hosts (self): pass
1203 def plcapi_urls (self):
1204 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1206 def speed_up_slices (self):
1207 "tweak nodemanager settings on all nodes using a conf file"
1208 # create the template on the server-side
1209 template="%s.nodemanager"%self.name()
1210 template_file = open (template,"w")
1211 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1212 template_file.close()
1213 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1214 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1215 self.test_ssh.copy_abs(template,remote)
1217 self.apiserver.AddConfFile (self.auth_root(),
1218 {'dest':'/etc/sysconfig/nodemanager',
1219 'source':'PlanetLabConf/nodemanager',
1220 'postinstall_cmd':'service nm restart',})
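# summary of the mechanism (sketch): the OPTIONS line is pushed under the myplc vm as a
# PlanetLabConf file, and AddConfFile lets the nodes fetch it into /etc/sysconfig/nodemanager
# and restart nm, which shortens the nodemanager cycle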
1223 def debug_nodemanager (self):
1224 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1225 template="%s.nodemanager"%self.name()
1226 template_file = open (template,"w")
1227 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1228 template_file.close()
1229 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1230 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1231 self.test_ssh.copy_abs(template,remote)
1235 def qemu_start (self) : pass
1238 def timestamp_qemu (self) : pass
1240 # when a spec refers to a node possibly on another plc
1241 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1242 for plc in [ self ] + other_plcs:
1244 return plc.locate_sliver_obj (nodename, slicename)
1247 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1249 # implement this one as a cross step so that we can take advantage of different nodes
1250 # in multi-plcs mode
1251 def cross_check_tcp (self, other_plcs):
1252 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1253 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1254 utils.header ("check_tcp: no/empty config found")
1256 specs = self.plc_spec['tcp_specs']
1261 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1262 if not s_test_sliver.run_tcp_server(port,timeout=20):
1266 # idem for the client side
1267 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1268 # use nodename from the located sliver, unless 'client_connect' is set
1269 if 'client_connect' in spec:
1270 destination = spec['client_connect']
1272 destination=s_test_sliver.test_node.name()
1273 if not c_test_sliver.run_tcp_client(destination,port):
1277 # painfully enough, we need to allow for some time as netflow might show up last
1278 def check_system_slice (self):
1279 "all nodes: check that a system slice is alive"
1280 # netflow currently not working in the lxc distro
1281 # drl not built at all in the wtx distro
1282 # if we find either of them we're happy
1283 return self.check_netflow() or self.check_drl()
1286 def check_netflow (self): return self._check_system_slice ('netflow')
1287 def check_drl (self): return self._check_system_slice ('drl')
1289 # we have the slices up already here, so it should not take too long
1290 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1291 class CompleterTaskSystemSlice (CompleterTask):
1292 def __init__ (self, test_node, dry_run):
1293 self.test_node=test_node
1294 self.dry_run=dry_run
1295 def actual_run (self):
1296 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1298 return "System slice %s @ %s"%(slicename, self.test_node.name())
1299 def failure_message (self):
1300 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1301 timeout = timedelta(minutes=timeout_minutes)
1302 silent = timedelta (0)
1303 period = timedelta (seconds=period_seconds)
1304 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1305 for test_node in self.all_nodes() ]
1306 return Completer (tasks) . run (timeout, silent, period)
1308 def plcsh_stress_test (self):
1309 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1310 # install the stress-test in the plc image
1311 location = "/usr/share/plc_api/plcsh_stress_test.py"
1312 remote="%s/%s"%(self.vm_root_in_host(),location)
1313 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1315 command += " -- --check"
1316 if self.options.size == 1:
1317 command += " --tiny"
1318 return ( self.run_in_guest(command) == 0)
1320 # populate runs the same utility with slightly different options
1321 # in particular it runs with --preserve (don't cleanup) and without --check
1322 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1324 def sfa_install_all (self):
1325 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1326 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1328 def sfa_install_core(self):
1330 return self.yum_install ("sfa")
1332 def sfa_install_plc(self):
1333 "yum install sfa-plc"
1334 return self.yum_install("sfa-plc")
1336 def sfa_install_sfatables(self):
1337 "yum install sfa-sfatables"
1338 return self.yum_install ("sfa-sfatables")
1340 # for some very odd reason, this sometimes fails with the following symptom
1341 # # yum install sfa-client
1342 # Setting up Install Process
1344 # Downloading Packages:
1345 # Running rpm_check_debug
1346 # Running Transaction Test
1347 # Transaction Test Succeeded
1348 # Running Transaction
1349 # Transaction couldn't start:
1350 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1351 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1352 # even though in the same context I have
1353 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1354 # Filesystem Size Used Avail Use% Mounted on
1355 # /dev/hdv1 806G 264G 501G 35% /
1356 # none 16M 36K 16M 1% /tmp
1358 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1359 def sfa_install_client(self):
1360 "yum install sfa-client"
1361 first_try=self.yum_install("sfa-client")
1362 if first_try: return True
1363 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1364 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1365 utils.header("cached_rpm_path=<<%s>>"%cached_rpm_path)
1367 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1368 return self.yum_check_installed ("sfa-client")
1370 def sfa_dbclean(self):
1371 "thoroughly wipes off the SFA database"
1372 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1373 self.run_in_guest("sfa-nuke.py")==0 or \
1374 self.run_in_guest("sfa-nuke-plc.py")==0
1376 def sfa_fsclean(self):
1377 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1378 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1381 def sfa_plcclean(self):
1382 "cleans the PLC entries that were created as a side effect of running the script"
1384 sfa_spec=self.plc_spec['sfa']
1386 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1387 login_base=auth_sfa_spec['login_base']
1388 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1389 except: print "Site %s already absent from PLC db"%login_base
1391 for spec_name in ['pi_spec','user_spec']:
1392 user_spec=auth_sfa_spec[spec_name]
1393 username=user_spec['email']
1394 try: self.apiserver.DeletePerson(self.auth_root(),username)
1396 # this in fact is expected as sites delete their members
1397 #print "User %s already absent from PLC db"%username
1400 print "REMEMBER TO RUN sfa_import AGAIN"
1403 def sfa_uninstall(self):
1404 "uses rpm to uninstall sfa - ignore result"
1405 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1406 self.run_in_guest("rm -rf /var/lib/sfa")
1407 self.run_in_guest("rm -rf /etc/sfa")
1408 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1410 self.run_in_guest("rpm -e --noscripts sfa-plc")
1413 ### run unit tests for SFA
1414 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1415 # Running Transaction
1416 # Transaction couldn't start:
1417 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1418 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1419 # no matter how many Gbs are available on the testplc
1420 # could not figure out what's wrong, so...
1421 # if the yum install phase fails, consider the test is successful
1422 # other combinations will eventually run it hopefully
1423 def sfa_utest(self):
1424 "yum install sfa-tests and run SFA unittests"
1425 self.run_in_guest("yum -y install sfa-tests")
1426 # failed to install - forget it
1427 if self.run_in_guest("rpm -q sfa-tests")!=0:
1428 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1430 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1434 dirname="conf.%s"%self.plc_spec['name']
1435 if not os.path.isdir(dirname):
1436 utils.system("mkdir -p %s"%dirname)
1437 if not os.path.isdir(dirname):
1438 raise Exception,"Cannot create config dir for plc %s"%self.name()
1441 def conffile(self,filename):
1442 return "%s/%s"%(self.confdir(),filename)
1443 def confsubdir(self,dirname,clean,dry_run=False):
1444 subdirname="%s/%s"%(self.confdir(),dirname)
1446 utils.system("rm -rf %s"%subdirname)
1447 if not os.path.isdir(subdirname):
1448 utils.system("mkdir -p %s"%subdirname)
1449 if not dry_run and not os.path.isdir(subdirname):
1450 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1453 def conffile_clean (self,filename):
1454 filename=self.conffile(filename)
1455 return utils.system("rm -rf %s"%filename)==0
1458 def sfa_configure(self):
1459 "run sfa-config-tty"
1460 tmpname=self.conffile("sfa-config-tty")
1461 fileconf=open(tmpname,'w')
1462 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1463 'SFA_INTERFACE_HRN',
1464 'SFA_REGISTRY_LEVEL1_AUTH',
1465 'SFA_REGISTRY_HOST',
1466 'SFA_AGGREGATE_HOST',
1476 'SFA_GENERIC_FLAVOUR',
1477 'SFA_AGGREGATE_ENABLED',
1479 if self.plc_spec['sfa'].has_key(var):
1480 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1481 # the way plc_config handles booleans just sucks..
1484 if self.plc_spec['sfa'][var]: val='true'
1485 fileconf.write ('e %s\n%s\n'%(var,val))
1486 fileconf.write('w\n')
1487 fileconf.write('R\n')
1488 fileconf.write('q\n')
1490 utils.system('cat %s'%tmpname)
1491 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1494 def aggregate_xml_line(self):
1495 port=self.plc_spec['sfa']['neighbours-port']
1496 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1497 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1499 def registry_xml_line(self):
1500 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1501 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
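# e.g. (sketch, with placeholder values) such a registry line could read
#     <registry addr="192.0.2.10" hrn="onelab" port="12345"/>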
1504 # a cross step that takes all other plcs in argument
1505 def cross_sfa_configure(self, other_plcs):
1506 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1507 # of course with a single plc, other_plcs is an empty list
1510 agg_fname=self.conffile("agg.xml")
1511 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1512 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1513 utils.header ("(Over)wrote %s"%agg_fname)
1514 reg_fname=self.conffile("reg.xml")
1515 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1516 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1517 utils.header ("(Over)wrote %s"%reg_fname)
1518 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1519 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1521 def sfa_import(self):
1522 "use sfaadmin to import from plc"
1523 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1525 self.run_in_guest('sfaadmin reg import_registry')==0
1526 # not needed anymore
1527 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1529 def sfa_start(self):
1531 return self.run_in_guest('service sfa start')==0
1533 def sfi_configure(self):
1534 "Create /root/sfi on the plc side for sfi client configuration"
1535 if self.options.dry_run:
1536 utils.header("DRY RUN - skipping step")
1538 sfa_spec=self.plc_spec['sfa']
1539 # cannot use auth_sfa_mapper to pass dir_name
1540 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1541 test_slice=TestAuthSfa(self,slice_spec)
1542 dir_basename=os.path.basename(test_slice.sfi_path())
1543 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1544 test_slice.sfi_configure(dir_name)
1545 # push into the remote /root/sfi area
1546 location = test_slice.sfi_path()
1547 remote="%s/%s"%(self.vm_root_in_host(),location)
1548 self.test_ssh.mkdir(remote,abs=True)
1549 # need to strip the last level off remote, otherwise we get an extra dir level
1550 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1554 def sfi_clean (self):
1555 "clean up /root/sfi on the plc side"
1556 self.run_in_guest("rm -rf /root/sfi")
1560 def sfa_add_site (self): pass
1562 def sfa_add_pi (self): pass
1564 def sfa_add_user(self): pass
1566 def sfa_update_user(self): pass
1568 def sfa_add_slice(self): pass
1570 def sfa_renew_slice(self): pass
1572 def sfa_discover(self): pass
1574 def sfa_create_slice(self): pass
1576 def sfa_check_slice_plc(self): pass
1578 def sfa_update_slice(self): pass
1580 def sfi_list(self): pass
1582 def sfi_show(self): pass
1584 def ssh_slice_sfa(self): pass
1586 def sfa_delete_user(self): pass
1588 def sfa_delete_slice(self): pass
1592 self.run_in_guest('service sfa stop')==0
1595 def populate (self):
1596 "creates random entries in the PLCAPI"
1597 # install the stress-test in the plc image
1598 location = "/usr/share/plc_api/plcsh_stress_test.py"
1599 remote="%s/%s"%(self.vm_root_in_host(),location)
1600 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1602 command += " -- --preserve --short-names"
1603 local = (self.run_in_guest(command) == 0);
1604 # second run with --foreign
1605 command += ' --foreign'
1606 remote = (self.run_in_guest(command) == 0);
1607 return ( local and remote)
1609 def gather_logs (self):
1610 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1611 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1612 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1613 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1614 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1615 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1616 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1618 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1619 self.gather_var_logs ()
1621 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1622 self.gather_pgsql_logs ()
1624 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1625 self.gather_root_sfi ()
1627 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1628 for site_spec in self.plc_spec['sites']:
1629 test_site = TestSite (self,site_spec)
1630 for node_spec in site_spec['nodes']:
1631 test_node=TestNode(self,test_site,node_spec)
1632 test_node.gather_qemu_logs()
1634 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1635 self.gather_nodes_var_logs()
1637 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1638 self.gather_slivers_var_logs()
1641 def gather_slivers_var_logs(self):
1642 for test_sliver in self.all_sliver_objs():
1643 remote = test_sliver.tar_var_logs()
1644 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1645 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1646 utils.system(command)
1649 def gather_var_logs (self):
1650 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1651 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1652 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1653 utils.system(command)
1654 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1655 utils.system(command)
1657 def gather_pgsql_logs (self):
1658 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1659 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1660 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1661 utils.system(command)
1663 def gather_root_sfi (self):
1664 utils.system("mkdir -p logs/sfi.%s"%self.name())
1665 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1666 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1667 utils.system(command)
1669 def gather_nodes_var_logs (self):
1670 for site_spec in self.plc_spec['sites']:
1671 test_site = TestSite (self,site_spec)
1672 for node_spec in site_spec['nodes']:
1673 test_node=TestNode(self,test_site,node_spec)
1674 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1675 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1676 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1677 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1678 utils.system(command)
1681 # returns the filename to use for sql dump/restore, using options.dbname if set
1682 def dbfile (self, database):
1683 # uses options.dbname if it is found
1685 name=self.options.dbname
1686 if not isinstance(name,StringTypes):
1692 return "/root/%s-%s.sql"%(database,name)
1694 def plc_db_dump(self):
1695 'dump the planetlab5 DB in /root in the PLC - filename has time'
1696 dump=self.dbfile("planetlab5")
1697 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1698 utils.header('Dumped planetlab5 database in %s'%dump)
1701 def plc_db_restore(self):
1702 'restore the planetlab5 DB - looks broken, but run -n might help'
1703 dump=self.dbfile("planetlab5")
1704 ##stop httpd service
1705 self.run_in_guest('service httpd stop')
1706 # xxx - need another wrapper
1707 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1708 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1709 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1710 ##start httpd service
1711 self.run_in_guest('service httpd start')
1713 utils.header('Database restored from ' + dump)
1716 def create_ignore_steps ():
1717 for step in TestPlc.default_steps + TestPlc.other_steps:
1718 # default step can have a plc qualifier
1719 if '@' in step: (step,qualifier)=step.split('@')
1720 # or be defined as forced or ignored by default
1721 for keyword in ['_ignore','_force']:
1722 if step.endswith (keyword): step=step.replace(keyword,'')
1723 if step == SEP or step == SEPSFA : continue
1724 method=getattr(TestPlc,step)
1726 wrapped=ignore_result(method)
1727 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1728 setattr(TestPlc, name, wrapped)
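# so that e.g. 'ssh_slice_again_ignore' (listed in default_steps above) ends up defined as
# ssh_slice_again wrapped with ignore_result - the commented-out stubs below are only kept
# as a reminder of the steps generated this way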
1731 # def ssh_slice_again_ignore (self): pass
1733 # def check_initscripts_ignore (self): pass
1735 def standby_1_through_20(self):
1736 """convenience function to wait for a specified number of minutes"""
1739 def standby_1(): pass
1741 def standby_2(): pass
1743 def standby_3(): pass
1745 def standby_4(): pass
1747 def standby_5(): pass
1749 def standby_6(): pass
1751 def standby_7(): pass
1753 def standby_8(): pass
1755 def standby_9(): pass
1757 def standby_10(): pass
1759 def standby_11(): pass
1761 def standby_12(): pass
1763 def standby_13(): pass
1765 def standby_14(): pass
1767 def standby_15(): pass
1769 def standby_16(): pass
1771 def standby_17(): pass
1773 def standby_18(): pass
1775 def standby_19(): pass
1777 def standby_20(): pass