1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d minutes'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
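# example: a method named standby_5 becomes a step that waits 5 minutes,
# since the duration is parsed out of the decorated function's name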
42 def node_mapper (method):
43 def actual(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # restore the doc text
50 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
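# usage sketch - presumably the pattern behind the bare step stubs further down:
#   @node_mapper
#   def qemu_local_init (self): pass
# delegates the step to the same-named TestNode method on every node of every
# site, and ANDs the per-node results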
53 def slice_mapper (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
67 # run a step but return True so that we can go on
68 def ignore_result (method):
70 # ssh_slice_ignore->ssh_slice
71 ref_name=method.__name__.replace('_ignore','').replace('force_','')
72 ref_method=TestPlc.__dict__[ref_name]
73 result=ref_method(self)
74 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
75 return Ignored (result)
76 wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
79 # a variant that expects the TestSlice method to return a list of CompleterTasks that
80 # are then merged into a single Completer run to avoid waiting for all the slices
81 # especially useful when a test fails, of course
82 # because we need to pass arguments, we use a class rather than a plain decorator
83 class slice_mapper__tasks (object):
84 # could not get this to work with named arguments
85 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
86 self.timeout=timedelta(minutes=timeout_minutes)
87 self.silent=timedelta(minutes=silent_minutes)
88 self.period=timedelta(seconds=period_seconds)
89 def __call__ (self, method):
91 # compute augmented method name
92 method_name = method.__name__ + "__tasks"
94 slice_method = TestSlice.__dict__[ method_name ]
97 for slice_spec in self.plc_spec['slices']:
98 site_spec = self.locate_site (slice_spec['sitename'])
99 test_site = TestSite(self,site_spec)
100 test_slice=TestSlice(self,test_site,slice_spec)
101 tasks += slice_method (test_slice, self.options)
102 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
103 # restore the doc text from the TestSlice method even if a bit odd
104 wrappee.__doc__ = slice_method.__doc__
107 def auth_sfa_mapper (method):
110 auth_method = TestAuthSfa.__dict__[method.__name__]
111 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
112 test_auth=TestAuthSfa(self,auth_spec)
113 if not auth_method(test_auth,self.options): overall=False
115 # restore the doc text
116 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
120 def __init__ (self,result):
130 'vs_delete','timestamp_vs','vs_create', SEP,
131 # 'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
132 'plc_install', 'plc_configure', 'plc_start', SEP,
133 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
134 'plcapi_urls','speed_up_slices', SEP,
135 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
136 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
137 # keep this out of the way for now
138 # 'check_vsys_defaults', SEP,
139 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
140 'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
141 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
142 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
143 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
144 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
145 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
146 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
147 # but as the stress test might take a while, we sometimes missed the debug mode..
148 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
149 'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
150 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
151 'cross_check_tcp@1', 'check_system_slice', SEP,
152 # check slices are turned off properly
153 'empty_slices', 'ssh_slice_off', SEP,
154 # check they are properly re-created with the same name
155 'fill_slices', 'ssh_slice_again_ignore', SEP,
156 'gather_logs_force', SEP,
159 'export', 'show_boxes', SEP,
160 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
161 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
162 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
163 'delete_leases', 'list_leases', SEP,
165 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
166 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
167 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
168 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
169 'plc_db_dump' , 'plc_db_restore', SEP,
170 'check_netflow','check_drl', SEP,
171 'debug_nodemanager', SEP,
172 'standby_1_through_20','yes','no',SEP,
176 def printable_steps (list):
177 single_line=" ".join(list)+" "
178 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
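# example: printable_steps(['plc_install','plc_start',SEP,'sites']) renders as
#   plc_install plc_start \
#   sites
# i.e. a single space-separated line, with SEP/SEPSFA markers turned into
# backslash-newline continuations for readable display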
180 def valid_step (step):
181 return step != SEP and step != SEPSFA
183 # turn off the sfa-related steps when build has skipped SFA
184 # this was originally for centos5, but it is still relevant
185 # up to f12, as recent SFAs with sqlalchemy won't build before f14
187 def check_whether_build_has_sfa (rpms_url):
188 utils.header ("Checking if build provides SFA package...")
189 # warning, we're now building 'sface' so let's be a bit more picky
190 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
191 # full builds are expected to return with 0 here
193 utils.header("build does provide SFA")
195 # move all steps containing 'sfa' from default_steps to other_steps
196 utils.header("SFA package not found - removing steps with sfa or sfi")
197 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
198 TestPlc.other_steps += sfa_steps
199 for step in sfa_steps: TestPlc.default_steps.remove(step)
201 def __init__ (self,plc_spec,options):
202 self.plc_spec=plc_spec
204 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
205 self.vserverip=plc_spec['vserverip']
206 self.vservername=plc_spec['vservername']
207 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
208 self.apiserver=TestApiserver(self.url,options.dry_run)
209 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
210 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
212 def has_addresses_api (self):
213 return self.apiserver.has_method('AddIpAddress')
216 name=self.plc_spec['name']
217 return "%s.%s"%(name,self.vservername)
220 return self.plc_spec['host_box']
223 return self.test_ssh.is_local()
225 # define the API methods on this object through xmlrpc
226 # would help, but not strictly necessary
230 def actual_command_in_guest (self,command):
231 return self.test_ssh.actual_command(self.host_to_guest(command),dry_run=self.options.dry_run)
233 def start_guest (self):
234 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
236 def stop_guest (self):
237 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
239 def run_in_guest (self,command):
240 return utils.system(self.actual_command_in_guest(command))
242 def run_in_host (self,command):
243 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
245 #command gets run in the plc's vm
246 def host_to_guest(self,command):
247 if self.options.plcs_use_lxc:
248 return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
250 return "vserver %s exec %s"%(self.vservername,command)
252 def vm_root_in_host(self):
253 if self.options.plcs_use_lxc:
254 return "/vservers/%s/"%(self.vservername)
256 return "/vservers/%s"%(self.vservername)
258 def vm_timestamp_path (self):
259 if self.options.plcs_use_lxc:
260 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
262 return "/vservers/%s.timestamp"%(self.vservername)
264 #start/stop the vserver
265 def start_guest_in_host(self):
266 if self.options.plcs_use_lxc:
267 return "virsh -c lxc:// start %s"%(self.vservername)
269 return "vserver %s start"%(self.vservername)
271 def stop_guest_in_host(self):
272 if self.options.plcs_use_lxc:
273 return "virsh -c lxc:// destroy %s"%(self.vservername)
275 return "vserver %s stop"%(self.vservername)
278 def run_in_guest_piped (self,local,remote):
279 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
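# e.g. run_in_guest_piped('cat answers.txt','plc-config-tty') - answers.txt
# being a hypothetical local file - runs, on the local box,
#   cat answers.txt | <ssh to host_box> '<host_to_guest("plc-config-tty")>'
# i.e. local stdout gets piped into a command running inside the guest;
# plc_configure and sfa_configure below rely on this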
281 def yum_check_installed (self, rpms):
282 if isinstance (rpms, list):
284 return self.run_in_guest("rpm -q %s"%rpms)==0
286 # does a yum install in the vs; ignore yum's return code and check with rpm instead
287 def yum_install (self, rpms):
288 if isinstance (rpms, list):
290 self.run_in_guest("yum -y install %s"%rpms)
291 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
292 self.run_in_guest("yum-complete-transaction -y")
293 return self.yum_check_installed (rpms)
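# e.g. yum_install(['myplc','gcc']) - gcc being a hypothetical extra - amounts to
#   yum -y install myplc gcc
#   yum-complete-transaction -y
#   rpm -q myplc gcc     <- only this last exit code decides success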
295 def auth_root (self):
296 return {'Username':self.plc_spec['PLC_ROOT_USER'],
297 'AuthMethod':'password',
298 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
299 'Role' : self.plc_spec['role']
301 def locate_site (self,sitename):
302 for site in self.plc_spec['sites']:
303 if site['site_fields']['name'] == sitename:
305 if site['site_fields']['login_base'] == sitename:
307 raise Exception,"Cannot locate site %s"%sitename
309 def locate_node (self,nodename):
310 for site in self.plc_spec['sites']:
311 for node in site['nodes']:
312 if node['name'] == nodename:
314 raise Exception,"Cannot locate node %s"%nodename
316 def locate_hostname (self,hostname):
317 for site in self.plc_spec['sites']:
318 for node in site['nodes']:
319 if node['node_fields']['hostname'] == hostname:
321 raise Exception,"Cannot locate hostname %s"%hostname
323 def locate_key (self,key_name):
324 for key in self.plc_spec['keys']:
325 if key['key_name'] == key_name:
327 raise Exception,"Cannot locate key %s"%key_name
329 def locate_private_key_from_key_names (self, key_names):
330 # locate the first avail. key
332 for key_name in key_names:
333 key_spec=self.locate_key(key_name)
334 test_key=TestKey(self,key_spec)
335 publickey=test_key.publicpath()
336 privatekey=test_key.privatepath()
337 if os.path.isfile(publickey) and os.path.isfile(privatekey):
339 if found: return privatekey
342 def locate_slice (self, slicename):
343 for slice in self.plc_spec['slices']:
344 if slice['slice_fields']['name'] == slicename:
346 raise Exception,"Cannot locate slice %s"%slicename
348 def all_sliver_objs (self):
350 for slice_spec in self.plc_spec['slices']:
351 slicename = slice_spec['slice_fields']['name']
352 for nodename in slice_spec['nodenames']:
353 result.append(self.locate_sliver_obj (nodename,slicename))
356 def locate_sliver_obj (self,nodename,slicename):
357 (site,node) = self.locate_node(nodename)
358 slice = self.locate_slice (slicename)
360 test_site = TestSite (self, site)
361 test_node = TestNode (self, test_site,node)
362 # xxx the slice site is assumed to be the node site - mhh - probably harmless
363 test_slice = TestSlice (self, test_site, slice)
364 return TestSliver (self, test_node, test_slice)
366 def locate_first_node(self):
367 nodename=self.plc_spec['slices'][0]['nodenames'][0]
368 (site,node) = self.locate_node(nodename)
369 test_site = TestSite (self, site)
370 test_node = TestNode (self, test_site,node)
373 def locate_first_sliver (self):
374 slice_spec=self.plc_spec['slices'][0]
375 slicename=slice_spec['slice_fields']['name']
376 nodename=slice_spec['nodenames'][0]
377 return self.locate_sliver_obj(nodename,slicename)
379 # all different hostboxes used in this plc
380 def get_BoxNodes(self):
381 # maps on sites and nodes, return [ (host_box,test_node) ]
383 for site_spec in self.plc_spec['sites']:
384 test_site = TestSite (self,site_spec)
385 for node_spec in site_spec['nodes']:
386 test_node = TestNode (self, test_site, node_spec)
387 if not test_node.is_real():
388 tuples.append( (test_node.host_box(),test_node) )
389 # transform into a dict { 'host_box' -> [ test_node .. ] }
391 for (box,node) in tuples:
392 if not result.has_key(box):
395 result[box].append(node)
398 # a step for checking this stuff
399 def show_boxes (self):
400 'print summary of nodes location'
401 for (box,nodes) in self.get_BoxNodes().iteritems():
402 print box,":"," + ".join( [ node.name() for node in nodes ] )
405 # make this a valid step
406 def qemu_kill_all(self):
407 'kill all qemu instances on the qemu boxes involved by this setup'
408 # this is the brute force version, kill all qemus on that host box
409 for (box,nodes) in self.get_BoxNodes().iteritems():
410 # pass the first nodename, as we don't push template-qemu on testboxes
411 nodedir=nodes[0].nodedir()
412 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
415 # make this a valid step
416 def qemu_list_all(self):
417 'list all qemu instances on the qemu boxes involved by this setup'
418 for (box,nodes) in self.get_BoxNodes().iteritems():
419 # this is the brute force version, kill all qemus on that host box
420 TestBoxQemu(box,self.options.buildname).qemu_list_all()
423 # list only the qemus related to this test
424 def qemu_list_mine(self):
425 'list qemu instances for our nodes'
426 for (box,nodes) in self.get_BoxNodes().iteritems():
427 # the fine-grain version
432 # clean up only the qemus related to this test
433 def qemu_clean_mine(self):
434 'cleanup (rm -rf) qemu instances for our nodes'
435 for (box,nodes) in self.get_BoxNodes().iteritems():
436 # the fine-grain version
441 # kill only the right qemus
442 def qemu_kill_mine(self):
443 'kill the qemu instances for our nodes'
444 for (box,nodes) in self.get_BoxNodes().iteritems():
445 # the fine-grain version
450 #################### display config
452 "show test configuration after localization"
457 # ugly hack to make sure 'run export' only reports about the 1st plc
458 # to avoid confusion - also we use 'inri_slice1' in various aliases..
461 "print cut'n paste-able stuff to export env variables to your shell"
462 # guess local domain from hostname
463 if TestPlc.exported_id>1:
464 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
466 TestPlc.exported_id+=1
467 domain=socket.gethostname().split('.',1)[1]
468 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
469 print "export BUILD=%s"%self.options.buildname
470 if self.options.plcs_use_lxc:
471 print "export PLCHOSTLXC=%s"%fqdn
473 print "export PLCHOSTVS=%s"%fqdn
474 print "export GUESTNAME=%s"%self.plc_spec['vservername']
475 vplcname=self.plc_spec['vservername'].split('-')[-1]
476 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
477 # find hostname of first node
478 (hostname,qemubox) = self.all_node_infos()[0]
479 print "export KVMHOST=%s.%s"%(qemubox,domain)
480 print "export NODE=%s"%(hostname)
484 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
485 def show_pass (self,passno):
486 for (key,val) in self.plc_spec.iteritems():
487 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
491 self.display_site_spec(site)
492 for node in site['nodes']:
493 self.display_node_spec(node)
494 elif key=='initscripts':
495 for initscript in val:
496 self.display_initscript_spec (initscript)
499 self.display_slice_spec (slice)
502 self.display_key_spec (key)
504 if key not in ['sites','initscripts','slices','keys', 'sfa']:
505 print '+ ',key,':',val
507 def display_site_spec (self,site):
508 print '+ ======== site',site['site_fields']['name']
509 for (k,v) in site.iteritems():
510 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
513 print '+ ','nodes : ',
515 print node['node_fields']['hostname'],'',
521 print user['name'],'',
523 elif k == 'site_fields':
524 print '+ login_base',':',v['login_base']
525 elif k == 'address_fields':
531 def display_initscript_spec (self,initscript):
532 print '+ ======== initscript',initscript['initscript_fields']['name']
534 def display_key_spec (self,key):
535 print '+ ======== key',key['key_name']
537 def display_slice_spec (self,slice):
538 print '+ ======== slice',slice['slice_fields']['name']
539 for (k,v) in slice.iteritems():
552 elif k=='slice_fields':
553 print '+ fields',':',
554 print 'max_nodes=',v['max_nodes'],
559 def display_node_spec (self,node):
560 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
561 print "hostname=",node['node_fields']['hostname'],
562 print "ip=",node['interface_fields']['ip']
563 if self.options.verbose:
564 utils.pprint("node details",node,depth=3)
566 # another entry point for just showing the boxes involved
567 def display_mapping (self):
568 TestPlc.display_mapping_plc(self.plc_spec)
572 def display_mapping_plc (plc_spec):
573 print '+ MyPLC',plc_spec['name']
574 # WARNING this would not be right for lxc-based PLC's - should be harmless though
575 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
576 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
577 for site_spec in plc_spec['sites']:
578 for node_spec in site_spec['nodes']:
579 TestPlc.display_mapping_node(node_spec)
582 def display_mapping_node (node_spec):
583 print '+ NODE %s'%(node_spec['name'])
584 print '+\tqemu box %s'%node_spec['host_box']
585 print '+\thostname=%s'%node_spec['node_fields']['hostname']
587 # write a timestamp in /vservers/<>.timestamp
588 # cannot be inside the vserver, as that would cause 'vserver .. build' to cough
589 def timestamp_vs (self):
590 "Create a timestamp to remember creation date for this plc"
592 # TODO-lxc check this one
593 # a first approx. is to store the timestamp close to the VM root like vs does
594 stamp_path=self.vm_timestamp_path ()
595 stamp_dir = os.path.dirname (stamp_path)
596 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
597 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
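# e.g. in lxc mode this leaves /vservers/<vservername>/<vservername>.timestamp
# holding the creation time as seconds since the epoch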
599 # this is called unconditionally at the beginning of the test sequence
600 # just in case this is a rerun - so it's fine if the vm is not running
602 "vserver delete the test myplc"
603 stamp_path=self.vm_timestamp_path()
604 self.run_in_host("rm -f %s"%stamp_path)
605 if self.options.plcs_use_lxc:
606 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
607 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
608 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
611 self.run_in_host("vserver --silent %s delete"%self.vservername)
615 # historically the build was being fetched by the tests
616 # now the build pushes itself as a subdir of the tests workdir
617 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
618 def vs_create (self):
619 "vserver creation (no install done)"
620 # push the local build/ dir to the testplc box
622 # a full path for the local calls
623 build_dir=os.path.dirname(sys.argv[0])
624 # sometimes this is empty - set to "." in such a case
625 if not build_dir: build_dir="."
626 build_dir += "/build"
628 # use a standard name - will be relative to remote buildname
630 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
631 self.test_ssh.rmdir(build_dir)
632 self.test_ssh.copy(build_dir,recursive=True)
633 # the repo url is taken from arch-rpms-url
634 # with the last path component (e.g. i386) removed
635 repo_url = self.options.arch_rpms_url
636 for level in [ 'arch' ]:
637 repo_url = os.path.dirname(repo_url)
639 # invoke initvm (drop support for vs)
640 script="lbuild-initvm.sh"
642 # pass the vbuild-nightly options to [lv]test-initvm
643 script_options += " -p %s"%self.options.personality
644 script_options += " -d %s"%self.options.pldistro
645 script_options += " -f %s"%self.options.fcdistro
646 script_options += " -r %s"%repo_url
647 vserver_name = self.vservername
649 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
650 script_options += " -n %s"%vserver_hostname
652 print "Cannot reverse lookup %s"%self.vserverip
653 print "This is considered fatal, as this might pollute the test results"
655 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
656 return self.run_in_host(create_vserver) == 0
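# for the record, the assembled command looks roughly like (hypothetical values):
#   ./build/lbuild-initvm.sh -p linux64 -d planetlab -f f20 \
#       -r http://build.example.org/RPMS -n vplc1.example.org <vservername>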
659 def plc_install(self):
660 "yum install myplc, noderepo, and the plain bootstrapfs"
662 # workaround for getting pgsql8.2 on centos5
663 if self.options.fcdistro == "centos5":
664 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
667 if self.options.personality == "linux32":
669 elif self.options.personality == "linux64":
672 raise Exception, "Unsupported personality %r"%self.options.personality
673 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
676 pkgs_list.append ("slicerepo-%s"%nodefamily)
677 pkgs_list.append ("myplc")
678 pkgs_list.append ("noderepo-%s"%nodefamily)
679 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
680 pkgs_string=" ".join(pkgs_list)
681 return self.yum_install (pkgs_list)
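# example: with pldistro=planetlab and fcdistro=f14, and assuming the elided
# branches map linux32 to arch i386, nodefamily is 'planetlab-f14-i386' and the
# install set contains e.g. myplc, noderepo-planetlab-f14-i386 and
# nodeimage-planetlab-f14-i386-plain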
684 def mod_python(self):
685 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
686 return self.yum_install ( [ 'mod_python' ] )
689 def plc_configure(self):
691 tmpname='%s.plc-config-tty'%(self.name())
692 fileconf=open(tmpname,'w')
693 for var in [ 'PLC_NAME',
698 'PLC_MAIL_SUPPORT_ADDRESS',
701 # Above line was added for integrating SFA Testing
707 'PLC_RESERVATION_GRANULARITY',
709 'PLC_OMF_XMPP_SERVER',
712 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
713 fileconf.write('w\n')
714 fileconf.write('q\n')
716 utils.system('cat %s'%tmpname)
717 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
718 utils.system('rm %s'%tmpname)
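# the generated answer file drives plc-config-tty non-interactively; it looks like
#   e PLC_NAME
#   <value from plc_spec>
#   ...one 'e <var>'/value pair per variable...
#   w
#   q
# i.e. edit each variable in turn, then write the config and quit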
723 self.run_in_guest('service plc start')
728 self.run_in_guest('service plc stop')
732 "start the PLC vserver"
737 "stop the PLC vserver"
741 # stores the keys from the config for further use
742 def keys_store(self):
743 "stores test users ssh keys in keys/"
744 for key_spec in self.plc_spec['keys']:
745 TestKey(self,key_spec).store_key()
748 def keys_clean(self):
749 "removes keys cached in keys/"
750 utils.system("rm -rf ./keys")
753 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
754 # for later direct access to the nodes
755 def keys_fetch(self):
756 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
758 if not os.path.isdir(dir):
760 vservername=self.vservername
761 vm_root=self.vm_root_in_host()
763 prefix = 'debug_ssh_key'
764 for ext in [ 'pub', 'rsa' ] :
765 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
766 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
767 if self.test_ssh.fetch(src,dst) != 0: overall=False
771 "create sites with PLCAPI"
772 return self.do_sites()
774 def delete_sites (self):
775 "delete sites with PLCAPI"
776 return self.do_sites(action="delete")
778 def do_sites (self,action="add"):
779 for site_spec in self.plc_spec['sites']:
780 test_site = TestSite (self,site_spec)
781 if (action != "add"):
782 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
783 test_site.delete_site()
784 # users are deleted along with the site
785 #test_site.delete_users()
788 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
789 test_site.create_site()
790 test_site.create_users()
793 def delete_all_sites (self):
794 "Delete all sites in PLC, and related objects"
795 print 'auth_root',self.auth_root()
796 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
798 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
799 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
800 site_id=site['site_id']
801 print 'Deleting site_id',site_id
802 self.apiserver.DeleteSite(self.auth_root(),site_id)
806 "create nodes with PLCAPI"
807 return self.do_nodes()
808 def delete_nodes (self):
809 "delete nodes with PLCAPI"
810 return self.do_nodes(action="delete")
812 def do_nodes (self,action="add"):
813 for site_spec in self.plc_spec['sites']:
814 test_site = TestSite (self,site_spec)
816 utils.header("Deleting nodes in site %s"%test_site.name())
817 for node_spec in site_spec['nodes']:
818 test_node=TestNode(self,test_site,node_spec)
819 utils.header("Deleting %s"%test_node.name())
820 test_node.delete_node()
822 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
823 for node_spec in site_spec['nodes']:
824 utils.pprint('Creating node %s'%node_spec,node_spec)
825 test_node = TestNode (self,test_site,node_spec)
826 test_node.create_node ()
829 def nodegroups (self):
830 "create nodegroups with PLCAPI"
831 return self.do_nodegroups("add")
832 def delete_nodegroups (self):
833 "delete nodegroups with PLCAPI"
834 return self.do_nodegroups("delete")
838 def translate_timestamp (start,grain,timestamp):
839 if timestamp < TestPlc.YEAR: return start+timestamp*grain
840 else: return timestamp
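# example, assuming TestPlc.YEAR is about one year in seconds:
#   translate_timestamp(start=1356000000, grain=1800, timestamp=2)
#     -> 1356003600, i.e. small values are relative: that many grains after start
#   translate_timestamp(start=1356000000, grain=1800, timestamp=1356007200)
#     -> 1356007200, i.e. large values are taken as absolute epoch timestamps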
843 def timestamp_printable (timestamp):
844 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
847 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
849 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
850 print 'API answered grain=',grain
851 start=(now/grain)*grain
853 # find out all nodes that are reservable
854 nodes=self.all_reservable_nodenames()
856 utils.header ("No reservable node found - proceeding without leases")
859 # attach them to the leases as specified in plc_specs
860 # this is where the 'leases' field gets interpreted as relative or absolute
861 for lease_spec in self.plc_spec['leases']:
862 # skip the ones that come with a null slice id
863 if not lease_spec['slice']: continue
864 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
865 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
866 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
867 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
868 if lease_addition['errors']:
869 utils.header("Cannot create leases, %s"%lease_addition['errors'])
872 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
873 (nodes,lease_spec['slice'],
874 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
875 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
879 def delete_leases (self):
880 "remove all leases in the myplc side"
881 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
882 utils.header("Cleaning leases %r"%lease_ids)
883 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
886 def list_leases (self):
887 "list all leases known to the myplc"
888 leases = self.apiserver.GetLeases(self.auth_root())
891 current=l['t_until']>=now
892 if self.options.verbose or current:
893 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
894 TestPlc.timestamp_printable(l['t_from']),
895 TestPlc.timestamp_printable(l['t_until'])))
898 # create nodegroups if needed, and populate
899 def do_nodegroups (self, action="add"):
900 # 1st pass to scan contents
902 for site_spec in self.plc_spec['sites']:
903 test_site = TestSite (self,site_spec)
904 for node_spec in site_spec['nodes']:
905 test_node=TestNode (self,test_site,node_spec)
906 if node_spec.has_key('nodegroups'):
907 nodegroupnames=node_spec['nodegroups']
908 if isinstance(nodegroupnames,StringTypes):
909 nodegroupnames = [ nodegroupnames ]
910 for nodegroupname in nodegroupnames:
911 if not groups_dict.has_key(nodegroupname):
912 groups_dict[nodegroupname]=[]
913 groups_dict[nodegroupname].append(test_node.name())
914 auth=self.auth_root()
916 for (nodegroupname,group_nodes) in groups_dict.iteritems():
918 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
919 # first, check if the nodetagtype is here
920 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
922 tag_type_id = tag_types[0]['tag_type_id']
924 tag_type_id = self.apiserver.AddTagType(auth,
925 {'tagname':nodegroupname,
926 'description': 'for nodegroup %s'%nodegroupname,
928 print 'located tag (type)',nodegroupname,'as',tag_type_id
930 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
932 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
933 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
934 # set node tag on all nodes, value='yes'
935 for nodename in group_nodes:
937 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
939 traceback.print_exc()
940 print 'node',nodename,'seems to already have tag',nodegroupname
943 expect_yes = self.apiserver.GetNodeTags(auth,
944 {'hostname':nodename,
945 'tagname':nodegroupname},
946 ['value'])[0]['value']
947 if expect_yes != "yes":
948 print 'Mismatch node tag on node',nodename,'got',expect_yes
951 if not self.options.dry_run:
952 print 'Cannot find tag',nodegroupname,'on node',nodename
956 print 'cleaning nodegroup',nodegroupname
957 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
959 traceback.print_exc()
963 # a list of TestNode objs
964 def all_nodes (self):
966 for site_spec in self.plc_spec['sites']:
967 test_site = TestSite (self,site_spec)
968 for node_spec in site_spec['nodes']:
969 nodes.append(TestNode (self,test_site,node_spec))
972 # return a list of tuples (nodename,qemuname)
973 def all_node_infos (self) :
975 for site_spec in self.plc_spec['sites']:
976 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
977 for node_spec in site_spec['nodes'] ]
980 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
981 def all_reservable_nodenames (self):
983 for site_spec in self.plc_spec['sites']:
984 for node_spec in site_spec['nodes']:
985 node_fields=node_spec['node_fields']
986 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
987 res.append(node_fields['hostname'])
990 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
991 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
992 if self.options.dry_run:
996 class CompleterTaskBootState (CompleterTask):
997 def __init__ (self, test_plc,hostname):
998 self.test_plc=test_plc
999 self.hostname=hostname
1000 self.last_boot_state='undef'
1001 def actual_run (self):
1003 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1005 self.last_boot_state = node['boot_state']
1006 return self.last_boot_state == target_boot_state
1010 return "CompleterTaskBootState with node %s"%self.hostname
1011 def failure_message (self):
1012 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1014 timeout = timedelta(minutes=timeout_minutes)
1015 graceout = timedelta(minutes=silent_minutes)
1016 period = timedelta(seconds=period_seconds)
1017 # the nodes that haven't been checked yet - start with a full list and shrink over time
1018 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1019 tasks = [ CompleterTaskBootState (self,hostname) \
1020 for (hostname,_) in self.all_node_infos() ]
1021 return Completer (tasks).run (timeout, graceout, period)
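# sketch of the Completer contract as used here (Completer itself lives in its
# own module): each task is retried every <period> until it succeeds or
# <timeout> expires, and per-task output stays muted during the first <graceout>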
1023 def nodes_booted(self):
1024 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1026 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1027 class CompleterTaskNodeSsh (CompleterTask):
1028 def __init__ (self, hostname, qemuname, boot_state, local_key):
1029 self.hostname=hostname
1030 self.qemuname=qemuname
1031 self.boot_state=boot_state
1032 self.local_key=local_key
1033 def run (self, silent):
1034 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
1035 return utils.system (command, silent=silent)==0
1036 def failure_message (self):
1037 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
1040 timeout = timedelta(minutes=timeout_minutes)
1041 graceout = timedelta(minutes=silent_minutes)
1042 period = timedelta(seconds=period_seconds)
1043 vservername=self.vservername
1046 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1049 local_key = "keys/key_admin.rsa"
1050 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1051 node_infos = self.all_node_infos()
1052 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
1053 for (nodename,qemuname) in node_infos ]
1054 return Completer (tasks).run (timeout, graceout, period)
1056 def ssh_node_debug(self):
1057 "Tries to ssh into nodes in debug mode with the debug ssh key"
1058 return self.check_nodes_ssh(debug=True,
1059 timeout_minutes=self.ssh_node_debug_timeout,
1060 silent_minutes=self.ssh_node_debug_silent)
1062 def ssh_node_boot(self):
1063 "Tries to ssh into nodes in production mode with the root ssh key"
1064 return self.check_nodes_ssh(debug=False,
1065 timeout_minutes=self.ssh_node_boot_timeout,
1066 silent_minutes=self.ssh_node_boot_silent)
1068 def node_bmlogs(self):
1069 "Checks that there's a non-empty dir. /var/log/bm/raw"
1070 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1073 def qemu_local_init (self): pass
1075 def bootcd (self): pass
1077 def qemu_local_config (self): pass
1079 def nodestate_reinstall (self): pass
1081 def nodestate_safeboot (self): pass
1083 def nodestate_boot (self): pass
1085 def nodestate_show (self): pass
1087 def qemu_export (self): pass
1089 ### check hooks : invoke scripts from hooks/{node,slice}
1090 def check_hooks_node (self):
1091 return self.locate_first_node().check_hooks()
1092 def check_hooks_sliver (self) :
1093 return self.locate_first_sliver().check_hooks()
1095 def check_hooks (self):
1096 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1097 return self.check_hooks_node() and self.check_hooks_sliver()
1100 def do_check_initscripts(self):
1101 class CompleterTaskInitscript (CompleterTask):
1102 def __init__ (self, test_sliver, stamp):
1103 self.test_sliver=test_sliver
1105 def actual_run (self):
1106 return self.test_sliver.check_initscript_stamp (self.stamp)
1108 return "initscript checker for %s"%self.test_sliver.name()
1109 def failure_message (self):
1110 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1113 for slice_spec in self.plc_spec['slices']:
1114 if not slice_spec.has_key('initscriptstamp'):
1116 stamp=slice_spec['initscriptstamp']
1117 slicename=slice_spec['slice_fields']['name']
1118 for nodename in slice_spec['nodenames']:
1119 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1120 (site,node) = self.locate_node (nodename)
1121 # xxx - passing the wrong site - probably harmless
1122 test_site = TestSite (self,site)
1123 test_slice = TestSlice (self,test_site,slice_spec)
1124 test_node = TestNode (self,test_site,node)
1125 test_sliver = TestSliver (self, test_node, test_slice)
1126 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1127 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1129 def check_initscripts(self):
1130 "check that the initscripts have triggered"
1131 return self.do_check_initscripts()
1133 def initscripts (self):
1134 "create initscripts with PLCAPI"
1135 for initscript in self.plc_spec['initscripts']:
1136 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1137 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1140 def delete_initscripts (self):
1141 "delete initscripts with PLCAPI"
1142 for initscript in self.plc_spec['initscripts']:
1143 initscript_name = initscript['initscript_fields']['name']
1144 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1146 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1147 print initscript_name,'deleted'
1149 print 'deletion went wrong - probably did not exist'
1154 "create slices with PLCAPI"
1155 return self.do_slices(action="add")
1157 def delete_slices (self):
1158 "delete slices with PLCAPI"
1159 return self.do_slices(action="delete")
1161 def fill_slices (self):
1162 "add nodes in slices with PLCAPI"
1163 return self.do_slices(action="fill")
1165 def empty_slices (self):
1166 "remove nodes from slices with PLCAPI"
1167 return self.do_slices(action="empty")
1169 def do_slices (self, action="add"):
1170 for slice in self.plc_spec['slices']:
1171 site_spec = self.locate_site (slice['sitename'])
1172 test_site = TestSite(self,site_spec)
1173 test_slice=TestSlice(self,test_site,slice)
1174 if action == "delete":
1175 test_slice.delete_slice()
1176 elif action=="fill":
1177 test_slice.add_nodes()
1178 elif action=="empty":
1179 test_slice.delete_nodes()
1181 test_slice.create_slice()
1184 @slice_mapper__tasks(20,10,15)
1185 def ssh_slice(self): pass
1186 @slice_mapper__tasks(20,19,15)
1187 def ssh_slice_off (self): pass
1189 # use another name so we can exclude/ignore it from the tests on the nightly command line
1190 def ssh_slice_again(self): return self.ssh_slice()
1191 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1192 # but for some reason the ignore-wrapping thing would not
1195 def ssh_slice_basics(self): pass
1198 def check_vsys_defaults(self): pass
1201 def keys_clear_known_hosts (self): pass
1203 def plcapi_urls (self):
1204 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1206 def speed_up_slices (self):
1207 "tweak nodemanager settings on all nodes using a conf file"
1208 # create the template on the server-side
1209 template="%s.nodemanager"%self.name()
1210 template_file = open (template,"w")
1211 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1212 template_file.close()
1213 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1214 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1215 self.test_ssh.copy_abs(template,remote)
1217 self.apiserver.AddConfFile (self.auth_root(),
1218 {'dest':'/etc/sysconfig/nodemanager',
1219 'source':'PlanetLabConf/nodemanager',
1220 'postinstall_cmd':'service nm restart',})
1223 def debug_nodemanager (self):
1224 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1225 template="%s.nodemanager"%self.name()
1226 template_file = open (template,"w")
1227 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1228 template_file.close()
1229 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1230 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1231 self.test_ssh.copy_abs(template,remote)
1235 def qemu_start (self) : pass
1238 def timestamp_qemu (self) : pass
1240 # when a spec refers to a node possibly on another plc
1241 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1242 for plc in [ self ] + other_plcs:
1244 return plc.locate_sliver_obj (nodename, slicename)
1247 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1249 # implement this one as a cross step so that we can take advantage of different nodes
1250 # in multi-plcs mode
1251 def cross_check_tcp (self, other_plcs):
1252 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1253 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1254 utils.header ("check_tcp: no/empty config found")
1256 specs = self.plc_spec['tcp_specs']
1261 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1262 if not s_test_sliver.run_tcp_server(port,timeout=20):
1266 # idem for the client side
1267 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1268 # use the nodename from the located sliver, unless 'client_connect' is set
1269 if 'client_connect' in spec:
1270 destination = spec['client_connect']
1272 destination=s_test_sliver.test_node.name()
1273 if not c_test_sliver.run_tcp_client(destination,port):
1277 # painfully enough, we need to allow for some time as netflow might show up last
1278 def check_system_slice (self):
1279 "all nodes: check that a system slice is alive"
1280 # netflow currently not working in the lxc distro
1281 # drl not built at all in the wtx distro
1282 # if we find either of them we're happy
1283 return self.check_netflow() or self.check_drl()
1286 def check_netflow (self): return self._check_system_slice ('netflow')
1287 def check_drl (self): return self._check_system_slice ('drl')
1289 # we have the slices up already here, so it should not take too long
1290 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1291 class CompleterTaskSystemSlice (CompleterTask):
1292 def __init__ (self, test_node, dry_run):
1293 self.test_node=test_node
1294 self.dry_run=dry_run
1295 def actual_run (self):
1296 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1298 return "System slice %s @ %s"%(slicename, self.test_node.name())
1299 def failure_message (self):
1300 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1301 timeout = timedelta(minutes=timeout_minutes)
1302 silent = timedelta (0)
1303 period = timedelta (seconds=period_seconds)
1304 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1305 for test_node in self.all_nodes() ]
1306 return Completer (tasks) . run (timeout, silent, period)
1308 def plcsh_stress_test (self):
1309 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1310 # install the stress-test in the plc image
1311 location = "/usr/share/plc_api/plcsh_stress_test.py"
1312 remote="%s/%s"%(self.vm_root_in_host(),location)
1313 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1315 command += " -- --check"
1316 if self.options.size == 1:
1317 command += " --tiny"
1318 return ( self.run_in_guest(command) == 0)
1320 # populate runs the same utility with slightly different options
1321 # in particular it runs with --preserve (don't cleanup) and without --check
1322 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1324 def sfa_install_all (self):
1325 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1326 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1328 def sfa_install_core(self):
1330 return self.yum_install ("sfa")
1332 def sfa_install_plc(self):
1333 "yum install sfa-plc"
1334 return self.yum_install("sfa-plc")
1336 def sfa_install_sfatables(self):
1337 "yum install sfa-sfatables"
1338 return self.yum_install ("sfa-sfatables")
1340 # for some very odd reason, this sometimes fails with the following symptom
1341 # # yum install sfa-client
1342 # Setting up Install Process
1344 # Downloading Packages:
1345 # Running rpm_check_debug
1346 # Running Transaction Test
1347 # Transaction Test Succeeded
1348 # Running Transaction
1349 # Transaction couldn't start:
1350 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1351 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1352 # even though in the same context I have
1353 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1354 # Filesystem Size Used Avail Use% Mounted on
1355 # /dev/hdv1 806G 264G 501G 35% /
1356 # none 16M 36K 16M 1% /tmp
1358 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1359 def sfa_install_client(self):
1360 "yum install sfa-client"
1361 first_try=self.yum_install("sfa-client")
1362 if first_try: return True
1363 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1364 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1365 utils.header("rpm_path=<<%s>>"%rpm_path)
1367 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1368 return self.yum_check_installed ("sfa-client")
1370 def sfa_dbclean(self):
1371 "thoroughly wipes off the SFA database"
1372 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1373 self.run_in_guest("sfa-nuke.py")==0 or \
1374 self.run_in_guest("sfa-nuke-plc.py")==0
1376 def sfa_fsclean(self):
1377 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1378 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1381 def sfa_plcclean(self):
1382 "cleans the PLC entries that were created as a side effect of running the script"
1384 sfa_spec=self.plc_spec['sfa']
1386 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1387 login_base=auth_sfa_spec['login_base']
1388 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1389 except: print "Site %s already absent from PLC db"%login_base
1391 for spec_name in ['pi_spec','user_spec']:
1392 user_spec=auth_sfa_spec[spec_name]
1393 username=user_spec['email']
1394 try: self.apiserver.DeletePerson(self.auth_root(),username)
1396 # this in fact is expected as sites delete their members
1397 #print "User %s already absent from PLC db"%username
1400 print "REMEMBER TO RUN sfa_import AGAIN"
1403 def sfa_uninstall(self):
1404 "uses rpm to uninstall sfa - ignore result"
1405 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1406 self.run_in_guest("rm -rf /var/lib/sfa")
1407 self.run_in_guest("rm -rf /etc/sfa")
1408 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1410 self.run_in_guest("rpm -e --noscripts sfa-plc")
1413 ### run unit tests for SFA
1414 # NOTE: on f14/i386, yum install sfa-tests sometimes fails for no apparent reason
1415 # Running Transaction
1416 # Transaction couldn't start:
1417 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1418 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1419 # no matter how many Gbs are available on the testplc
1420 # could not figure out what's wrong, so...
1421 # if the yum install phase fails, consider the test is successful
1422 # other combinations will eventually run it hopefully
1423 def sfa_utest(self):
1424 "yum install sfa-tests and run SFA unittests"
1425 self.run_in_guest("yum -y install sfa-tests")
1426 # failed to install - forget it
1427 if self.run_in_guest("rpm -q sfa-tests")!=0:
1428 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1430 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1434 dirname="conf.%s"%self.plc_spec['name']
1435 if not os.path.isdir(dirname):
1436 utils.system("mkdir -p %s"%dirname)
1437 if not os.path.isdir(dirname):
1438 raise Exception,"Cannot create config dir for plc %s"%self.name()
1441 def conffile(self,filename):
1442 return "%s/%s"%(self.confdir(),filename)
1443 def confsubdir(self,dirname,clean,dry_run=False):
1444 subdirname="%s/%s"%(self.confdir(),dirname)
1446 utils.system("rm -rf %s"%subdirname)
1447 if not os.path.isdir(subdirname):
1448 utils.system("mkdir -p %s"%subdirname)
1449 if not dry_run and not os.path.isdir(subdirname):
1450 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1453 def conffile_clean (self,filename):
1454 filename=self.conffile(filename)
1455 return utils.system("rm -rf %s"%filename)==0
1458 def sfa_configure(self):
1459 "run sfa-config-tty"
1460 tmpname=self.conffile("sfa-config-tty")
1461 fileconf=open(tmpname,'w')
1462 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1463 'SFA_INTERFACE_HRN',
1464 'SFA_REGISTRY_LEVEL1_AUTH',
1465 'SFA_REGISTRY_HOST',
1466 'SFA_AGGREGATE_HOST',
1476 'SFA_GENERIC_FLAVOUR',
1477 'SFA_AGGREGATE_ENABLED',
1479 if self.plc_spec['sfa'].has_key(var):
1480 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1481 # the way plc_config handles booleans just sucks..
1484 if self.plc_spec['sfa'][var]: val='true'
1485 fileconf.write ('e %s\n%s\n'%(var,val))
1486 fileconf.write('w\n')
1487 fileconf.write('R\n')
1488 fileconf.write('q\n')
1490 utils.system('cat %s'%tmpname)
1491 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1494 def aggregate_xml_line(self):
1495 port=self.plc_spec['sfa']['neighbours-port']
1496 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1497 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1499 def registry_xml_line(self):
1500 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1501 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
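# e.g. (hypothetical values) these render as
#   <aggregate addr="10.0.0.2" hrn="plc1" port="12346"/>
#   <registry addr="10.0.0.2" hrn="plc1" port="12345"/>
# and get collected into aggregates.xml / registries.xml by cross_sfa_configure below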
1504 # a cross step that takes all other plcs in argument
1505 def cross_sfa_configure(self, other_plcs):
1506 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1507 # of course with a single plc, other_plcs is an empty list
1510 agg_fname=self.conffile("agg.xml")
1511 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1512 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1513 utils.header ("(Over)wrote %s"%agg_fname)
1514 reg_fname=self.conffile("reg.xml")
1515 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1516 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1517 utils.header ("(Over)wrote %s"%reg_fname)
1518 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1519 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1521 def sfa_import(self):
1522 "use sfaadmin to import from plc"
1523 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1525 return self.run_in_guest('sfaadmin reg import_registry')==0
1526 # not needed anymore
1527 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1529 def sfa_start(self):
1531 return self.run_in_guest('service sfa start')==0
1533 def sfi_configure(self):
1534 "Create /root/sfi on the plc side for sfi client configuration"
1535 if self.options.dry_run:
1536 utils.header("DRY RUN - skipping step")
1538 sfa_spec=self.plc_spec['sfa']
1539 # cannot use auth_sfa_mapper to pass dir_name
1540 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1541 test_slice=TestAuthSfa(self,slice_spec)
1542 dir_basename=os.path.basename(test_slice.sfi_path())
1543 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1544 test_slice.sfi_configure(dir_name)
1545 # push into the remote /root/sfi area
1546 location = test_slice.sfi_path()
1547 remote="%s/%s"%(self.vm_root_in_host(),location)
1548 self.test_ssh.mkdir(remote,abs=True)
1549 # need to strip the last level of remote, otherwise we get an extra dir level
1550 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1554 def sfi_clean (self):
1555 "clean up /root/sfi on the plc side"
1556 self.run_in_guest("rm -rf /root/sfi")
1560 def sfa_add_site (self): pass
1562 def sfa_add_pi (self): pass
1564 def sfa_add_user(self): pass
1566 def sfa_update_user(self): pass
1568 def sfa_add_slice(self): pass
1570 def sfa_renew_slice(self): pass
1572 def sfa_discover(self): pass
1574 def sfa_create_slice(self): pass
1576 def sfa_check_slice_plc(self): pass
1578 def sfa_update_slice(self): pass
1580 def sfi_list(self): pass
1582 def sfi_show(self): pass
1584 def ssh_slice_sfa(self): pass
1586 def sfa_delete_user(self): pass
1588 def sfa_delete_slice(self): pass
1592 return self.run_in_guest('service sfa stop')==0
1595 def populate (self):
1596 "creates random entries in the PLCAPI"
1597 # install the stress-test in the plc image
1598 location = "/usr/share/plc_api/plcsh_stress_test.py"
1599 remote="%s/%s"%(self.vm_root_in_host(),location)
1600 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1602 command += " -- --preserve --short-names"
1603 local = (self.run_in_guest(command) == 0)
1604 # second run with --foreign
1605 command += ' --foreign'
1606 remote = (self.run_in_guest(command) == 0)
1607 return (local and remote)
1609 def gather_logs (self):
1610 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1611 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1612 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1613 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1614 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1615 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1616 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1618 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1619 self.gather_var_logs ()
1621 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1622 self.gather_pgsql_logs ()
1624 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1625 self.gather_root_sfi ()
1627 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1628 for site_spec in self.plc_spec['sites']:
1629 test_site = TestSite (self,site_spec)
1630 for node_spec in site_spec['nodes']:
1631 test_node=TestNode(self,test_site,node_spec)
1632 test_node.gather_qemu_logs()
1634 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1635 self.gather_nodes_var_logs()
1637 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1638 self.gather_slivers_var_logs()
1641 def gather_slivers_var_logs(self):
1642 for test_sliver in self.all_sliver_objs():
1643 remote = test_sliver.tar_var_logs()
1644 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1645 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1646 utils.system(command)
1649 def gather_var_logs (self):
1650 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1651 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1652 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1653 utils.system(command)
1654 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1655 utils.system(command)
1657 def gather_pgsql_logs (self):
1658 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1659 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1660 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1661 utils.system(command)
1663 def gather_root_sfi (self):
1664 utils.system("mkdir -p logs/sfi.%s"%self.name())
1665 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1666 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1667 utils.system(command)
1669 def gather_nodes_var_logs (self):
1670 for site_spec in self.plc_spec['sites']:
1671 test_site = TestSite (self,site_spec)
1672 for node_spec in site_spec['nodes']:
1673 test_node=TestNode(self,test_site,node_spec)
1674 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1675 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1676 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1677 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1678 utils.system(command)
1681 # returns the filename to use for sql dump/restore, using options.dbname if set
1682 def dbfile (self, database):
1683 # uses options.dbname if it is found
1685 name=self.options.dbname
1686 if not isinstance(name,StringTypes):
1692 return "/root/%s-%s.sql"%(database,name)
1694 def plc_db_dump(self):
1695 'dump the planetlab5 DB in /root in the PLC - filename has time'
1696 dump=self.dbfile("planetlab5")
1697 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1698 utils.header('Dumped planetlab5 database in %s'%dump)
1701 def plc_db_restore(self):
1702 'restore the planetlab5 DB - looks broken, but run -n might help'
1703 dump=self.dbfile("planetlab5")
1704 ##stop httpd service
1705 self.run_in_guest('service httpd stop')
1706 # xxx - need another wrapper
1707 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1708 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1709 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1710 ##start httpd service again
1711 self.run_in_guest('service httpd start')
1713 utils.header('Database restored from ' + dump)
1716 def create_ignore_steps ():
1717 for step in TestPlc.default_steps + TestPlc.other_steps:
1718 # default step can have a plc qualifier
1719 if '@' in step: (step,qualifier)=step.split('@')
1720 # or be defined as forced or ignored by default
1721 for keyword in ['_ignore','_force']:
1722 if step.endswith (keyword): step=step.replace(keyword,'')
1723 if step == SEP or step == SEPSFA : continue
1724 method=getattr(TestPlc,step)
1726 wrapped=ignore_result(method)
1727 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1728 setattr(TestPlc, name, wrapped)
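# e.g. for the 'ssh_slice' step this attaches a TestPlc.ssh_slice_ignore method
# that runs ssh_slice, prints the (ignored) outcome, and returns an Ignored
# result so the overall run can proceed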
1731 # def ssh_slice_again_ignore (self): pass
1733 # def check_initscripts_ignore (self): pass
1735 def standby_1_through_20(self):
1736 """convenience function to wait for a specified number of minutes"""
1739 def standby_1(): pass
1741 def standby_2(): pass
1743 def standby_3(): pass
1745 def standby_4(): pass
1747 def standby_5(): pass
1749 def standby_6(): pass
1751 def standby_7(): pass
1753 def standby_8(): pass
1755 def standby_9(): pass
1757 def standby_10(): pass
1759 def standby_11(): pass
1761 def standby_12(): pass
1763 def standby_13(): pass
1765 def standby_14(): pass
1767 def standby_15(): pass
1769 def standby_16(): pass
1771 def standby_17(): pass
1773 def standby_18(): pass
1775 def standby_19(): pass
1777 def standby_20(): pass
1779 # convenience for debugging the test logic
1780 def yes (self): return True
1781 def no (self): return False