# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA

import sys
import time
import os, os.path
import traceback
import socket
from datetime import datetime, timedelta
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
from Completer import Completer, CompleterTask
# step methods must take (self) and return a boolean (options is a member of the class)
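# for illustration only - a minimal step method, with a hypothetical name,
# would look like the sketch below (real steps are defined on TestPlc further down):
#
#    def my_step (self):
#        "one-line description, shown when listing steps"
#        return self.run_in_guest("some-command") == 0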

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    actual.__name__=func.__name__
    return actual
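# so that e.g. standby_3 below waits for 3 minutes:
# int("standby_3".split("_")[1]) == 3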

def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
    return actual

def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
    return actual

# run a step but return True so that we can go on
def ignore_result (method):
    def wrappee (self):
        # ssh_slice_ignore->ssh_slice
        ref_name=method.__name__.replace('_ignore','').replace('ignore_','')
        ref_method=TestPlc.__dict__[ref_name]
        result=ref_method(self)
        print "Actual - but ignored - result for %(ref_name)s is %(result)s"%locals()
        return True
    wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
    return wrappee

# a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for all the slices
# one after the other - especially useful when a test fails, of course
# because we need to pass arguments we use a class instead..
class slice_mapper__tasks (object):
    # could not get this to work with named arguments
    def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
        print "timeout_minutes,silent_minutes,period_seconds",timeout_minutes,silent_minutes,period_seconds
        self.timeout=timedelta(minutes=timeout_minutes)
        self.silent=timedelta(minutes=silent_minutes)
        self.period=timedelta(seconds=period_seconds)
    def __call__ (self, method):
        decorator_self=self
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        slice_method = TestSlice.__dict__[ method_name ]
        def wrappee (self):
            tasks=[]
            for slice_spec in self.plc_spec['slices']:
                site_spec = self.locate_site (slice_spec['sitename'])
                test_site = TestSite(self,site_spec)
                test_slice=TestSlice(self,test_site,slice_spec)
                tasks += slice_method (test_slice, self.options)
            return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
        # restore the doc text from the TestSlice method even if a bit odd
        wrappee.__doc__ = slice_method.__doc__
        return wrappee
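# this gets used further down in TestPlc, e.g. (minutes, minutes, seconds):
#   @slice_mapper__tasks(20,10,15)
#   def ssh_slice(self): pass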

def auth_sfa_mapper (method):
    def actual(self):
        overall=True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth=TestAuthSfa(self,auth_spec)
            if not auth_method(test_auth,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
    return actual

SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'show', SEP,
        'vs_delete','timestamp_vs','vs_create', SEP,
#        'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'plcapi_urls','speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        # slices created under plcsh interactively seem to be fine but these ones don't have the tags
        # keep this out of the way for now
#        'check_vsys_defaults', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        # check slices are turned off properly
        'empty_slices', 'ssh_slice_off', SEP,
        # check they are properly re-created with the same name
        'fill_slices', 'ssh_slice_again_ignore', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'populate', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'check_netflow','check_drl', SEP,
        'debug_nodemanager', SEP,
        'standby_1_through_20',SEP,
        ]

    @staticmethod
    def printable_steps (steps):
        single_line=" ".join(steps)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
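    # e.g. with steps=['a','b',SEP,'c'] this returns 'a b \' followed by a
    # newline and then 'c ' - i.e. separators turn into line breaks when the
    # selected steps get displayed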

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA

    # turn off the sfa-related steps when the build has skipped SFA
    # this was originally for centos5 but is still valid
    # up to f12, as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        utils.header ("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod==0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)

    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
        (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']

    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def host_box (self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def connect (self):
        pass

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command),dry_run=self.options.dry_run)

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)

    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)
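    # for instance, with plcs_use_lxc set and vserverip being e.g. 192.168.122.101,
    # host_to_guest("service plc start") evaluates to
    #   ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null 192.168.122.101 service plc start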

    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
        else:
            return "/vservers/%s.timestamp"%(self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// start %s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// destroy %s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))

    def yum_check_installed (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        return self.run_in_guest("rpm -q %s"%rpms)==0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed (rpms)
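    # e.g. self.yum_install( [ 'myplc', 'sfa' ] ) runs 'yum -y install myplc sfa'
    # in the guest and then relies on 'rpm -q myplc sfa' - not on yum's exit
    # code - to decide whether the step succeeded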

    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }

    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception,"Cannot locate key %s"%key_name

    def locate_private_key_from_key_names (self, key_names):
        # locate the first avail. key
        found=False
        for key_name in key_names:
            key_spec=self.locate_key(key_name)
            test_key=TestKey(self,key_spec)
            publickey=test_key.publicpath()
            privatekey=test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found=True
        if found: return privatekey
        else: return None

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename

    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)

    # all different hostboxes used in this plc
    def get_BoxNodes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved in this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved in this setup'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the qemus related to this test
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # clean only the qemus related to this test
    def qemu_clean_mine(self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.qemu_clean()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass (1)
        self.show_pass (2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id=1
    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id>1:
            print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
            return True
        TestPlc.exported_id+=1
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        else:
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True

    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val

    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',
                utils.pprint(k,v)

    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['key_name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,v

    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']

    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
            self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
            self.run_in_host("rm -fr /vservers/%s"%self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True

    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)

        # invoke initvm (drop support for vs)
        script="ltest-initvm.sh"
        script_options=""
        # pass the vbuild-nightly options to [lv]test-initvm
        script_options += " -p %s"%self.options.personality
        script_options += " -d %s"%self.options.pldistro
        script_options += " -f %s"%self.options.fcdistro
        script_options += " -r %s"%repo_url
        vserver_name = self.vservername
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            script_options += " -n %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
        return self.run_in_host(create_vserver) == 0

    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)

    def mod_python(self):
        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
        return self.yum_install ( [ 'mod_python' ] )

    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_ROOT_USER',
                     'PLC_ROOT_PASSWORD',
                     'PLC_SLICE_PREFIX',
                     'PLC_MAIL_ENABLED',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_DB_HOST',
#                     'PLC_DB_PASSWORD',
                     # above line was added for integrating SFA Testing
                     'PLC_API_HOST',
                     'PLC_WWW_HOST',
                     'PLC_BOOT_HOST',
                     'PLC_NET_DNS1',
                     'PLC_NET_DNS2',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_ENABLED',
                     'PLC_OMF_XMPP_SERVER',
                     'PLC_VSYS_DEFAULTS',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall

    def sites (self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                continue
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
        for site in sites:
            # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would not be valid anymore
            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id=site['site_id']
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True

    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp
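    # in other words, the 'leases' spec may be written as relative - e.g. with
    # grain=1800 and start=1000000000, a value of 2 maps to 1000003600 - while
    # anything bigger than a year's worth of seconds is kept as an absolute epoch timestamp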

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
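    # for instance timestamp_printable(0) yields '01-01 00:00:00 UTC'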

    def leases (self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now=int(time.time())
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        if not nodes:
            utils.header ("No reservable node found - proceeding without leases")
            return True
        ok=True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                ok=False
            else:
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok

    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True

    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create nodegroup
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall

    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res

    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
        if self.options.dry_run:
            print 'dry_run'
            return True

        class CompleterTaskBootState (CompleterTask):
            def __init__ (self, test_plc,hostname):
                self.test_plc=test_plc
                self.hostname=hostname
                self.last_boot_state='undef'
            def actual_run (self):
                try:
                    node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
                                                            ['boot_state'])[0]
                    self.last_boot_state = node['boot_state']
                    return self.last_boot_state == target_boot_state
                except:
                    return False
            def message (self):
                return "CompleterTaskBootState with node %s"%self.hostname
            def failure_message (self):
                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        utils.header("checking nodes boot state (expected %s)"%target_boot_state)
        tasks = [ CompleterTaskBootState (self,hostname) \
                  for (hostname,_) in self.all_node_infos() ]
        return Completer (tasks).run (timeout, graceout, period)
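    # as a reminder, the CompleterTask contract used above goes like this:
    # each task exposes actual_run() returning a boolean, together with
    # message() and failure_message() for reporting, and
    # Completer(tasks).run(timeout,graceout,period) polls all tasks every
    # <period> until they all pass or <timeout> expires, staying quiet during
    # the first <graceout> - see Completer.py for the details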

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)

    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
        class CompleterTaskNodeSsh (CompleterTask):
            def __init__ (self, hostname, qemuname, boot_state, local_key):
                self.hostname=hostname
                self.qemuname=qemuname
                self.boot_state=boot_state
                self.local_key=local_key
            def run (self, silent):
                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
                return utils.system (command, silent=silent)==0
            def failure_message (self):
                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in %s mode)"%message)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
                  for (nodename,qemuname) in node_infos ]
        return Completer (tasks).run (timeout, graceout, period)

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,
                                    timeout_minutes=self.ssh_node_debug_timeout,
                                    silent_minutes=self.ssh_node_debug_silent)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,
                                    timeout_minutes=self.ssh_node_boot_timeout,
                                    silent_minutes=self.ssh_node_boot_silent)

    def node_bmlogs(self):
        "Checks that there's a non-empty dir. /var/log/bm/raw"
        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0

    @node_mapper
    def qemu_local_init (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def qemu_local_config (self): pass
    @node_mapper
    def nodestate_reinstall (self): pass
    @node_mapper
    def nodestate_safeboot (self): pass
    @node_mapper
    def nodestate_boot (self): pass
    @node_mapper
    def nodestate_show (self): pass
    @node_mapper
    def qemu_export (self): pass

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        class CompleterTaskInitscript (CompleterTask):
            def __init__ (self, test_sliver, stamp):
                self.test_sliver=test_sliver
                self.stamp=stamp
            def actual_run (self):
                return self.test_sliver.check_initscript_stamp (self.stamp)
            def message (self):
                return "initscript checker for %s"%self.test_sliver.name()
            def failure_message (self):
                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())

        tasks=[]
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            slicename=slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print 'nodename',nodename,'slicename',slicename,'stamp',stamp
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True

    def slices (self):
        "create slices with PLCAPI"
        return self.do_slices(action="add")

    def delete_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices(action="delete")

    def fill_slices (self):
        "add nodes in slices with PLCAPI"
        return self.do_slices(action="fill")

    def empty_slices (self):
        "remove nodes from slices with PLCAPI"
        return self.do_slices(action="empty")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action=="fill":
                test_slice.add_nodes()
            elif action=="empty":
                test_slice.delete_nodes()
            else:
                test_slice.create_slice()
        return True

    @slice_mapper__tasks(20,10,15)
    def ssh_slice(self): pass
    @slice_mapper__tasks(20,19,15)
    def ssh_slice_off (self): pass

    # this is semantically just equivalent to ssh_slice
    # but we use another name so we can exclude it from the tests on the nightly command line
    ssh_slice_again=ssh_slice

    @slice_mapper
    def ssh_slice_basics(self): pass

    @slice_mapper
    def check_vsys_defaults(self): pass

    @node_mapper
    def keys_clear_known_hosts (self): pass

    def plcapi_urls (self):
        return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()

    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file to the API
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
        return True

    def debug_nodemanager (self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        return True

    @node_mapper
    def qemu_start (self) : pass

    @node_mapper
    def timestamp_qemu (self) : pass

    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj (nodename, slicename)
            except:
                pass
        raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)

    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp (self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header ("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
            if not s_test_sliver.run_tcp_server(port,timeout=20):
                overall=False
                break

            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
            # use nodename from the located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination=s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination,port):
                overall=False
        return overall

    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice (self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow (self): return self._check_system_slice ('netflow')
    def check_drl (self): return self._check_system_slice ('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
        class CompleterTaskSystemSlice (CompleterTask):
            def __init__ (self, test_node, dry_run):
                self.test_node=test_node
                self.dry_run=dry_run
            def actual_run (self):
                return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
            def message (self):
                return "System slice %s @ %s"%(slicename, self.test_node.name())
            def failure_message (self):
                return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
        timeout = timedelta(minutes=timeout_minutes)
        silent = timedelta (0)
        period = timedelta (seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
                  for test_node in self.all_nodes() ]
        return Completer (tasks) . run (timeout, silent, period)

    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # it also gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")

    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try=self.yum_install("sfa-client")
        if first_try: return True
        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>"%cached_rpm_path)
        self.run_in_guest("rpm -i %s"%cached_rpm_path)
        return self.yum_check_installed ("sfa-client")

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke")==0 or \
            self.run_in_guest("sfa-nuke.py")==0 or \
            self.run_in_guest("sfa-nuke-plc.py")==0

    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # ignore result
        sfa_spec=self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base=auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True

    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0

    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0

    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_SM_HOST',
                     'SFA_PLC_URL',
                     'SFA_PLC_USER',
                     'SFA_PLC_PASSWORD',
                     'SFA_DB_HOST',
                     'SFA_DB_USER',
                     'SFA_DB_PASSWORD',
                     'SFA_DB_NAME',
                     'SFA_API_LOGLEVEL',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True

    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
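    # these two produce snippets like e.g. (addr/hrn/port values are illustrative):
    #   <aggregate addr="192.168.122.101" hrn="pla" port="12346"/>
    #   <registry addr="192.168.122.101" hrn="pla" port="12345"/>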

    # a cross step that takes all other plcs in argument
    def cross_sfa_configure (self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0

    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return \
            self.run_in_guest('sfaadmin reg import_registry')==0
    # not needed anymore
    #        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip the last level of remote, otherwise we'd get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True

    @auth_sfa_mapper
    def sfa_add_site (self): pass
    @auth_sfa_mapper
    def sfa_add_pi (self): pass
    @auth_sfa_mapper
    def sfa_add_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_add_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_create_slice(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True

    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)

    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (1.c)
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes' /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
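    # e.g. dbfile("planetlab5") yields "/root/planetlab5-2012-05-23.sql" when
    # options.dbname is unset (the date part depends on the current day)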

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)

    @ignore_result
    def ssh_slice_again_ignore (self): pass
    @ignore_result
    def check_initscripts_ignore (self): pass

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass