# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import sys
import time
import os, os.path
import traceback
import socket
from datetime import datetime, timedelta
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
from Completer import Completer, CompleterTask
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
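# An illustrative expansion (commentary only, not an extra step): decorating
#     @standby_generic
#     def standby_10(): pass
# produces a bound method whose actual(self) parses the "10" out of the
# function name and calls standby(10, self.options.dry_run) - a 10-minute wait.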
def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
    return actual
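# An illustrative sketch of what node_mapper does (commentary only): a
# placeholder step such as
#     @node_mapper
#     def qemu_start (self): pass
# is replaced by a loop that calls TestNode.qemu_start(test_node) on every
# node from self.all_nodes(), and-ing the boolean results into 'overall'.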
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
    return actual
# a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for all the slices
# sequentially - especially useful when a test fails
# because we need to pass arguments we use a class instead..
class slice_mapper__tasks (object):
    # could not get this to work with named arguments
    def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
        print "timeout_minutes,silent_minutes,period_seconds",timeout_minutes,silent_minutes,period_seconds
        self.timeout=timedelta(minutes=timeout_minutes)
        self.silent=timedelta(minutes=silent_minutes)
        self.period=timedelta(seconds=period_seconds)
    def __call__ (self, method):
        decorator_self=self
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        # locate in TestSlice
        slice_method = TestSlice.__dict__[ method_name ]
        def wrappee (self):
            tasks=[]
            for slice_spec in self.plc_spec['slices']:
                site_spec = self.locate_site (slice_spec['sitename'])
                test_site = TestSite(self,site_spec)
                test_slice=TestSlice(self,test_site,slice_spec)
                tasks += slice_method (test_slice, self.options)
            return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
        # restore the doc text from the TestSlice method even if a bit odd
        wrappee.__doc__ = slice_method.__doc__
        return wrappee
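# An illustrative use, as declared further down (commentary only):
#     @slice_mapper__tasks(20,10,15)
#     def ssh_slice(self): pass
# looks up TestSlice.ssh_slice__tasks, concatenates the CompleterTask lists
# returned for each slice, and runs them under one Completer with a 20-minute
# timeout, 10 silent minutes, and a 15-second polling period.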
def auth_sfa_mapper (method):
    def actual(self):
        overall=True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth=TestAuthSfa(self,auth_spec)
            if not auth_method(test_auth,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
SEP='<sep>'
SEPSFA='<sep_sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
#        'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'plcapi_urls','speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
# slices created under plcsh interactively seem to be fine but these ones don't have the tags
# keep this out of the way for now
#        'check_vsys_defaults', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        # check slices are turned off properly
        'empty_slices', 'ssh_slice_off', SEP,
        # check they are properly re-created with the same name
        'fill_slices', 'ssh_slice', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'check_netflow','check_drl', SEP,
        'debug_nodemanager', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
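    # For instance (illustrative): printable_steps(['sites','nodes',SEP,'slices'])
    # returns the string "sites nodes \" + newline + "slices " - each SEP/SEPSFA
    # marker collapses into a backslash-newline so a selected step list prints
    # as shell-style continuation lines.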
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        utils.header ("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod==0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
        (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command),dry_run=self.options.dry_run)

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
        else:
            return "/vservers/%s.timestamp"%(self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// start %s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// destroy %s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))

    def yum_check_installed (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        return self.run_in_guest("rpm -q %s"%rpms)==0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed (rpms)
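    # Design note: yum's exit code is unreliable here (it can return 0 for
    # skipped packages, and transient repo errors can leave a transaction
    # half-done), so the code above deliberately ignores it, lets
    # yum-complete-transaction mop up, and trusts 'rpm -q' as the final
    # boolean - e.g. self.yum_install(['myplc']) succeeds only if
    # 'rpm -q myplc' does.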
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception,"Cannot locate key %s"%key_name

    def locate_private_key_from_key_names (self, key_names):
        # locate the first available key
        found=False
        for key_name in key_names:
            key_spec=self.locate_key(key_name)
            test_key=TestKey(self,key_spec)
            publickey=test_key.publicpath()
            privatekey=test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found=True
        if found: return privatekey
        else: return None

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def get_BoxNodes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
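    # Illustrative return value (hypothetical box/node names):
    #   { 'testbox1.onelab.eu' : [ <TestNode vnode01>, <TestNode vnode02> ],
    #     'testbox2.onelab.eu' : [ <TestNode vnode03> ] }
    # one entry per qemu host box with the TestNode objects it hosts;
    # real (non-qemu) nodes are skipped, as is_real() excludes them above.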
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the qemus related to this test
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # clean only the qemus related to this test
    def qemu_clean_mine(self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.qemu_clean()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass (1)
        self.show_pass (2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id=1
    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id>1:
            print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
            return True
        TestPlc.exported_id+=1
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        else:
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',
                utils.pprint(k,v)

    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['key_name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,v
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
            self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
            self.run_in_host("rm -fr /vservers/%s"%self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to [lv]test-initvm
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="ltest-initvm.sh"
        else:
            script="vtest-initvm.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
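    # An illustrative command line assembled above (hypothetical values):
    #   build/ltest-initvm.sh -p linux64 -d onelab -f f14 vplc01 \
    #       http://build.onelab.eu/onelab/2013.01 -- --netdev eth0 \
    #       --interface 192.168.122.7 --hostname vplc01.onelab.eu
    # i.e. script, test-env options, vm name, repo url, then the vm-level
    # options after the '--' separator.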
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        pkgs_string=" ".join(pkgs_list)
        return self.yum_install (pkgs_list)
    def mod_python(self):
        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
        return self.yum_install ( [ 'mod_python' ] )
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
758 "create sites with PLCAPI"
759 return self.do_sites()
761 def delete_sites (self):
762 "delete sites with PLCAPI"
763 return self.do_sites(action="delete")
765 def do_sites (self,action="add"):
766 for site_spec in self.plc_spec['sites']:
767 test_site = TestSite (self,site_spec)
768 if (action != "add"):
769 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
770 test_site.delete_site()
771 # deleted with the site
772 #test_site.delete_users()
775 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
776 test_site.create_site()
777 test_site.create_users()
780 def delete_all_sites (self):
781 "Delete all sites in PLC, and related objects"
782 print 'auth_root',self.auth_root()
783 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
785 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
786 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
787 site_id=site['site_id']
788 print 'Deleting site_id',site_id
789 self.apiserver.DeleteSite(self.auth_root(),site_id)
793 "create nodes with PLCAPI"
794 return self.do_nodes()
795 def delete_nodes (self):
796 "delete nodes with PLCAPI"
797 return self.do_nodes(action="delete")
799 def do_nodes (self,action="add"):
800 for site_spec in self.plc_spec['sites']:
801 test_site = TestSite (self,site_spec)
803 utils.header("Deleting nodes in site %s"%test_site.name())
804 for node_spec in site_spec['nodes']:
805 test_node=TestNode(self,test_site,node_spec)
806 utils.header("Deleting %s"%test_node.name())
807 test_node.delete_node()
809 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
810 for node_spec in site_spec['nodes']:
811 utils.pprint('Creating node %s'%node_spec,node_spec)
812 test_node = TestNode (self,test_site,node_spec)
813 test_node.create_node ()
816 def nodegroups (self):
817 "create nodegroups with PLCAPI"
818 return self.do_nodegroups("add")
819 def delete_nodegroups (self):
820 "delete nodegroups with PLCAPI"
821 return self.do_nodegroups("delete")
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
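    # A worked example (illustrative numbers): with grain=1800 (30-minute
    # granularity) and start=1000000800 (a grain boundary), a spec value of 2
    # is below YEAR so it reads as "2 grains from start":
    #   translate_timestamp(1000000800,1800,2) == 1000000800 + 2*1800 == 1000004400
    # whereas a full epoch timestamp such as 1000008000 (>= YEAR) is kept as is.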
834 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
836 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
837 print 'API answered grain=',grain
838 start=(now/grain)*grain
840 # find out all nodes that are reservable
841 nodes=self.all_reservable_nodenames()
843 utils.header ("No reservable node found - proceeding without leases")
846 # attach them to the leases as specified in plc_specs
847 # this is where the 'leases' field gets interpreted as relative of absolute
848 for lease_spec in self.plc_spec['leases']:
849 # skip the ones that come with a null slice id
850 if not lease_spec['slice']: continue
851 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
852 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
853 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
854 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
855 if lease_addition['errors']:
856 utils.header("Cannot create leases, %s"%lease_addition['errors'])
859 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
860 (nodes,lease_spec['slice'],
861 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
862 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             'category':'test'})
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
        if self.options.dry_run:
            print 'dry_run'
            return True

        class CompleterTaskBootState (CompleterTask):
            def __init__ (self, test_plc,hostname):
                self.test_plc=test_plc
                self.hostname=hostname
                self.last_boot_state='undef'
            def actual_run (self):
                try:
                    node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
                                                            ['boot_state'])[0]
                    self.last_boot_state = node['boot_state']
                    return self.last_boot_state == target_boot_state
                except:
                    return False
            def message (self):
                return "CompleterTaskBootState with node %s"%self.hostname
            def failure_message (self):
                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        utils.header("checking nodes boot state (expected %s)"%target_boot_state)
        tasks = [ CompleterTaskBootState (self,hostname) \
                  for (hostname,_) in self.all_node_infos() ]
        return Completer (tasks).run (timeout, graceout, period)
    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)

    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
        class CompleterTaskNodeSsh (CompleterTask):
            def __init__ (self, hostname, qemuname, boot_state, local_key):
                self.hostname=hostname
                self.qemuname=qemuname
                self.boot_state=boot_state
                self.local_key=local_key
            def run (self, silent):
                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
                return utils.system (command, silent=silent)==0
            def failure_message (self):
                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in %s mode)"%message)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
                  for (nodename,qemuname) in node_infos ]
        return Completer (tasks).run (timeout, graceout, period)

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,
                                    timeout_minutes=self.ssh_node_debug_timeout,
                                    silent_minutes=self.ssh_node_debug_silent)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,
                                    timeout_minutes=self.ssh_node_boot_timeout,
                                    silent_minutes=self.ssh_node_boot_silent)

    def node_bmlogs(self):
        "Checks that there's a non-empty dir. /var/log/bm/raw"
        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
    @node_mapper
    def qemu_local_init (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def qemu_local_config (self): pass
    @node_mapper
    def nodestate_reinstall (self): pass
    @node_mapper
    def nodestate_safeboot (self): pass
    @node_mapper
    def nodestate_boot (self): pass
    @node_mapper
    def nodestate_show (self): pass
    @node_mapper
    def qemu_export (self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        class CompleterTaskInitscript (CompleterTask):
            def __init__ (self, test_sliver, stamp):
                self.test_sliver=test_sliver
                self.stamp=stamp
            def actual_run (self):
                return self.test_sliver.check_initscript_stamp (self.stamp)
            def message (self):
                return "initscript checker for %s"%self.test_sliver.name()
            def failure_message (self):
                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())

        tasks=[]
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            slicename=slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print 'nodename',nodename,'slicename',slicename,'stamp',stamp
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1141 "create slices with PLCAPI"
1142 return self.do_slices(action="add")
1144 def delete_slices (self):
1145 "delete slices with PLCAPI"
1146 return self.do_slices(action="delete")
1148 def fill_slices (self):
1149 "add nodes in slices with PLCAPI"
1150 return self.do_slices(action="fill")
1152 def empty_slices (self):
1153 "remove nodes from slices with PLCAPI"
1154 return self.do_slices(action="empty")
1156 def do_slices (self, action="add"):
1157 for slice in self.plc_spec['slices']:
1158 site_spec = self.locate_site (slice['sitename'])
1159 test_site = TestSite(self,site_spec)
1160 test_slice=TestSlice(self,test_site,slice)
1161 if action == "delete":
1162 test_slice.delete_slice()
1163 elif action=="fill":
1164 test_slice.add_nodes()
1165 elif action=="empty":
1166 test_slice.delete_nodes()
1168 test_slice.create_slice()
    @slice_mapper__tasks(20,10,15)
    def ssh_slice(self): pass
    @slice_mapper__tasks(20,19,15)
    def ssh_slice_off (self): pass

    @slice_mapper
    def ssh_slice_basics(self): pass

    @slice_mapper
    def check_vsys_defaults(self): pass

    @node_mapper
    def keys_clear_known_hosts (self): pass

    def plcapi_urls (self):
        return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file in the API
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
        return True

    def debug_nodemanager (self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        return True
    @node_mapper
    def qemu_start (self) : pass

    @node_mapper
    def timestamp_qemu (self) : pass

    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj (nodename, slicename)
            except:
                pass
        raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp (self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header ("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
            if not s_test_sliver.run_tcp_server(port,timeout=20):
                overall=False
                break

            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination=s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination,port):
                overall=False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice (self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow (self): return self._check_system_slice ('netflow')
    def check_drl (self): return self._check_system_slice ('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
        class CompleterTaskSystemSlice (CompleterTask):
            def __init__ (self, test_node, dry_run):
                self.test_node=test_node
                self.dry_run=dry_run
            def actual_run (self):
                return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
            def message (self):
                return "System slice %s @ %s"%(slicename, self.test_node.name())
            def failure_message (self):
                return "could not find system slice %s @ %s"%(slicename, self.test_node.name())
        timeout = timedelta(minutes=timeout_minutes)
        silent = timedelta (0)
        period = timedelta (seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
                  for test_node in self.all_nodes() ]
        return Completer (tasks) . run (timeout, silent, period)
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try=self.yum_install("sfa-client")
        if first_try: return True
        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>"%cached_rpm_path)
        self.run_in_guest("rpm -i %s"%cached_rpm_path)
        return self.yum_check_installed ("sfa-client")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke")==0 or \
            self.run_in_guest("sfa-nuke.py")==0 or \
            self.run_in_guest("sfa-nuke-plc.py")==0

    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base=auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1416 dirname="conf.%s"%self.plc_spec['name']
1417 if not os.path.isdir(dirname):
1418 utils.system("mkdir -p %s"%dirname)
1419 if not os.path.isdir(dirname):
1420 raise Exception,"Cannot create config dir for plc %s"%self.name()
1423 def conffile(self,filename):
1424 return "%s/%s"%(self.confdir(),filename)
1425 def confsubdir(self,dirname,clean,dry_run=False):
1426 subdirname="%s/%s"%(self.confdir(),dirname)
1428 utils.system("rm -rf %s"%subdirname)
1429 if not os.path.isdir(subdirname):
1430 utils.system("mkdir -p %s"%subdirname)
1431 if not dry_run and not os.path.isdir(subdirname):
1432 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1435 def conffile_clean (self,filename):
1436 filename=self.conffile(filename)
1437 return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
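    # A sample emitted line (illustrative values): with vserverip=192.168.122.7,
    # SFA_REGISTRY_ROOT_AUTH=onelab and neighbours-port=12346, aggregate_xml_line
    # returns:
    #   <aggregate addr="192.168.122.7" hrn="onelab" port="12346"/>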
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
            and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return \
            self.run_in_guest('sfaadmin reg import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @auth_sfa_mapper
    def sfa_add_site (self): pass
    @auth_sfa_mapper
    def sfa_add_pi (self): pass
    @auth_sfa_mapper
    def sfa_add_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_add_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_create_slice(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')==0
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass