# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
import os, os.path
import sys
import time
import socket
import traceback
from datetime import datetime, timedelta
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
from PlcapiUrlScanner import PlcapiUrlScanner
from Completer import Completer, CompleterTask
# step methods must take (self) and return a boolean (options is a member of the class)

def standby (minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
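# for illustration: decorating an empty step like
#   @standby_generic
#   def standby_5(): pass
# yields a step that parses the '5' out of its own name and sleeps for 5 minutes;
# this is exactly how the standby_1 .. standby_20 steps at the end of this file work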
def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
    return actual
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
    return actual
# a variant that expects the TestSlice method to return a list of CompleterTasks that
# are then merged into a single Completer run, to avoid waiting for each slice in turn -
# especially useful when a test fails
# because we need to pass arguments, we use a class instead of a plain decorator
class slice_mapper__tasks (object):
    # could not get this to work with named arguments
    def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
        print "timeout_minutes,silent_minutes,period_seconds",timeout_minutes,silent_minutes,period_seconds
        self.timeout=timedelta(minutes=timeout_minutes)
        self.silent=timedelta(minutes=silent_minutes)
        self.period=timedelta(seconds=period_seconds)
    def __call__ (self, method):
        decorator_self=self
        # compute augmented method name
        method_name = method.__name__ + "__tasks"
        # locate the task-producing method in TestSlice
        slice_method = TestSlice.__dict__[ method_name ]
        def wrappee (self):
            tasks=[]
            for slice_spec in self.plc_spec['slices']:
                site_spec = self.locate_site (slice_spec['sitename'])
                test_site = TestSite(self,site_spec)
                test_slice=TestSlice(self,test_site,slice_spec)
                tasks += slice_method (test_slice, self.options)
            return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
        # restore the doc text from the TestSlice method even if a bit odd
        wrappee.__doc__ = slice_method.__doc__
        return wrappee
def auth_sfa_mapper (method):
    def actual(self):
        overall=True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth=TestAuthSfa(self,auth_spec)
            if not auth_method(test_auth,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
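# for illustration, a step only needs to declare its name and doc text; the mapper
# locates the same-named method on each TestNode/TestSlice/TestAuthSfa object and
# folds the boolean results - e.g. (taken from further down in this file):
#   @node_mapper
#   def qemu_start (self) : pass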
SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
#        'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'plcapi_urls','speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
# slices created under plcsh interactively seem to be fine, but these ones lack the tags
# keep this out of the way for now
#        'check_vsys_defaults', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot,
# but as the stress test might take a while, we sometimes missed the debug mode
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
# check that slices are turned off properly
        'empty_slices', 'ssh_slice_off', SEP,
# check that they are properly re-created with the same name
        'fill_slices', 'ssh_slice', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'check_netflow','check_drl', SEP,
        'debug_nodemanager', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (steps):
        single_line=" ".join(steps)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
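    # for illustration (hypothetical input): printable_steps(['a','b',SEP,'c'])
    # returns "a b \<newline>c " - i.e. each separator is turned into a
    # backslash-continued newline, so the step list prints one group per line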
    # turn off the sfa-related steps when the build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12, as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        utils.header ("Checking if build provides SFA package...")
        # warning, we're now building 'sface' as well, so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod==0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
        (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')

    def name (self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def host_box (self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command),dry_run=self.options.dry_run)
    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
    # command gets run in the plc's vm
    def host_to_guest (self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)
    def vm_root_in_host (self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
        else:
            return "/vservers/%s.timestamp"%(self.vservername)

    # start/stop the vserver
    def start_guest_in_host (self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// start %s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host (self):
        if self.options.plcs_use_lxc:
            return "virsh -c lxc:// destroy %s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))

    def yum_check_installed (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        return self.run_in_guest("rpm -q %s"%rpms)==0

    # does a yum install in the vserver; ignores yum's return code and checks with rpm instead
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed (rpms)
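    # for illustration: self.yum_install(['myplc','sfa']) runs 'yum -y install myplc sfa'
    # in the guest and then verifies with 'rpm -q myplc sfa' - yum's own return code is
    # unreliable enough that the rpm query is what decides the step's outcome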
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
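    # this auth struct is what every PLCAPI call below passes as its first argument,
    # e.g. self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])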
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception,"Cannot locate key %s"%key_name
    def locate_private_key_from_key_names (self, key_names):
        # locate the first available key
        found=False
        for key_name in key_names:
            key_spec=self.locate_key(key_name)
            test_key=TestKey(self,key_spec)
            publickey=test_key.publicpath()
            privatekey=test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found=True
                break
        if found: return privatekey
        else: return None

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node (self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all the different hostboxes used in this plc
    def get_BoxNodes (self):
        # maps on sites and nodes, returns [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
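    # for illustration, with two qemu nodes hosted on the same (hypothetical) box this
    # returns something like { 'testbox1.example.org' : [ <TestNode>, <TestNode> ] },
    # so each per-box operation below runs once per host box rather than once per node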
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
    # make this a valid step
    def qemu_kill_all (self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all (self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True
    # list only the qemus related to this test
    def qemu_list_mine (self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # clean up only the qemus related to this test
    def qemu_clean_mine (self):
        'cleanup (rm -rf) qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.qemu_clean()
        return True

    # kill only the right qemus
    def qemu_kill_mine (self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.get_BoxNodes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass (1)
        self.show_pass (2)
        return True

    # an ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id=1
    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id>1:
            print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
            return True
        TestPlc.exported_id+=1
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        else:
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['key_name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        now=int(time.time())
        # TODO-lxc check this one
        # a first approximation is to store the timestamp close to the VM root, like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete (self):
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
            self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
            self.run_in_host("rm -fr /vservers/%s"%self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to [lv]test-initvm
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="ltest-initvm.sh"
        else:
            script="vtest-initvm.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
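    # the resulting command looks like this (hypothetical values):
    #   ./build/ltest-initvm.sh -p linux64 -d onelab -f f18 vplc01 http://.../rpms -- \
    #       --netdev eth0 --interface 10.0.0.1 --hostname vplc01.example.org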
    def plc_install (self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
    def mod_python (self):
        """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
        return self.yum_install ( [ 'mod_python' ] )
    def plc_configure (self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # the line above was added for integrating SFA testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
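    # the generated file feeds plc-config-tty's interactive prompt; for a (hypothetical)
    # spec where PLC_NAME is 'TestLab' it would contain:
    #   e PLC_NAME
    #   TestLab
    #   ... one 'e <var>' / '<value>' pair per variable ...
    #   w
    #   q
    # i.e. edit each variable, write the config, and quit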
    def plc_start (self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop (self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store (self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean (self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch (self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
757 "create sites with PLCAPI"
758 return self.do_sites()
760 def delete_sites (self):
761 "delete sites with PLCAPI"
762 return self.do_sites(action="delete")
764 def do_sites (self,action="add"):
765 for site_spec in self.plc_spec['sites']:
766 test_site = TestSite (self,site_spec)
767 if (action != "add"):
768 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
769 test_site.delete_site()
770 # deleted with the site
771 #test_site.delete_users()
774 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
775 test_site.create_site()
776 test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
        for site in sites:
            # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth is not valid anymore
            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id=site['site_id']
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
792 "create nodes with PLCAPI"
793 return self.do_nodes()
794 def delete_nodes (self):
795 "delete nodes with PLCAPI"
796 return self.do_nodes(action="delete")
798 def do_nodes (self,action="add"):
799 for site_spec in self.plc_spec['sites']:
800 test_site = TestSite (self,site_spec)
802 utils.header("Deleting nodes in site %s"%test_site.name())
803 for node_spec in site_spec['nodes']:
804 test_node=TestNode(self,test_site,node_spec)
805 utils.header("Deleting %s"%test_node.name())
806 test_node.delete_node()
808 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
809 for node_spec in site_spec['nodes']:
810 utils.pprint('Creating node %s'%node_spec,node_spec)
811 test_node = TestNode (self,test_site,node_spec)
812 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp
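    # the convention: a small value in a lease spec is a number of grains relative to
    # 'start', while anything at least a year's worth of seconds is an absolute unix
    # timestamp; e.g. (hypothetical values) with start=1000000 and grain=1800,
    # translate_timestamp(start,grain,2) -> 1003600, while 1400000000 is kept unchanged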
    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
833 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
835 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
836 print 'API answered grain=',grain
837 start=(now/grain)*grain
839 # find out all nodes that are reservable
840 nodes=self.all_reservable_nodenames()
842 utils.header ("No reservable node found - proceeding without leases")
845 # attach them to the leases as specified in plc_specs
846 # this is where the 'leases' field gets interpreted as relative of absolute
847 for lease_spec in self.plc_spec['leases']:
848 # skip the ones that come with a null slice id
849 if not lease_spec['slice']: continue
850 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
851 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
852 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
853 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
854 if lease_addition['errors']:
855 utils.header("Cannot create leases, %s"%lease_addition['errors'])
858 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
859 (nodes,lease_spec['slice'],
860 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
861 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then, check if the nodegroup is here
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
        if self.options.dry_run:
            print 'dry_run'
            return True

        class CompleterTaskBootState (CompleterTask):
            def __init__ (self, test_plc,hostname):
                self.test_plc=test_plc
                self.hostname=hostname
                self.last_boot_state='undef'
            def actual_run (self):
                node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
                                                        ['boot_state'])[0]
                self.last_boot_state = node['boot_state']
                return self.last_boot_state == target_boot_state
            def message (self):
                return "CompleterTaskBootState with node %s"%self.hostname
            def failure_message (self):
                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)

        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        utils.header("checking nodes boot state (expected %s)"%target_boot_state)
        tasks = [ CompleterTaskBootState (self,hostname) \
                  for (hostname,_) in self.all_node_infos() ]
        return Completer (tasks).run (timeout, graceout, period)
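    # Completer semantics in one line: retry each pending task every 'period' until
    # 'timeout' expires, and only start printing per-task failure messages once the
    # 'graceout' (silent) window has elapsed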
    def nodes_booted (self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)

    def check_nodes_ssh (self,debug,timeout_minutes,silent_minutes,period_seconds=15):
        class CompleterTaskNodeSsh (CompleterTask):
            def __init__ (self, hostname, qemuname, boot_state, local_key):
                self.hostname=hostname
                self.qemuname=qemuname
                self.boot_state=boot_state
                self.local_key=local_key
            def run (self, silent):
                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
                return utils.system (command, silent=silent)==0
            def failure_message (self):
                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)

        # set the timeouts
        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in %s mode)"%message)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
                  for (nodename,qemuname) in node_infos ]
        return Completer (tasks).run (timeout, graceout, period)
    def ssh_node_debug (self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,
                                    timeout_minutes=self.ssh_node_debug_timeout,
                                    silent_minutes=self.ssh_node_debug_silent)

    def ssh_node_boot (self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,
                                    timeout_minutes=self.ssh_node_boot_timeout,
                                    silent_minutes=self.ssh_node_boot_silent)

    def node_bmlogs (self):
        "Checks that there's a non-empty dir. /var/log/bm/raw"
        return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
    @node_mapper
    def qemu_local_init (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def qemu_local_config (self): pass
    @node_mapper
    def nodestate_reinstall (self): pass
    @node_mapper
    def nodestate_safeboot (self): pass
    @node_mapper
    def nodestate_boot (self): pass
    @node_mapper
    def nodestate_show (self): pass
    @node_mapper
    def qemu_export (self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts (self):
        class CompleterTaskInitscript (CompleterTask):
            def __init__ (self, test_sliver, stamp):
                self.test_sliver=test_sliver
                self.stamp=stamp
            def actual_run (self):
                return self.test_sliver.check_initscript_stamp (self.stamp)
            def message (self):
                return "initscript checker for %s"%self.test_sliver.name()
            def failure_message (self):
                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())

        tasks=[]
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            slicename=slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print 'nodename',nodename,'slicename',slicename,'stamp',stamp
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
    def check_initscripts (self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1140 "create slices with PLCAPI"
1141 return self.do_slices(action="add")
1143 def delete_slices (self):
1144 "delete slices with PLCAPI"
1145 return self.do_slices(action="delete")
1147 def fill_slices (self):
1148 "add nodes in slices with PLCAPI"
1149 return self.do_slices(action="fill")
1151 def empty_slices (self):
1152 "remove nodes from slices with PLCAPI"
1153 return self.do_slices(action="empty")
1155 def do_slices (self, action="add"):
1156 for slice in self.plc_spec['slices']:
1157 site_spec = self.locate_site (slice['sitename'])
1158 test_site = TestSite(self,site_spec)
1159 test_slice=TestSlice(self,test_site,slice)
1160 if action == "delete":
1161 test_slice.delete_slice()
1162 elif action=="fill":
1163 test_slice.add_nodes()
1164 elif action=="empty":
1165 test_slice.delete_nodes()
1167 test_slice.create_slice()
    @slice_mapper__tasks(20,10,15)
    def ssh_slice (self): pass
    @slice_mapper__tasks(20,19,15)
    def ssh_slice_off (self): pass

    @slice_mapper
    def ssh_slice_basics (self): pass

    @slice_mapper
    def check_vsys_defaults (self): pass

    @node_mapper
    def keys_clear_known_hosts (self): pass
    def plcapi_urls (self):
        return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file to the API, so nodes pick it up and restart nodemanager
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
        return True
    def debug_nodemanager (self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        return True

    @node_mapper
    def qemu_start (self) : pass

    @node_mapper
    def timestamp_qemu (self) : pass
    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj (nodename, slicename)
            except:
                pass
        raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp (self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header ("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
            if not s_test_sliver.run_tcp_server(port,timeout=20):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
            # use nodename from the located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination=s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination,port):
                overall=False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice (self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow (self): return self._check_system_slice ('netflow')
    def check_drl (self): return self._check_system_slice ('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
        class CompleterTaskSystemSlice (CompleterTask):
            def __init__ (self, test_node, dry_run):
                self.test_node=test_node
                self.dry_run=dry_run
            def actual_run (self):
                return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
            def message (self):
                return "System slice %s @ %s"%(slicename, self.test_node.name())
            def failure_message (self):
                return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
        timeout = timedelta(minutes=timeout_minutes)
        silent = timedelta (0)
        period = timedelta (seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
                  for test_node in self.all_nodes() ]
        return Completer (tasks) . run (timeout, silent, period)
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core (self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc (self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables (self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client (self):
        "yum install sfa-client"
        first_try=self.yum_install("sfa-client")
        if first_try: return True
        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>"%cached_rpm_path)
        self.run_in_guest("rpm -i %s"%cached_rpm_path)
        return self.yum_check_installed ("sfa-client")
    def sfa_dbclean (self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke")==0 or \
               self.run_in_guest("sfa-nuke.py")==0 or \
               self.run_in_guest("sfa-nuke-plc.py")==0

    def sfa_fsclean (self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True
    def sfa_plcclean (self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base=auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                except:
                    # this in fact is expected, as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall (self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test successful
    # other combinations will eventually run it, hopefully
    def sfa_utest (self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir (self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile (self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir (self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure (self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line (self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line (self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
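    # for illustration, with hypothetical values vserverip='10.0.0.2' and a root
    # auth of 'plt', registry_xml_line() renders as:
    #   <registry addr="10.0.0.2" hrn="plt" port="12345"/>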
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure (self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import (self):
        "use sfaadmin to import from plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfaadmin reg import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start (self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure (self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip the last level of remote, otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @auth_sfa_mapper
    def sfa_add_site (self): pass
    @auth_sfa_mapper
    def sfa_add_pi (self): pass
    @auth_sfa_mapper
    def sfa_add_user (self): pass
    @auth_sfa_mapper
    def sfa_update_user (self): pass
    @auth_sfa_mapper
    def sfa_add_slice (self): pass
    @auth_sfa_mapper
    def sfa_renew_slice (self): pass
    @auth_sfa_mapper
    def sfa_discover (self): pass
    @auth_sfa_mapper
    def sfa_create_slice (self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc (self): pass
    @auth_sfa_mapper
    def sfa_update_slice (self): pass
    @auth_sfa_mapper
    def sfi_list (self): pass
    @auth_sfa_mapper
    def sfi_show (self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa (self): pass
    @auth_sfa_mapper
    def sfa_delete_user (self): pass
    @auth_sfa_mapper
    def sfa_delete_slice (self): pass
    def sfa_stop (self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes' qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible, get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (1.c)
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes' /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs (self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
        utils.system(command)
    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            # fall back on today's date
            name=str(datetime.now().date())
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump (self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore (self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ## stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ## restart httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20 (self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass