1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d mn'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
42 def node_mapper (method):
43 def actual(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # restore the doc text
50 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
53 def slice_mapper (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
67 # run a step but return True so that we can go on
68 def ignore_result (method):
70 # ssh_slice_ignore->ssh_slice
71 ref_name=method.__name__.replace('_ignore','').replace('force_','')
72 ref_method=TestPlc.__dict__[ref_name]
73 result=ref_method(self)
74 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
75 return Ignored (result)
76 wrappee.__doc__="ignored version of " + method.__name__.replace('_ignore','').replace('ignore_','')
79 # a variant that expects the TestSlice method to return a list of CompleterTasks that
80 # are then merged into a single Completer run to avoid wating for all the slices
81 # esp. useful when a test fails of course
82 # because we need to pass arguments we use a class instead..
83 class slice_mapper__tasks (object):
84 # could not get this to work with named arguments
85 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
86 self.timeout=timedelta(minutes=timeout_minutes)
87 self.silent=timedelta(minutes=silent_minutes)
88 self.period=timedelta(seconds=period_seconds)
89 def __call__ (self, method):
91 # compute augmented method name
92 method_name = method.__name__ + "__tasks"
94 slice_method = TestSlice.__dict__[ method_name ]
97 for slice_spec in self.plc_spec['slices']:
98 site_spec = self.locate_site (slice_spec['sitename'])
99 test_site = TestSite(self,site_spec)
100 test_slice=TestSlice(self,test_site,slice_spec)
101 tasks += slice_method (test_slice, self.options)
102 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
103 # restore the doc text from the TestSlice method even if a bit odd
104 wrappee.__doc__ = slice_method.__doc__
107 def auth_sfa_mapper (method):
110 auth_method = TestAuthSfa.__dict__[method.__name__]
111 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
112 test_auth=TestAuthSfa(self,auth_spec)
113 if not auth_method(test_auth,self.options): overall=False
115 # restore the doc text
116 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
120 def __init__ (self,result):
130 'vs_delete','timestamp_vs','vs_create', SEP,
131 # 'plc_install', 'mod_python', 'plc_configure', 'plc_start', SEP,
132 'plc_install', 'plc_configure', 'plc_start', SEP,
133 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
134 'plcapi_urls','speed_up_slices', SEP,
135 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
136 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
137 # keep this our of the way for now
138 # 'check_vsys_defaults', SEP,
139 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
140 'qemu_kill_mine','qemu_clean_mine', 'qemu_export', 'qemu_start', 'timestamp_qemu', SEP,
141 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
142 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
143 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
144 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
145 'sfi_list@1', 'sfi_show@1', 'sfa_utest@1', SEPSFA,
146 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
147 # but as the stress test might take a while, we sometimes missed the debug mode..
148 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
149 'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
150 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
151 'cross_check_tcp@1', 'check_system_slice', SEP,
152 # check slices are turned off properly
153 'empty_slices', 'ssh_slice_off', SEP,
154 # check they are properly re-created with the same name
155 'fill_slices', 'ssh_slice_again_ignore', SEP,
156 'gather_logs_force', SEP,
159 'export', 'show_boxes', SEP,
160 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
161 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
162 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
163 'delete_leases', 'list_leases', SEP,
165 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
166 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
167 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
168 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
169 'plc_db_dump' , 'plc_db_restore', SEP,
170 'check_netflow','check_drl', SEP,
171 'debug_nodemanager', SEP,
172 'standby_1_through_20','yes','no',SEP,
def printable_steps (steps):
    """Return the step names as one display string, with a backslash-newline
    break wherever a SEP / SEPSFA separator marker appears.

    Renamed the parameter from `list` (which shadowed the builtin) to
    `steps`; callers pass the argument positionally so this is compatible.
    """
    single_line = " ".join(steps) + " "
    # each separator becomes a line continuation in the printed output
    return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
def valid_step (step):
    """True for real step names, False for the SEP/SEPSFA separator markers."""
    return step not in (SEP, SEPSFA)
183 # turn off the sfa-related steps when build has skipped SFA
184 # this was originally for centos5 but is still valid
185 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
187 def check_whether_build_has_sfa (rpms_url):
188 utils.header ("Checking if build provides SFA package...")
189 # warning, we're now building 'sface' so let's be a bit more picky
190 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
191 # full builds are expected to return with 0 here
193 utils.header("build does provide SFA")
195 # move all steps containing 'sfa' from default_steps to other_steps
196 utils.header("SFA package not found - removing steps with sfa or sfi")
197 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
198 TestPlc.other_steps += sfa_steps
199 for step in sfa_steps: TestPlc.default_steps.remove(step)
201 def __init__ (self,plc_spec,options):
202 self.plc_spec=plc_spec
204 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
205 self.vserverip=plc_spec['vserverip']
206 self.vservername=plc_spec['vservername']
207 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
208 self.apiserver=TestApiserver(self.url,options.dry_run)
209 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
210 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
def has_addresses_api (self):
    """Whether the attached PLCAPI server exposes the AddIpAddress method."""
    api = self.apiserver
    return api.has_method('AddIpAddress')
216 name=self.plc_spec['name']
217 return "%s.%s"%(name,self.vservername)
220 return self.plc_spec['host_box']
223 return self.test_ssh.is_local()
225 # define the API methods on this object through xmlrpc
226 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Wrap `command` to run inside the guest, then through ssh to the host box."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command, dry_run=self.options.dry_run)
def start_guest (self):
    """Start the guest VM by running the start command on the host box."""
    command = self.test_ssh.actual_command(self.start_guest_in_host(), dry_run=self.options.dry_run)
    return utils.system(command)
def stop_guest (self):
    """Stop the guest VM by running the stop command on the host box."""
    command = self.test_ssh.actual_command(self.stop_guest_in_host(), dry_run=self.options.dry_run)
    return utils.system(command)
def run_in_guest (self,command):
    """Run `command` inside the guest and return its shell exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run `command` on the host box through the build ssh helper."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command, dry_run=self.options.dry_run)
# command gets run in the plc's vm
def host_to_guest(self,command):
    """Return the host-side command line that executes `command` inside the guest."""
    virsh_prefix = "virsh -c lxc:/// lxc-enter-namespace"
    return "%s %s %s" % (virsh_prefix, self.vservername, command)
    # previous ssh-based flavour, kept for reference:
    # return "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s"%(self.vserverip,command)
# this /vservers thing is legacy...
def vm_root_in_host(self):
    """Host-side path where the guest's filesystem is rooted (with trailing slash)."""
    return "/vservers/%s/" % self.vservername
def vm_timestamp_path (self):
    """Host-side path of the timestamp file recording this guest's creation date."""
    name = self.vservername
    return "/vservers/%s/%s.timestamp" % (name, name)
# start/stop the vserver
def start_guest_in_host(self):
    """Command (run on the host box) that boots the guest."""
    return "virsh -c lxc:/// start %s" % self.vservername
def stop_guest_in_host(self):
    """Command (run on the host box) that forcibly stops the guest."""
    return "virsh -c lxc:/// destroy %s" % self.vservername
def run_in_guest_piped (self,local,remote):
    """Run `local` here and pipe its stdout into `remote` executed in the guest."""
    remote_part = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + remote_part)
268 def yum_check_installed (self, rpms):
269 if isinstance (rpms, list):
271 return self.run_in_guest("rpm -q %s"%rpms)==0
273 # does a yum install in the vs, ignore yum retcod, check with rpm
274 def yum_install (self, rpms):
275 if isinstance (rpms, list):
277 self.run_in_guest("yum -y install %s"%rpms)
278 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
279 self.run_in_guest("yum-complete-transaction -y")
280 return self.yum_check_installed (rpms)
282 def auth_root (self):
283 return {'Username':self.plc_spec['PLC_ROOT_USER'],
284 'AuthMethod':'password',
285 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
286 'Role' : self.plc_spec['role']
288 def locate_site (self,sitename):
289 for site in self.plc_spec['sites']:
290 if site['site_fields']['name'] == sitename:
292 if site['site_fields']['login_base'] == sitename:
294 raise Exception,"Cannot locate site %s"%sitename
296 def locate_node (self,nodename):
297 for site in self.plc_spec['sites']:
298 for node in site['nodes']:
299 if node['name'] == nodename:
301 raise Exception,"Cannot locate node %s"%nodename
303 def locate_hostname (self,hostname):
304 for site in self.plc_spec['sites']:
305 for node in site['nodes']:
306 if node['node_fields']['hostname'] == hostname:
308 raise Exception,"Cannot locate hostname %s"%hostname
310 def locate_key (self,key_name):
311 for key in self.plc_spec['keys']:
312 if key['key_name'] == key_name:
314 raise Exception,"Cannot locate key %s"%key_name
316 def locate_private_key_from_key_names (self, key_names):
317 # locate the first avail. key
319 for key_name in key_names:
320 key_spec=self.locate_key(key_name)
321 test_key=TestKey(self,key_spec)
322 publickey=test_key.publicpath()
323 privatekey=test_key.privatepath()
324 if os.path.isfile(publickey) and os.path.isfile(privatekey):
326 if found: return privatekey
329 def locate_slice (self, slicename):
330 for slice in self.plc_spec['slices']:
331 if slice['slice_fields']['name'] == slicename:
333 raise Exception,"Cannot locate slice %s"%slicename
335 def all_sliver_objs (self):
337 for slice_spec in self.plc_spec['slices']:
338 slicename = slice_spec['slice_fields']['name']
339 for nodename in slice_spec['nodenames']:
340 result.append(self.locate_sliver_obj (nodename,slicename))
343 def locate_sliver_obj (self,nodename,slicename):
344 (site,node) = self.locate_node(nodename)
345 slice = self.locate_slice (slicename)
347 test_site = TestSite (self, site)
348 test_node = TestNode (self, test_site,node)
349 # xxx the slice site is assumed to be the node site - mhh - probably harmless
350 test_slice = TestSlice (self, test_site, slice)
351 return TestSliver (self, test_node, test_slice)
353 def locate_first_node(self):
354 nodename=self.plc_spec['slices'][0]['nodenames'][0]
355 (site,node) = self.locate_node(nodename)
356 test_site = TestSite (self, site)
357 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """TestSliver for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
366 # all different hostboxes used in this plc
367 def get_BoxNodes(self):
368 # maps on sites and nodes, return [ (host_box,test_node) ]
370 for site_spec in self.plc_spec['sites']:
371 test_site = TestSite (self,site_spec)
372 for node_spec in site_spec['nodes']:
373 test_node = TestNode (self, test_site, node_spec)
374 if not test_node.is_real():
375 tuples.append( (test_node.host_box(),test_node) )
376 # transform into a dict { 'host_box' -> [ test_node .. ] }
378 for (box,node) in tuples:
379 if not result.has_key(box):
382 result[box].append(node)
385 # a step for checking this stuff
386 def show_boxes (self):
387 'print summary of nodes location'
388 for (box,nodes) in self.get_BoxNodes().iteritems():
389 print box,":"," + ".join( [ node.name() for node in nodes ] )
392 # make this a valid step
393 def qemu_kill_all(self):
394 'kill all qemu instances on the qemu boxes involved by this setup'
395 # this is the brute force version, kill all qemus on that host box
396 for (box,nodes) in self.get_BoxNodes().iteritems():
397 # pass the first nodename, as we don't push template-qemu on testboxes
398 nodedir=nodes[0].nodedir()
399 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
402 # make this a valid step
403 def qemu_list_all(self):
404 'list all qemu instances on the qemu boxes involved by this setup'
405 for (box,nodes) in self.get_BoxNodes().iteritems():
406 # this is the brute force version, kill all qemus on that host box
407 TestBoxQemu(box,self.options.buildname).qemu_list_all()
410 # kill only the qemus related to this test
411 def qemu_list_mine(self):
412 'list qemu instances for our nodes'
413 for (box,nodes) in self.get_BoxNodes().iteritems():
414 # the fine-grain version
419 # kill only the qemus related to this test
420 def qemu_clean_mine(self):
421 'cleanup (rm -rf) qemu instances for our nodes'
422 for (box,nodes) in self.get_BoxNodes().iteritems():
423 # the fine-grain version
428 # kill only the right qemus
429 def qemu_kill_mine(self):
430 'kill the qemu instances for our nodes'
431 for (box,nodes) in self.get_BoxNodes().iteritems():
432 # the fine-grain version
437 #################### display config
439 "show test configuration after localization"
444 # uggly hack to make sure 'run export' only reports about the 1st plc
445 # to avoid confusion - also we use 'inri_slice1' in various aliases..
448 "print cut'n paste-able stuff to export env variables to your shell"
449 # guess local domain from hostname
450 if TestPlc.exported_id>1:
451 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
453 TestPlc.exported_id+=1
454 domain=socket.gethostname().split('.',1)[1]
455 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
456 print "export BUILD=%s"%self.options.buildname
457 print "export PLCHOSTLXC=%s"%fqdn
458 print "export GUESTNAME=%s"%self.plc_spec['vservername']
459 vplcname=self.plc_spec['vservername'].split('-')[-1]
460 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
461 # find hostname of first node
462 (hostname,qemubox) = self.all_node_infos()[0]
463 print "export KVMHOST=%s.%s"%(qemubox,domain)
464 print "export NODE=%s"%(hostname)
468 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
469 def show_pass (self,passno):
470 for (key,val) in self.plc_spec.iteritems():
471 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
475 self.display_site_spec(site)
476 for node in site['nodes']:
477 self.display_node_spec(node)
478 elif key=='initscripts':
479 for initscript in val:
480 self.display_initscript_spec (initscript)
483 self.display_slice_spec (slice)
486 self.display_key_spec (key)
488 if key not in ['sites','initscripts','slices','keys', 'sfa']:
489 print '+ ',key,':',val
491 def display_site_spec (self,site):
492 print '+ ======== site',site['site_fields']['name']
493 for (k,v) in site.iteritems():
494 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
497 print '+ ','nodes : ',
499 print node['node_fields']['hostname'],'',
505 print user['name'],'',
507 elif k == 'site_fields':
508 print '+ login_base',':',v['login_base']
509 elif k == 'address_fields':
def display_initscript_spec (self,initscript):
    # one-line summary of an initscript spec (name only)
    print '+ ======== initscript',initscript['initscript_fields']['name']
def display_key_spec (self,key):
    # one-line summary of a key spec (key_name only)
    print '+ ======== key',key['key_name']
521 def display_slice_spec (self,slice):
522 print '+ ======== slice',slice['slice_fields']['name']
523 for (k,v) in slice.iteritems():
536 elif k=='slice_fields':
537 print '+ fields',':',
538 print 'max_nodes=',v['max_nodes'],
def display_node_spec (self,node):
    # compact one-line summary; the trailing commas keep all three prints
    # on the same output line (python2 print semantics)
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    # full recursive dump only in verbose mode
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    # delegate to the class-level helper with our own spec
    TestPlc.display_mapping_plc(self.plc_spec)
def display_mapping_plc (plc_spec):
    # summary for one plc: name, where its vserver lives, its addresses,
    # then one entry per node of every site
    print '+ MyPLC',plc_spec['name']
    # WARNING this would not be right for lxc-based PLC's - should be harmless though
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
def display_mapping_node (node_spec):
    # one node: its symbolic name, the qemu box hosting it, and its hostname
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
571 # write a timestamp in /vservers/<>.timestamp
572 # cannot be inside the vserver, that causes vserver .. build to cough
573 def timestamp_vs (self):
574 "Create a timestamp to remember creation date for this plc"
576 # TODO-lxc check this one
577 # a first approx. is to store the timestamp close to the VM root like vs does
578 stamp_path=self.vm_timestamp_path ()
579 stamp_dir = os.path.dirname (stamp_path)
580 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
581 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
583 # this is called inconditionnally at the beginning of the test sequence
584 # just in case this is a rerun, so if the vm is not running it's fine
586 "vserver delete the test myplc"
587 stamp_path=self.vm_timestamp_path()
588 self.run_in_host("rm -f %s"%stamp_path)
589 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
590 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
591 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
595 # historically the build was being fetched by the tests
596 # now the build pushes itself as a subdir of the tests workdir
597 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
598 def vs_create (self):
599 "vserver creation (no install done)"
600 # push the local build/ dir to the testplc box
602 # a full path for the local calls
603 build_dir=os.path.dirname(sys.argv[0])
604 # sometimes this is empty - set to "." in such a case
605 if not build_dir: build_dir="."
606 build_dir += "/build"
608 # use a standard name - will be relative to remote buildname
610 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
611 self.test_ssh.rmdir(build_dir)
612 self.test_ssh.copy(build_dir,recursive=True)
613 # the repo url is taken from arch-rpms-url
614 # with the last step (i386) removed
615 repo_url = self.options.arch_rpms_url
616 for level in [ 'arch' ]:
617 repo_url = os.path.dirname(repo_url)
619 # invoke initvm (drop support for vs)
620 script="lbuild-initvm.sh"
622 # pass the vbuild-nightly options to [lv]test-initvm
623 script_options += " -p %s"%self.options.personality
624 script_options += " -d %s"%self.options.pldistro
625 script_options += " -f %s"%self.options.fcdistro
626 script_options += " -r %s"%repo_url
627 vserver_name = self.vservername
629 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
630 script_options += " -n %s"%vserver_hostname
632 print "Cannot reverse lookup %s"%self.vserverip
633 print "This is considered fatal, as this might pollute the test results"
635 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
636 return self.run_in_host(create_vserver) == 0
639 def plc_install(self):
640 "yum install myplc, noderepo, and the plain bootstrapfs"
642 # workaround for getting pgsql8.2 on centos5
643 if self.options.fcdistro == "centos5":
644 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
647 if self.options.personality == "linux32":
649 elif self.options.personality == "linux64":
652 raise Exception, "Unsupported personality %r"%self.options.personality
653 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
656 pkgs_list.append ("slicerepo-%s"%nodefamily)
657 pkgs_list.append ("myplc")
658 pkgs_list.append ("noderepo-%s"%nodefamily)
659 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
660 pkgs_string=" ".join(pkgs_list)
661 return self.yum_install (pkgs_list)
def mod_python(self):
    """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
    packages = ['mod_python']
    return self.yum_install(packages)
669 def plc_configure(self):
671 tmpname='%s.plc-config-tty'%(self.name())
672 fileconf=open(tmpname,'w')
673 for var in [ 'PLC_NAME',
678 'PLC_MAIL_SUPPORT_ADDRESS',
681 # Above line was added for integrating SFA Testing
687 'PLC_RESERVATION_GRANULARITY',
689 'PLC_OMF_XMPP_SERVER',
692 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
693 fileconf.write('w\n')
694 fileconf.write('q\n')
696 utils.system('cat %s'%tmpname)
697 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
698 utils.system('rm %s'%tmpname)
701 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
702 # however using a vplc guest under f20 requires this trick
703 # the symptom is this: service plc start
704 # Starting plc (via systemctl): Failed to get D-Bus connection: \
705 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
706 # weird thing is the doc says f14 uses upstart by default and not systemd
707 # so this sounds kind of harmless
709 "service plc start (use a special trick to set SYSTEMCTL_SKIP_REDIRECT on f14)"
710 if self.options.fcdistro != 'f14':
711 self.run_in_guest ("service plc start")
713 # patch /sbin/service so it does not reset environment
714 # this is because our own scripts in turn call service
715 self.run_in_guest ("sed -i -e 's,env -i ,,' /sbin/service")
716 self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true /etc/init.d/plc start")
717 # retcod of service is not meaningful
721 "service plc stop (use a special trick to set SYSTEMCTL_SKIP_REDIRECT on f14)"
722 if self.options.fcdistro != 'f14':
723 self.run_in_guest ("service plc stop")
725 # patch /sbin/service so it does not reset environment
726 # this is because our own scripts in turn call service
727 self.run_in_guest ("sed -i -e 's,env -i ,,' /sbin/service")
728 self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true /etc/init.d/plc stop")
729 # retcod of service is not meaningful
733 "start the PLC vserver"
738 "stop the PLC vserver"
742 # stores the keys from the config for further use
743 def keys_store(self):
744 "stores test users ssh keys in keys/"
745 for key_spec in self.plc_spec['keys']:
746 TestKey(self,key_spec).store_key()
749 def keys_clean(self):
750 "removes keys cached in keys/"
751 utils.system("rm -rf ./keys")
754 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
755 # for later direct access to the nodes
756 def keys_fetch(self):
757 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
759 if not os.path.isdir(dir):
761 vservername=self.vservername
762 vm_root=self.vm_root_in_host()
764 prefix = 'debug_ssh_key'
765 for ext in [ 'pub', 'rsa' ] :
766 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
767 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
768 if self.test_ssh.fetch(src,dst) != 0: overall=False
772 "create sites with PLCAPI"
773 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # same machinery as the creation step, just with the delete action
    return self.do_sites(action="delete")
779 def do_sites (self,action="add"):
780 for site_spec in self.plc_spec['sites']:
781 test_site = TestSite (self,site_spec)
782 if (action != "add"):
783 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
784 test_site.delete_site()
785 # deleted with the site
786 #test_site.delete_users()
789 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
790 test_site.create_site()
791 test_site.create_users()
794 def delete_all_sites (self):
795 "Delete all sites in PLC, and related objects"
796 print 'auth_root',self.auth_root()
797 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
799 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
800 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
801 site_id=site['site_id']
802 print 'Deleting site_id',site_id
803 self.apiserver.DeleteSite(self.auth_root(),site_id)
807 "create nodes with PLCAPI"
808 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # same machinery as the creation step, just with the delete action
    return self.do_nodes(action="delete")
813 def do_nodes (self,action="add"):
814 for site_spec in self.plc_spec['sites']:
815 test_site = TestSite (self,site_spec)
817 utils.header("Deleting nodes in site %s"%test_site.name())
818 for node_spec in site_spec['nodes']:
819 test_node=TestNode(self,test_site,node_spec)
820 utils.header("Deleting %s"%test_node.name())
821 test_node.delete_node()
823 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
824 for node_spec in site_spec['nodes']:
825 utils.pprint('Creating node %s'%node_spec,node_spec)
826 test_node = TestNode (self,test_site,node_spec)
827 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the generic worker in 'add' mode
    return self.do_nodegroups("add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the generic worker in 'delete' mode
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    """Interpret `timestamp` as relative (counted in grains from `start`)
    when it is small — less than a year's worth of seconds — and as an
    absolute timestamp otherwise."""
    if timestamp >= TestPlc.YEAR:
        # already absolute
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    """Render a UNIX timestamp as 'MM-DD HH:MM:SS UTC'."""
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_tuple)
848 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
850 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
851 print 'API answered grain=',grain
852 start=(now/grain)*grain
854 # find out all nodes that are reservable
855 nodes=self.all_reservable_nodenames()
857 utils.header ("No reservable node found - proceeding without leases")
860 # attach them to the leases as specified in plc_specs
861 # this is where the 'leases' field gets interpreted as relative of absolute
862 for lease_spec in self.plc_spec['leases']:
863 # skip the ones that come with a null slice id
864 if not lease_spec['slice']: continue
865 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
866 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
867 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
868 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
869 if lease_addition['errors']:
870 utils.header("Cannot create leases, %s"%lease_addition['errors'])
873 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
874 (nodes,lease_spec['slice'],
875 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
876 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
880 def delete_leases (self):
881 "remove all leases in the myplc side"
882 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
883 utils.header("Cleaning leases %r"%lease_ids)
884 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
887 def list_leases (self):
888 "list all leases known to the myplc"
889 leases = self.apiserver.GetLeases(self.auth_root())
892 current=l['t_until']>=now
893 if self.options.verbose or current:
894 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
895 TestPlc.timestamp_printable(l['t_from']),
896 TestPlc.timestamp_printable(l['t_until'])))
899 # create nodegroups if needed, and populate
900 def do_nodegroups (self, action="add"):
901 # 1st pass to scan contents
903 for site_spec in self.plc_spec['sites']:
904 test_site = TestSite (self,site_spec)
905 for node_spec in site_spec['nodes']:
906 test_node=TestNode (self,test_site,node_spec)
907 if node_spec.has_key('nodegroups'):
908 nodegroupnames=node_spec['nodegroups']
909 if isinstance(nodegroupnames,StringTypes):
910 nodegroupnames = [ nodegroupnames ]
911 for nodegroupname in nodegroupnames:
912 if not groups_dict.has_key(nodegroupname):
913 groups_dict[nodegroupname]=[]
914 groups_dict[nodegroupname].append(test_node.name())
915 auth=self.auth_root()
917 for (nodegroupname,group_nodes) in groups_dict.iteritems():
919 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
920 # first, check if the nodetagtype is here
921 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
923 tag_type_id = tag_types[0]['tag_type_id']
925 tag_type_id = self.apiserver.AddTagType(auth,
926 {'tagname':nodegroupname,
927 'description': 'for nodegroup %s'%nodegroupname,
929 print 'located tag (type)',nodegroupname,'as',tag_type_id
931 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
933 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
934 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
935 # set node tag on all nodes, value='yes'
936 for nodename in group_nodes:
938 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
940 traceback.print_exc()
941 print 'node',nodename,'seems to already have tag',nodegroupname
944 expect_yes = self.apiserver.GetNodeTags(auth,
945 {'hostname':nodename,
946 'tagname':nodegroupname},
947 ['value'])[0]['value']
948 if expect_yes != "yes":
949 print 'Mismatch node tag on node',nodename,'got',expect_yes
952 if not self.options.dry_run:
953 print 'Cannot find tag',nodegroupname,'on node',nodename
957 print 'cleaning nodegroup',nodegroupname
958 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
960 traceback.print_exc()
964 # a list of TestNode objs
965 def all_nodes (self):
967 for site_spec in self.plc_spec['sites']:
968 test_site = TestSite (self,site_spec)
969 for node_spec in site_spec['nodes']:
970 nodes.append(TestNode (self,test_site,node_spec))
973 # return a list of tuples (nodename,qemuname)
974 def all_node_infos (self) :
976 for site_spec in self.plc_spec['sites']:
977 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
978 for node_spec in site_spec['nodes'] ]
981 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        # hostnames of the nodes whose spec declares node_type='reservable'
        # NOTE(review): accumulator initialization and return not shown here
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
        # poll PLCAPI until every node reaches <target_boot_state>, or time out
        if self.options.dry_run:
        # one completer task per node; Completer handles polling/timeout/reporting
        class CompleterTaskBootState (CompleterTask):
            def __init__ (self, test_plc,hostname):
                self.test_plc=test_plc
                self.hostname=hostname
                # last state seen, kept for the failure message
                self.last_boot_state='undef'
            def actual_run (self):
                # True when the node record reports the expected boot_state
                node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
                self.last_boot_state = node['boot_state']
                return self.last_boot_state == target_boot_state
            # NOTE(review): this return belongs to the task's message() method
            # whose def line is not shown in this excerpt
                return "CompleterTaskBootState with node %s"%self.hostname
            def failure_message (self):
                return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        utils.header("checking nodes boot state (expected %s)"%target_boot_state)
        tasks = [ CompleterTaskBootState (self,hostname) \
                      for (hostname,_) in self.all_node_infos() ]
        return Completer (tasks).run (timeout, graceout, period)
1024 def nodes_booted(self):
1025 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
        # ssh each node (debug key or admin key) until all respond or we time out
        class CompleterTaskNodeSsh (CompleterTask):
            def __init__ (self, hostname, qemuname, boot_state, local_key):
                self.hostname=hostname
                self.qemuname=qemuname
                self.boot_state=boot_state
                self.local_key=local_key
            def run (self, silent):
                # a zero exit status from a trivial remote command means reachable
                command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
                return utils.system (command, silent=silent)==0
            def failure_message (self):
                return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
        timeout = timedelta(minutes=timeout_minutes)
        graceout = timedelta(minutes=silent_minutes)
        period = timedelta(seconds=period_seconds)
        vservername=self.vservername
        # NOTE(review): 'message' and the if/else around the two key choices
        # are on lines not shown in this excerpt (debug vs production mode)
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
            local_key = "keys/key_admin.rsa"
        utils.header("checking ssh access to nodes (expected in %s mode)"%message)
        node_infos = self.all_node_infos()
        tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
                      for (nodename,qemuname) in node_infos ]
        return Completer (tasks).run (timeout, graceout, period)
1057 def ssh_node_debug(self):
1058 "Tries to ssh into nodes in debug mode with the debug ssh key"
1059 return self.check_nodes_ssh(debug=True,
1060 timeout_minutes=self.ssh_node_debug_timeout,
1061 silent_minutes=self.ssh_node_debug_silent)
1063 def ssh_node_boot(self):
1064 "Tries to ssh into nodes in production mode with the root ssh key"
1065 return self.check_nodes_ssh(debug=False,
1066 timeout_minutes=self.ssh_node_boot_timeout,
1067 silent_minutes=self.ssh_node_boot_silent)
1069 def node_bmlogs(self):
1070 "Checks that there's a non-empty dir. /var/log/bm/raw"
1071 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
    # these step bodies are empty on purpose - presumably each is applied on
    # all nodes through the node_mapper decorator defined at module top;
    # the decorator lines themselves are not visible in this excerpt (confirm)
    def qemu_local_init (self): pass
    def bootcd (self): pass
    def qemu_local_config (self): pass
    def nodestate_reinstall (self): pass
    def nodestate_safeboot (self): pass
    def nodestate_boot (self): pass
    def nodestate_show (self): pass
    def qemu_export (self): pass
1090 ### check hooks : invoke scripts from hooks/{node,slice}
1091 def check_hooks_node (self):
1092 return self.locate_first_node().check_hooks()
1093 def check_hooks_sliver (self) :
1094 return self.locate_first_sliver().check_hooks()
1096 def check_hooks (self):
1097 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1098 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        # verify that each slice's initscript left its stamp on every node of the slice
        class CompleterTaskInitscript (CompleterTask):
            def __init__ (self, test_sliver, stamp):
                self.test_sliver=test_sliver
                # NOTE(review): self.stamp is assigned on a line not shown here
            def actual_run (self):
                return self.test_sliver.check_initscript_stamp (self.stamp)
            # NOTE(review): this return belongs to the task's message() method
            # whose def line is not shown in this excerpt
                return "initscript checker for %s"%self.test_sliver.name()
            def failure_message (self):
                return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
        # one task per (slice, node) pair that declares an initscriptstamp
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            slicename=slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                print 'nodename',nodename,'slicename',slicename,'stamp',stamp
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
        return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1130 def check_initscripts(self):
1131 "check that the initscripts have triggered"
1132 return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        # push every initscript of the spec into the PLCAPI
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        # best effort - a missing initscript is not considered an error
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            # NOTE(review): the try/except wrapping the deletion is on lines
            # not shown here; this print is the except branch
            print 'deletion went wrong - probably did not exist'
1155 "create slices with PLCAPI"
1156 return self.do_slices(action="add")
1158 def delete_slices (self):
1159 "delete slices with PLCAPI"
1160 return self.do_slices(action="delete")
1162 def fill_slices (self):
1163 "add nodes in slices with PLCAPI"
1164 return self.do_slices(action="fill")
1166 def empty_slices (self):
1167 "remove nodes from slices with PLCAPI"
1168 return self.do_slices(action="empty")
    def do_slices (self, action="add"):
        # dispatch <action> (add/delete/fill/empty) onto every slice of the spec
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action=="fill":
                test_slice.add_nodes()
            elif action=="empty":
                test_slice.delete_nodes()
            # NOTE(review): the default branch (action 'add' -> create_slice)
            # has its else: line on a line not shown in this excerpt
                test_slice.create_slice()
    # mapped on every slice of the spec; arguments are (timeout, silent, period)
    @slice_mapper__tasks(20,10,15)
    def ssh_slice(self): pass
    @slice_mapper__tasks(20,19,15)
    def ssh_slice_off (self): pass

    # use another name so we can exclude/ignore it from the tests on the nightly command line
    def ssh_slice_again(self): return self.ssh_slice()
    # note that simply doing ssh_slice_again=ssh_slice would kind of work too
    # but for some reason the ignore-wrapping thing would not
    # NOTE(review): the decorator lines for the next three steps
    # are not visible in this excerpt
    def ssh_slice_basics(self): pass
    def check_vsys_defaults(self): pass
    def keys_clear_known_hosts (self): pass
1204 def plcapi_urls (self):
1205 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        # drop it where the nodes' PlanetLabConf fetcher will find it
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file to PLCAPI so nodes install it and restart nm
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})

    def debug_nodemanager (self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        # same template mechanism as speed_up_slices, with a faster/verbose setting
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
    # presumably applied on all nodes through a mapper decorator -
    # the decorator lines are not visible in this excerpt (confirm)
    def qemu_start (self) : pass
    def timestamp_qemu (self) : pass

    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
        # try ourselves first, then each of the other plcs
        for plc in [ self ] + other_plcs:
            # NOTE(review): the try/except around this lookup is on lines not shown here
                return plc.locate_sliver_obj (nodename, slicename)
        raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp (self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header ("check_tcp: no/empty config found")
        specs = self.plc_spec['tcp_specs']
        # NOTE(review): the per-spec loop header and the port selection
        # are on lines not shown in this excerpt
        # set up the server side first
            s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
            if not s_test_sliver.run_tcp_server(port,timeout=20):
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
                destination=s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination,port):
1278 # painfully enough, we need to allow for some time as netflow might show up last
1279 def check_system_slice (self):
1280 "all nodes: check that a system slice is alive"
1281 # netflow currently not working in the lxc distro
1282 # drl not built at all in the wtx distro
1283 # if we find either of them we're happy
1284 return self.check_netflow() or self.check_drl()
1287 def check_netflow (self): return self._check_system_slice ('netflow')
1288 def check_drl (self): return self._check_system_slice ('drl')
    # we have the slices up already here, so it should not take too long
    def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
        # poll every node until each reports the system slice <slicename>
        class CompleterTaskSystemSlice (CompleterTask):
            def __init__ (self, test_node, dry_run):
                self.test_node=test_node
                self.dry_run=dry_run
            def actual_run (self):
                return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
            # NOTE(review): this return belongs to the task's message() method
            # whose def line is not shown in this excerpt
                return "System slice %s @ %s"%(slicename, self.test_node.name())
            def failure_message (self):
                return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
        timeout = timedelta(minutes=timeout_minutes)
        # no grace period - print progress from the start
        silent = timedelta (0)
        period = timedelta (seconds=period_seconds)
        tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
                      for test_node in self.all_nodes() ]
        return Completer (tasks) . run (timeout, silent, period)
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): 'command' is initialized on a line not shown in this excerpt
        command += " -- --check"
        if self.options.size == 1:
            command +=  " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
1325 def sfa_install_all (self):
1326 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1327 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")
1333 def sfa_install_plc(self):
1334 "yum install sfa-plc"
1335 return self.yum_install("sfa-plc")
1337 def sfa_install_sfatables(self):
1338 "yum install sfa-sfatables"
1339 return self.yum_install ("sfa-sfatables")
1341 # for some very odd reason, this sometimes fails with the following symptom
1342 # # yum install sfa-client
1343 # Setting up Install Process
1345 # Downloading Packages:
1346 # Running rpm_check_debug
1347 # Running Transaction Test
1348 # Transaction Test Succeeded
1349 # Running Transaction
1350 # Transaction couldn't start:
1351 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1352 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1353 # even though in the same context I have
1354 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1355 # Filesystem Size Used Avail Use% Mounted on
1356 # /dev/hdv1 806G 264G 501G 35% /
1357 # none 16M 36K 16M 1% /tmp
1359 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try=self.yum_install("sfa-client")
        if first_try: return True
        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
        # locate the rpm that yum left in its cache despite the failed transaction
        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        # NOTE(review): 'rpm_path' is not defined in the visible code; it is
        # presumably derived from cached_rpm_path on a line not shown here -
        # confirm, otherwise this is a NameError
        utils.header("rpm_path=<<%s>>"%rpm_path)
        # install straight from the cached rpm, then double-check with yum
        self.run_in_guest("rpm -i %s"%cached_rpm_path)
        return self.yum_check_installed ("sfa-client")
1371 def sfa_dbclean(self):
1372 "thoroughly wipes off the SFA database"
1373 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1374 self.run_in_guest("sfa-nuke.py")==0 or \
1375 self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']
        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base=auth_sfa_spec['login_base']
            # delete the site first - best effort
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                # NOTE(review): the except clause is on a line not shown here
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # a second, scriptless pass for stubborn installs
        self.run_in_guest("rpm -e --noscripts sfa-plc")
1414 ### run unit tests for SFA
1415 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1416 # Running Transaction
1417 # Transaction couldn't start:
1418 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1419 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1420 # no matter how many Gbs are available on the testplc
1421 # could not figure out what's wrong, so...
1422 # if the yum install phase fails, consider the test is successful
1423 # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            # NOTE(review): the early 'return True' is on a line not shown here
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
        # NOTE(review): body of confdir() - its def line is not part of this excerpt
        # per-plc config dir, created on demand
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
1442 def conffile(self,filename):
1443 return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        # ensure <confdir>/<dirname> exists, wiping it first when <clean> is set
        subdirname="%s/%s"%(self.confdir(),dirname)
        # NOTE(review): the 'if clean:' guard is on a line not shown here
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            # NOTE(review): raising a plain string is a TypeError on any
            # modern python - should presumably raise an Exception instead
            raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1454 def conffile_clean (self,filename):
1455 filename=self.conffile(filename)
1456 return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        # feed sfa-config-tty with the SFA_* settings found in the spec,
        # through a locally-built command file piped into the guest
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
            # NOTE(review): the boolean-variables loop header and the
            # val='false' default are on lines not shown in this excerpt
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        # w: write config, R: restart, q: quit the tty tool
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1495 def aggregate_xml_line(self):
1496 port=self.plc_spec['sfa']['neighbours-port']
1497 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1498 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1500 def registry_xml_line(self):
1501 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1502 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                      " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        # push both files into the guest's /etc/sfa
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
            and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        # NOTE(review): the 'return' statement wrapping this call is on a line not shown here
        self.run_in_guest('sfaadmin reg import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
    # the SFA scenario steps; presumably each is applied through a mapper
    # decorator - the decorator lines are not visible in this excerpt (confirm)
    def sfa_add_site (self): pass
    def sfa_add_pi (self): pass
    def sfa_add_user(self): pass
    def sfa_update_user(self): pass
    def sfa_add_slice(self): pass
    def sfa_renew_slice(self): pass
    def sfa_discover(self): pass
    def sfa_create_slice(self): pass
    def sfa_check_slice_plc(self): pass
    def sfa_update_slice(self): pass
    def sfi_list(self): pass
    def sfi_show(self): pass
    def ssh_slice_sfa(self): pass
    def sfa_delete_user(self): pass
    def sfa_delete_slice(self): pass

        # NOTE(review): body line of the sfa_stop step - its def line is not
        # part of this excerpt
        self.run_in_guest('service sfa stop')==0

    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): 'command' is initialized on a line not shown in this excerpt
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (1.c)
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()

    def gather_slivers_var_logs(self):
        # untar each sliver's /var/log locally under logs/sliver.var-log.<sliver>/
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
1650 def gather_var_logs (self):
1651 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1652 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1653 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1654 utils.system(command)
1655 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1656 utils.system(command)
1658 def gather_pgsql_logs (self):
1659 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1660 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1661 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1662 utils.system(command)
1664 def gather_root_sfi (self):
1665 utils.system("mkdir -p logs/sfi.%s"%self.name())
1666 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1667 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1668 utils.system(command)
1670 def gather_nodes_var_logs (self):
1671 for site_spec in self.plc_spec['sites']:
1672 test_site = TestSite (self,site_spec)
1673 for node_spec in site_spec['nodes']:
1674 test_node=TestNode(self,test_site,node_spec)
1675 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1676 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1677 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1678 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1679 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the surrounding try/except and the fallback naming
        # (when dbname is absent or not a string) are on lines not shown here
            name=self.options.dbname
            if not isinstance(name,StringTypes):
        return "/root/%s-%s.sql"%(database,name)

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): 'planetab5' (sic) is used by both dump and restore,
        # so the dump file round-trips despite the odd-looking spelling
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
    def create_ignore_steps ():
        # build an <step>_ignore variant of every known step, that runs the
        # step but always reports success (see ignore_result at module top)
        for step in TestPlc.default_steps + TestPlc.other_steps:
            # default step can have a plc qualifier
            if '@' in step: (step,qualifier)=step.split('@')
            # or be defined as forced or ignored by default
            for keyword in ['_ignore','_force']:
                if step.endswith (keyword): step=step.replace(keyword,'')
            if step == SEP or step == SEPSFA : continue
            method=getattr(TestPlc,step)
            # NOTE(review): 'name' is computed on a line not shown in this excerpt
            wrapped=ignore_result(method)
            # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
            setattr(TestPlc, name, wrapped)

    # def ssh_slice_again_ignore (self): pass
    # def check_initscripts_ignore (self): pass

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""

    # each standby_<n> is presumably wrapped by the standby_generic decorator
    # defined at module top - the decorator lines are not visible here (confirm)
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass
1780 # convenience for debugging the test logic
1781 def yes (self): return True
1782 def no (self): return False