1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d minutes'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
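# illustrative note (mechanism only): the number of minutes is parsed out of the
# decorated method's name, so the standby_5 placeholder defined at the end of
# this file ends up calling standby(5, dry_run)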
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
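# illustrative usage (hypothetical step name): decorating a placeholder like
#     @node_mapper
#     def some_node_step (self): pass
# makes TestPlc.some_node_step run TestNode.some_node_step (which must exist)
# on every node of every site, reporting failure as soon as one node fails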
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
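# note: this is how the *_ignore steps listed below (e.g. check_vsys_defaults_ignore)
# get generated - see create_ignore_steps at the end of this file; the real step
# still runs and its result is printed, but the sequence carries on regardless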
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid waiting for all the slices
89 # sequentially - especially useful, of course, when a test fails
90 # because we need to pass arguments, we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
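# illustrative usage, as done for ssh_slice further down:
#     @slice_mapper__tasks(20,10,15)
#     def ssh_slice(self): pass
# i.e. collect TestSlice.ssh_slice__tasks for every slice and run them all in a
# single Completer with a 20-minute timeout, 10 silent minutes, and a 15-second period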
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
144 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
145 # keep this out of the way for now
146 'check_vsys_defaults_ignore', SEP,
147 # run this first off so it's easier to re-run on another qemu box
148 'qemu_kill_mine', SEP,
149 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
150 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
151 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
152 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
153 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
154 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
155 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
156 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
157 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
158 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
159 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
160 # but as the stress test might take a while, we sometimes missed the debug mode..
161 'probe_kvm_iptables',
162 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
163 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
164 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
165 'cross_check_tcp@1', 'check_system_slice', SEP,
166 # check slices are turned off properly
167 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
168 # check they are properly re-created with the same name
169 'fill_slices', 'ssh_slice_again', SEP,
170 'gather_logs_force', SEP,
173 'export', 'show_boxes', 'super_speed_up_slices', SEP,
174 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
175 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
176 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
177 'delete_leases', 'list_leases', SEP,
179 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
180 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
181 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
182 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
183 'sfa_get_expires', SEPSFA,
184 'plc_db_dump' , 'plc_db_restore', SEP,
185 'check_netflow','check_drl', SEP,
186 'debug_nodemanager', 'slice_fs_present', SEP,
187 'standby_1_through_20','yes','no',SEP,
191 def printable_steps (list):
192 single_line=" ".join(list)+" "
193 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
195 def valid_step (step):
196 return step != SEP and step != SEPSFA
198 # turn off the sfa-related steps when build has skipped SFA
199 # this was originally for centos5 but is still valid
200 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
202 def _has_sfa_cached (rpms_url):
203 if os.path.isfile(has_sfa_cache_filename):
204 cached=file(has_sfa_cache_filename).read()=="yes"
205 utils.header("build provides SFA (cached):%s"%cached)
207 # warning, we're now building 'sface' so let's be a bit more picky
208 # full builds are expected to return with 0 here
209 utils.header ("Checking if build provides SFA package...")
210 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
211 encoded='yes' if retcod else 'no'
212 file(has_sfa_cache_filename,'w').write(encoded)
216 def check_whether_build_has_sfa (rpms_url):
217 has_sfa=TestPlc._has_sfa_cached(rpms_url)
219 utils.header("build does provide SFA")
221 # move all steps containing 'sfa' from default_steps to other_steps
222 utils.header("SFA package not found - removing steps with sfa or sfi")
223 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
224 TestPlc.other_steps += sfa_steps
225 for step in sfa_steps: TestPlc.default_steps.remove(step)
227 def __init__ (self,plc_spec,options):
228 self.plc_spec=plc_spec
230 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
231 self.vserverip=plc_spec['vserverip']
232 self.vservername=plc_spec['vservername']
233 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
234 self.apiserver=TestApiserver(self.url,options.dry_run)
235 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
236 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
238 def has_addresses_api (self):
239 return self.apiserver.has_method('AddIpAddress')
242 name=self.plc_spec['name']
243 return "%s.%s"%(name,self.vservername)
246 return self.plc_spec['host_box']
249 return self.test_ssh.is_local()
251 # define the API methods on this object through xmlrpc
252 # would help, but not strictly necessary
256 def actual_command_in_guest (self,command, backslash=False):
257 raw1=self.host_to_guest(command)
258 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
261 def start_guest (self):
262 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
264 def stop_guest (self):
265 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
267 def run_in_guest (self,command,backslash=False):
268 raw=self.actual_command_in_guest(command,backslash)
269 return utils.system(raw)
271 def run_in_host (self,command):
272 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
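# summary of the command layering above: run_in_guest(command) wraps command
# with host_to_guest() (virsh lxc-enter-namespace, see below) and then with
# test_ssh.actual_command() so it gets ssh'ed to the host box, whereas
# run_in_host(command) skips the guest wrapping and runs on the host box directly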
274 # backslashing turned out so awful at some point that I've turned off auto-backslashing
275 # see e.g. plc_start esp. the version for f14
276 #command gets run in the plc's vm
277 def host_to_guest(self,command):
278 # f14 still needs some extra help
279 if self.options.fcdistro == 'f14':
280 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
282 raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
285 # this /vservers thing is legacy...
286 def vm_root_in_host(self):
287 return "/vservers/%s/"%(self.vservername)
289 def vm_timestamp_path (self):
290 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
292 #start/stop the vserver
293 def start_guest_in_host(self):
294 return "virsh -c lxc:/// start %s"%(self.vservername)
296 def stop_guest_in_host(self):
297 return "virsh -c lxc:/// destroy %s"%(self.vservername)
300 def run_in_guest_piped (self,local,remote):
301 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
303 def yum_check_installed (self, rpms):
304 if isinstance (rpms, list):
306 return self.run_in_guest("rpm -q %s"%rpms)==0
308 # does a yum install in the vm, ignores the yum return code, and checks with rpm
309 def yum_install (self, rpms):
310 if isinstance (rpms, list):
312 self.run_in_guest("yum -y install %s"%rpms)
313 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
314 self.run_in_guest("yum-complete-transaction -y")
315 return self.yum_check_installed (rpms)
317 def auth_root (self):
318 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
319 'AuthMethod':'password',
320 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
321 'Role' : self.plc_spec['role']
323 def locate_site (self,sitename):
324 for site in self.plc_spec['sites']:
325 if site['site_fields']['name'] == sitename:
327 if site['site_fields']['login_base'] == sitename:
329 raise Exception,"Cannot locate site %s"%sitename
331 def locate_node (self,nodename):
332 for site in self.plc_spec['sites']:
333 for node in site['nodes']:
334 if node['name'] == nodename:
336 raise Exception,"Cannot locate node %s"%nodename
338 def locate_hostname (self,hostname):
339 for site in self.plc_spec['sites']:
340 for node in site['nodes']:
341 if node['node_fields']['hostname'] == hostname:
343 raise Exception,"Cannot locate hostname %s"%hostname
345 def locate_key (self,key_name):
346 for key in self.plc_spec['keys']:
347 if key['key_name'] == key_name:
349 raise Exception,"Cannot locate key %s"%key_name
351 def locate_private_key_from_key_names (self, key_names):
352 # locate the first avail. key
354 for key_name in key_names:
355 key_spec=self.locate_key(key_name)
356 test_key=TestKey(self,key_spec)
357 publickey=test_key.publicpath()
358 privatekey=test_key.privatepath()
359 if os.path.isfile(publickey) and os.path.isfile(privatekey):
361 if found: return privatekey
364 def locate_slice (self, slicename):
365 for slice in self.plc_spec['slices']:
366 if slice['slice_fields']['name'] == slicename:
368 raise Exception,"Cannot locate slice %s"%slicename
370 def all_sliver_objs (self):
372 for slice_spec in self.plc_spec['slices']:
373 slicename = slice_spec['slice_fields']['name']
374 for nodename in slice_spec['nodenames']:
375 result.append(self.locate_sliver_obj (nodename,slicename))
378 def locate_sliver_obj (self,nodename,slicename):
379 (site,node) = self.locate_node(nodename)
380 slice = self.locate_slice (slicename)
382 test_site = TestSite (self, site)
383 test_node = TestNode (self, test_site,node)
384 # xxx the slice site is assumed to be the node site - mhh - probably harmless
385 test_slice = TestSlice (self, test_site, slice)
386 return TestSliver (self, test_node, test_slice)
388 def locate_first_node(self):
389 nodename=self.plc_spec['slices'][0]['nodenames'][0]
390 (site,node) = self.locate_node(nodename)
391 test_site = TestSite (self, site)
392 test_node = TestNode (self, test_site,node)
395 def locate_first_sliver (self):
396 slice_spec=self.plc_spec['slices'][0]
397 slicename=slice_spec['slice_fields']['name']
398 nodename=slice_spec['nodenames'][0]
399 return self.locate_sliver_obj(nodename,slicename)
401 # all different hostboxes used in this plc
402 def get_BoxNodes(self):
403 # maps on sites and nodes, return [ (host_box,test_node) ]
405 for site_spec in self.plc_spec['sites']:
406 test_site = TestSite (self,site_spec)
407 for node_spec in site_spec['nodes']:
408 test_node = TestNode (self, test_site, node_spec)
409 if not test_node.is_real():
410 tuples.append( (test_node.host_box(),test_node) )
411 # transform into a dict { 'host_box' -> [ test_node .. ] }
413 for (box,node) in tuples:
414 if not result.has_key(box):
417 result[box].append(node)
420 # a step for checking this stuff
421 def show_boxes (self):
422 'print summary of nodes location'
423 for (box,nodes) in self.get_BoxNodes().iteritems():
424 print box,":"," + ".join( [ node.name() for node in nodes ] )
427 # make this a valid step
428 def qemu_kill_all(self):
429 'kill all qemu instances on the qemu boxes involved by this setup'
430 # this is the brute force version, kill all qemus on that host box
431 for (box,nodes) in self.get_BoxNodes().iteritems():
432 # pass the first nodename, as we don't push template-qemu on testboxes
433 nodedir=nodes[0].nodedir()
434 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
437 # make this a valid step
438 def qemu_list_all(self):
439 'list all qemu instances on the qemu boxes involved by this setup'
440 for (box,nodes) in self.get_BoxNodes().iteritems():
441 # this is the brute force version, list all qemus on that host box
442 TestBoxQemu(box,self.options.buildname).qemu_list_all()
445 # kill only the qemus related to this test
446 def qemu_list_mine(self):
447 'list qemu instances for our nodes'
448 for (box,nodes) in self.get_BoxNodes().iteritems():
449 # the fine-grain version
454 # kill only the qemus related to this test
455 def qemu_clean_mine(self):
456 'cleanup (rm -rf) qemu instances for our nodes'
457 for (box,nodes) in self.get_BoxNodes().iteritems():
458 # the fine-grain version
463 # kill only the right qemus
464 def qemu_kill_mine(self):
465 'kill the qemu instances for our nodes'
466 for (box,nodes) in self.get_BoxNodes().iteritems():
467 # the fine-grain version
472 #################### display config
474 "show test configuration after localization"
479 # ugly hack to make sure 'run export' only reports about the 1st plc
480 # to avoid confusion - also we use 'inri_slice1' in various aliases..
483 "print cut'n paste-able stuff to export env variables to your shell"
484 # guess local domain from hostname
485 if TestPlc.exported_id>1:
486 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
488 TestPlc.exported_id+=1
489 domain=socket.gethostname().split('.',1)[1]
490 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
491 print "export BUILD=%s"%self.options.buildname
492 print "export PLCHOSTLXC=%s"%fqdn
493 print "export GUESTNAME=%s"%self.plc_spec['vservername']
494 vplcname=self.plc_spec['vservername'].split('-')[-1]
495 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
496 # find hostname of first node
497 (hostname,qemubox) = self.all_node_infos()[0]
498 print "export KVMHOST=%s.%s"%(qemubox,domain)
499 print "export NODE=%s"%(hostname)
503 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
504 def show_pass (self,passno):
505 for (key,val) in self.plc_spec.iteritems():
506 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
510 self.display_site_spec(site)
511 for node in site['nodes']:
512 self.display_node_spec(node)
513 elif key=='initscripts':
514 for initscript in val:
515 self.display_initscript_spec (initscript)
518 self.display_slice_spec (slice)
521 self.display_key_spec (key)
523 if key not in ['sites','initscripts','slices','keys']:
524 print '+ ',key,':',val
526 def display_site_spec (self,site):
527 print '+ ======== site',site['site_fields']['name']
528 for (k,v) in site.iteritems():
529 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
532 print '+ ','nodes : ',
534 print node['node_fields']['hostname'],'',
540 print user['name'],'',
542 elif k == 'site_fields':
543 print '+ login_base',':',v['login_base']
544 elif k == 'address_fields':
550 def display_initscript_spec (self,initscript):
551 print '+ ======== initscript',initscript['initscript_fields']['name']
553 def display_key_spec (self,key):
554 print '+ ======== key',key['key_name']
556 def display_slice_spec (self,slice):
557 print '+ ======== slice',slice['slice_fields']['name']
558 for (k,v) in slice.iteritems():
571 elif k=='slice_fields':
572 print '+ fields',':',
573 print 'max_nodes=',v['max_nodes'],
578 def display_node_spec (self,node):
579 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
580 print "hostname=",node['node_fields']['hostname'],
581 print "ip=",node['interface_fields']['ip']
582 if self.options.verbose:
583 utils.pprint("node details",node,depth=3)
585 # another entry point for just showing the boxes involved
586 def display_mapping (self):
587 TestPlc.display_mapping_plc(self.plc_spec)
591 def display_mapping_plc (plc_spec):
592 print '+ MyPLC',plc_spec['name']
593 # WARNING this would not be right for lxc-based PLC's - should be harmless though
594 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
595 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
596 for site_spec in plc_spec['sites']:
597 for node_spec in site_spec['nodes']:
598 TestPlc.display_mapping_node(node_spec)
601 def display_mapping_node (node_spec):
602 print '+ NODE %s'%(node_spec['name'])
603 print '+\tqemu box %s'%node_spec['host_box']
604 print '+\thostname=%s'%node_spec['node_fields']['hostname']
606 # write a timestamp in /vservers/<>.timestamp
607 # cannot be inside the vserver, that causes vserver .. build to cough
608 def plcvm_timestamp (self):
609 "Create a timestamp to remember creation date for this plc"
611 # TODO-lxc check this one
612 # a first approx. is to store the timestamp close to the VM root like vs does
613 stamp_path=self.vm_timestamp_path ()
614 stamp_dir = os.path.dirname (stamp_path)
615 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
616 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
618 # this is called unconditionally at the beginning of the test sequence
619 # just in case this is a rerun, so if the vm is not running it's fine
620 def plcvm_delete(self):
621 "vserver delete the test myplc"
622 stamp_path=self.vm_timestamp_path()
623 self.run_in_host("rm -f %s"%stamp_path)
624 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
625 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
626 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
630 # historically the build was being fetched by the tests
631 # now the build pushes itself as a subdir of the tests workdir
632 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
633 def plcvm_create (self):
634 "vserver creation (no install done)"
635 # push the local build/ dir to the testplc box
637 # a full path for the local calls
638 build_dir=os.path.dirname(sys.argv[0])
639 # sometimes this is empty - set to "." in such a case
640 if not build_dir: build_dir="."
641 build_dir += "/build"
643 # use a standard name - will be relative to remote buildname
645 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
646 self.test_ssh.rmdir(build_dir)
647 self.test_ssh.copy(build_dir,recursive=True)
648 # the repo url is taken from arch-rpms-url
649 # with the last level (i386) removed
650 repo_url = self.options.arch_rpms_url
651 for level in [ 'arch' ]:
652 repo_url = os.path.dirname(repo_url)
654 # invoke initvm (drop support for vs)
655 script="lbuild-initvm.sh"
657 # pass the vbuild-nightly options to [lv]test-initvm
658 script_options += " -p %s"%self.options.personality
659 script_options += " -d %s"%self.options.pldistro
660 script_options += " -f %s"%self.options.fcdistro
661 script_options += " -r %s"%repo_url
662 vserver_name = self.vservername
664 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
665 script_options += " -n %s"%vserver_hostname
667 print "Cannot reverse lookup %s"%self.vserverip
668 print "This is considered fatal, as this might pollute the test results"
670 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
671 return self.run_in_host(create_vserver) == 0
674 def plc_install(self):
675 "yum install myplc, noderepo, and the plain bootstrapfs"
677 # workaround for getting pgsql8.2 on centos5
678 if self.options.fcdistro == "centos5":
679 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
682 if self.options.personality == "linux32":
684 elif self.options.personality == "linux64":
687 raise Exception, "Unsupported personality %r"%self.options.personality
688 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
691 pkgs_list.append ("slicerepo-%s"%nodefamily)
692 pkgs_list.append ("myplc")
693 pkgs_list.append ("noderepo-%s"%nodefamily)
694 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
695 pkgs_string=" ".join(pkgs_list)
696 return self.yum_install (pkgs_list)
699 def mod_python(self):
700 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
701 return self.yum_install ( [ 'mod_python' ] )
704 def plc_configure(self):
706 tmpname='%s.plc-config-tty'%(self.name())
707 fileconf=open(tmpname,'w')
708 for (var,value) in self.plc_spec['settings'].iteritems():
709 fileconf.write ('e %s\n%s\n'%(var,value))
710 fileconf.write('w\n')
711 fileconf.write('q\n')
713 utils.system('cat %s'%tmpname)
714 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
715 utils.system('rm %s'%tmpname)
718 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
719 # however using a vplc guest under f20 requires this trick
720 # the symptom is this: service plc start
721 # Starting plc (via systemctl): Failed to get D-Bus connection: \
722 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
723 # weird thing is the doc says f14 uses upstart by default and not systemd
724 # so this sounds kind of harmless
725 def start_service (self,service): return self.start_stop_service (service,'start')
726 def stop_service (self,service): return self.start_stop_service (service,'stop')
728 def start_stop_service (self, service,start_or_stop):
729 "utility to start/stop a service with the special trick for f14"
730 if self.options.fcdistro != 'f14':
731 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
733 # patch /sbin/service so it does not reset environment
734 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
735 # this is because our own scripts in turn call service
736 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
740 return self.start_service ('plc')
744 return self.stop_service ('plc')
746 def plcvm_start (self):
747 "start the PLC vserver"
751 def plcvm_stop (self):
752 "stop the PLC vserver"
756 # stores the keys from the config for further use
757 def keys_store(self):
758 "stores test users ssh keys in keys/"
759 for key_spec in self.plc_spec['keys']:
760 TestKey(self,key_spec).store_key()
763 def keys_clean(self):
764 "removes keys cached in keys/"
765 utils.system("rm -rf ./keys")
768 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
769 # for later direct access to the nodes
770 def keys_fetch(self):
771 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
773 if not os.path.isdir(dir):
775 vservername=self.vservername
776 vm_root=self.vm_root_in_host()
778 prefix = 'debug_ssh_key'
779 for ext in [ 'pub', 'rsa' ] :
780 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
781 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
782 if self.test_ssh.fetch(src,dst) != 0: overall=False
786 "create sites with PLCAPI"
787 return self.do_sites()
789 def delete_sites (self):
790 "delete sites with PLCAPI"
791 return self.do_sites(action="delete")
793 def do_sites (self,action="add"):
794 for site_spec in self.plc_spec['sites']:
795 test_site = TestSite (self,site_spec)
796 if (action != "add"):
797 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
798 test_site.delete_site()
799 # deleted with the site
800 #test_site.delete_users()
803 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
804 test_site.create_site()
805 test_site.create_users()
808 def delete_all_sites (self):
809 "Delete all sites in PLC, and related objects"
810 print 'auth_root',self.auth_root()
811 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
813 # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would no longer be valid
814 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
815 site_id=site['site_id']
816 print 'Deleting site_id',site_id
817 self.apiserver.DeleteSite(self.auth_root(),site_id)
821 "create nodes with PLCAPI"
822 return self.do_nodes()
823 def delete_nodes (self):
824 "delete nodes with PLCAPI"
825 return self.do_nodes(action="delete")
827 def do_nodes (self,action="add"):
828 for site_spec in self.plc_spec['sites']:
829 test_site = TestSite (self,site_spec)
831 utils.header("Deleting nodes in site %s"%test_site.name())
832 for node_spec in site_spec['nodes']:
833 test_node=TestNode(self,test_site,node_spec)
834 utils.header("Deleting %s"%test_node.name())
835 test_node.delete_node()
837 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
838 for node_spec in site_spec['nodes']:
839 utils.pprint('Creating node %s'%node_spec,node_spec)
840 test_node = TestNode (self,test_site,node_spec)
841 test_node.create_node ()
844 def nodegroups (self):
845 "create nodegroups with PLCAPI"
846 return self.do_nodegroups("add")
847 def delete_nodegroups (self):
848 "delete nodegroups with PLCAPI"
849 return self.do_nodegroups("delete")
853 def translate_timestamp (start,grain,timestamp):
854 if timestamp < TestPlc.YEAR: return start+timestamp*grain
855 else: return timestamp
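# worked example (assuming TestPlc.YEAR is roughly one year expressed in seconds):
# with grain=1800 and start=1400000400, a spec value of 4 is treated as relative
# and maps to 1400000400 + 4*1800 = 1400007600, whereas a full epoch timestamp
# such as 1400010000 is >= YEAR and is kept as-is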
858 def timestamp_printable (timestamp):
859 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
862 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
864 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
865 print 'API answered grain=',grain
866 start=(now/grain)*grain
868 # find out all nodes that are reservable
869 nodes=self.all_reservable_nodenames()
871 utils.header ("No reservable node found - proceeding without leases")
874 # attach them to the leases as specified in plc_specs
875 # this is where the 'leases' field gets interpreted as relative or absolute
876 for lease_spec in self.plc_spec['leases']:
877 # skip the ones that come with a null slice id
878 if not lease_spec['slice']: continue
879 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
880 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
881 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
882 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
883 if lease_addition['errors']:
884 utils.header("Cannot create leases, %s"%lease_addition['errors'])
887 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
888 (nodes,lease_spec['slice'],
889 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
890 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
894 def delete_leases (self):
895 "remove all leases in the myplc side"
896 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
897 utils.header("Cleaning leases %r"%lease_ids)
898 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
901 def list_leases (self):
902 "list all leases known to the myplc"
903 leases = self.apiserver.GetLeases(self.auth_root())
906 current=l['t_until']>=now
907 if self.options.verbose or current:
908 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
909 TestPlc.timestamp_printable(l['t_from']),
910 TestPlc.timestamp_printable(l['t_until'])))
913 # create nodegroups if needed, and populate
914 def do_nodegroups (self, action="add"):
915 # 1st pass to scan contents
917 for site_spec in self.plc_spec['sites']:
918 test_site = TestSite (self,site_spec)
919 for node_spec in site_spec['nodes']:
920 test_node=TestNode (self,test_site,node_spec)
921 if node_spec.has_key('nodegroups'):
922 nodegroupnames=node_spec['nodegroups']
923 if isinstance(nodegroupnames,StringTypes):
924 nodegroupnames = [ nodegroupnames ]
925 for nodegroupname in nodegroupnames:
926 if not groups_dict.has_key(nodegroupname):
927 groups_dict[nodegroupname]=[]
928 groups_dict[nodegroupname].append(test_node.name())
929 auth=self.auth_root()
931 for (nodegroupname,group_nodes) in groups_dict.iteritems():
933 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
934 # first, check if the nodetagtype is here
935 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
937 tag_type_id = tag_types[0]['tag_type_id']
939 tag_type_id = self.apiserver.AddTagType(auth,
940 {'tagname':nodegroupname,
941 'description': 'for nodegroup %s'%nodegroupname,
943 print 'located tag (type)',nodegroupname,'as',tag_type_id
945 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
947 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
948 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
949 # set node tag on all nodes, value='yes'
950 for nodename in group_nodes:
952 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
954 traceback.print_exc()
955 print 'node',nodename,'seems to already have tag',nodegroupname
958 expect_yes = self.apiserver.GetNodeTags(auth,
959 {'hostname':nodename,
960 'tagname':nodegroupname},
961 ['value'])[0]['value']
962 if expect_yes != "yes":
963 print 'Mismatch node tag on node',nodename,'got',expect_yes
966 if not self.options.dry_run:
967 print 'Cannot find tag',nodegroupname,'on node',nodename
971 print 'cleaning nodegroup',nodegroupname
972 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
974 traceback.print_exc()
978 # a list of TestNode objs
979 def all_nodes (self):
981 for site_spec in self.plc_spec['sites']:
982 test_site = TestSite (self,site_spec)
983 for node_spec in site_spec['nodes']:
984 nodes.append(TestNode (self,test_site,node_spec))
987 # return a list of tuples (nodename,qemuname)
988 def all_node_infos (self) :
990 for site_spec in self.plc_spec['sites']:
991 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
992 for node_spec in site_spec['nodes'] ]
995 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
996 def all_reservable_nodenames (self):
998 for site_spec in self.plc_spec['sites']:
999 for node_spec in site_spec['nodes']:
1000 node_fields=node_spec['node_fields']
1001 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1002 res.append(node_fields['hostname'])
1005 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1006 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1007 if self.options.dry_run:
1011 class CompleterTaskBootState (CompleterTask):
1012 def __init__ (self, test_plc,hostname):
1013 self.test_plc=test_plc
1014 self.hostname=hostname
1015 self.last_boot_state='undef'
1016 def actual_run (self):
1018 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1020 self.last_boot_state = node['boot_state']
1021 return self.last_boot_state == target_boot_state
1025 return "CompleterTaskBootState with node %s"%self.hostname
1026 def failure_epilogue (self):
1027 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1029 timeout = timedelta(minutes=timeout_minutes)
1030 graceout = timedelta(minutes=silent_minutes)
1031 period = timedelta(seconds=period_seconds)
1032 # the nodes that haven't checked yet - start with a full list and shrink over time
1033 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1034 tasks = [ CompleterTaskBootState (self,hostname) \
1035 for (hostname,_) in self.all_node_infos() ]
1036 return Completer (tasks).run (timeout, graceout, period)
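# note on the Completer runs used here and below: roughly, each remaining task
# is retried every 'period' until it succeeds or 'timeout' expires, and during
# the initial 'graceout' (silent) window failures are not printed - see the
# silent_minutes comment above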
1038 def nodes_booted(self):
1039 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1041 def probe_kvm_iptables (self):
1042 (_,kvmbox) = self.all_node_infos()[0]
1043 TestSsh(kvmbox).run("iptables-save")
1047 def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
1048 class CompleterTaskPingNode (CompleterTask):
1049 def __init__ (self, hostname):
1050 self.hostname=hostname
1051 def run(self,silent):
1052 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1053 return utils.system (command, silent=silent)==0
1054 def failure_epilogue (self):
1055 print "Cannot ping node with name %s"%self.hostname
1056 timeout=timedelta (seconds=timeout_seconds)
1058 period=timedelta (seconds=period_seconds)
1059 node_infos = self.all_node_infos()
1060 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1061 return Completer (tasks).run (timeout, graceout, period)
1063 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1064 def ping_node (self):
1066 return self.check_nodes_ping ()
1068 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
1070 timeout = timedelta(minutes=timeout_minutes)
1071 graceout = timedelta(minutes=silent_minutes)
1072 period = timedelta(seconds=period_seconds)
1073 vservername=self.vservername
1076 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1079 local_key = "keys/key_admin.rsa"
1080 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1081 node_infos = self.all_node_infos()
1082 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
1083 for (nodename,qemuname) in node_infos ]
1084 return Completer (tasks).run (timeout, graceout, period)
1086 def ssh_node_debug(self):
1087 "Tries to ssh into nodes in debug mode with the debug ssh key"
1088 return self.check_nodes_ssh(debug=True,
1089 timeout_minutes=self.ssh_node_debug_timeout,
1090 silent_minutes=self.ssh_node_debug_silent)
1092 def ssh_node_boot(self):
1093 "Tries to ssh into nodes in production mode with the root ssh key"
1094 return self.check_nodes_ssh(debug=False,
1095 timeout_minutes=self.ssh_node_boot_timeout,
1096 silent_minutes=self.ssh_node_boot_silent)
1098 def node_bmlogs(self):
1099 "Checks that there's a non-empty dir. /var/log/bm/raw"
1100 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1103 def qemu_local_init (self): pass
1105 def bootcd (self): pass
1107 def qemu_local_config (self): pass
1109 def nodestate_reinstall (self): pass
1111 def nodestate_safeboot (self): pass
1113 def nodestate_boot (self): pass
1115 def nodestate_show (self): pass
1117 def qemu_export (self): pass
1119 ### check hooks : invoke scripts from hooks/{node,slice}
1120 def check_hooks_node (self):
1121 return self.locate_first_node().check_hooks()
1122 def check_hooks_sliver (self) :
1123 return self.locate_first_sliver().check_hooks()
1125 def check_hooks (self):
1126 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1127 return self.check_hooks_node() and self.check_hooks_sliver()
1130 def do_check_initscripts(self):
1131 class CompleterTaskInitscript (CompleterTask):
1132 def __init__ (self, test_sliver, stamp):
1133 self.test_sliver=test_sliver
1135 def actual_run (self):
1136 return self.test_sliver.check_initscript_stamp (self.stamp)
1138 return "initscript checker for %s"%self.test_sliver.name()
1139 def failure_epilogue (self):
1140 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1143 for slice_spec in self.plc_spec['slices']:
1144 if not slice_spec.has_key('initscriptstamp'):
1146 stamp=slice_spec['initscriptstamp']
1147 slicename=slice_spec['slice_fields']['name']
1148 for nodename in slice_spec['nodenames']:
1149 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1150 (site,node) = self.locate_node (nodename)
1151 # xxx - passing the wrong site - probably harmless
1152 test_site = TestSite (self,site)
1153 test_slice = TestSlice (self,test_site,slice_spec)
1154 test_node = TestNode (self,test_site,node)
1155 test_sliver = TestSliver (self, test_node, test_slice)
1156 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1157 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1159 def check_initscripts(self):
1160 "check that the initscripts have triggered"
1161 return self.do_check_initscripts()
1163 def initscripts (self):
1164 "create initscripts with PLCAPI"
1165 for initscript in self.plc_spec['initscripts']:
1166 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1167 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1170 def delete_initscripts (self):
1171 "delete initscripts with PLCAPI"
1172 for initscript in self.plc_spec['initscripts']:
1173 initscript_name = initscript['initscript_fields']['name']
1174 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1176 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1177 print initscript_name,'deleted'
1179 print 'deletion went wrong - probably did not exist'
1184 "create slices with PLCAPI"
1185 return self.do_slices(action="add")
1187 def delete_slices (self):
1188 "delete slices with PLCAPI"
1189 return self.do_slices(action="delete")
1191 def fill_slices (self):
1192 "add nodes in slices with PLCAPI"
1193 return self.do_slices(action="fill")
1195 def empty_slices (self):
1196 "remove nodes from slices with PLCAPI"
1197 return self.do_slices(action="empty")
1199 def do_slices (self, action="add"):
1200 for slice in self.plc_spec['slices']:
1201 site_spec = self.locate_site (slice['sitename'])
1202 test_site = TestSite(self,site_spec)
1203 test_slice=TestSlice(self,test_site,slice)
1204 if action == "delete":
1205 test_slice.delete_slice()
1206 elif action=="fill":
1207 test_slice.add_nodes()
1208 elif action=="empty":
1209 test_slice.delete_nodes()
1211 test_slice.create_slice()
1214 @slice_mapper__tasks(20,10,15)
1215 def ssh_slice(self): pass
1216 @slice_mapper__tasks(20,19,15)
1217 def ssh_slice_off (self): pass
1218 @slice_mapper__tasks(1,1,15)
1219 def slice_fs_present(self): pass
1220 @slice_mapper__tasks(1,1,15)
1221 def slice_fs_deleted(self): pass
1223 # use another name so we can exclude/ignore it from the tests on the nightly command line
1224 def ssh_slice_again(self): return self.ssh_slice()
1225 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1226 # but for some reason the ignore-wrapping thing would not
1229 def ssh_slice_basics(self): pass
1231 def check_vsys_defaults(self): pass
1234 def keys_clear_known_hosts (self): pass
1236 def plcapi_urls (self):
1237 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1239 def speed_up_slices (self):
1240 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1241 return self._speed_up_slices (30,10)
1242 def super_speed_up_slices (self):
1243 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1244 return self._speed_up_slices (5,1)
1246 def _speed_up_slices (self, p, r):
1247 # create the template on the server-side
1248 template="%s.nodemanager"%self.name()
1249 template_file = open (template,"w")
1250 template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
1251 template_file.close()
1252 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1253 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1254 self.test_ssh.copy_abs(template,remote)
1256 if not self.apiserver.GetConfFiles (self.auth_root(),
1257 {'dest':'/etc/sysconfig/nodemanager'}):
1258 self.apiserver.AddConfFile (self.auth_root(),
1259 {'dest':'/etc/sysconfig/nodemanager',
1260 'source':'PlanetLabConf/nodemanager',
1261 'postinstall_cmd':'service nm restart',})
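# note (behavioural sketch): the same template + AddConfFile mechanism is reused
# by debug_nodemanager below; nodes are expected to fetch the new
# /etc/sysconfig/nodemanager through PlanetLabConf, and the postinstall_cmd
# ('service nm restart') then applies the shorter cycle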
1264 def debug_nodemanager (self):
1265 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1266 template="%s.nodemanager"%self.name()
1267 template_file = open (template,"w")
1268 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1269 template_file.close()
1270 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1271 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1272 self.test_ssh.copy_abs(template,remote)
1276 def qemu_start (self) : pass
1279 def qemu_timestamp (self) : pass
1281 # when a spec refers to a node possibly on another plc
1282 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1283 for plc in [ self ] + other_plcs:
1285 return plc.locate_sliver_obj (nodename, slicename)
1288 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1290 # implement this one as a cross step so that we can take advantage of different nodes
1291 # in multi-plcs mode
1292 def cross_check_tcp (self, other_plcs):
1293 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1294 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1295 utils.header ("check_tcp: no/empty config found")
1297 specs = self.plc_spec['tcp_specs']
1302 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1303 if not s_test_sliver.run_tcp_server(port,timeout=20):
1307 # idem for the client side
1308 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1309 # use the nodename from the located sliver, unless 'client_connect' is set
1310 if 'client_connect' in spec:
1311 destination = spec['client_connect']
1313 destination=s_test_sliver.test_node.name()
1314 if not c_test_sliver.run_tcp_client(destination,port):
1318 # painfully enough, we need to allow for some time as netflow might show up last
1319 def check_system_slice (self):
1320 "all nodes: check that a system slice is alive"
1321 # netflow currently not working in the lxc distro
1322 # drl not built at all in the wtx distro
1323 # if we find either of them we're happy
1324 return self.check_netflow() or self.check_drl()
1327 def check_netflow (self): return self._check_system_slice ('netflow')
1328 def check_drl (self): return self._check_system_slice ('drl')
1330 # we have the slices up already here, so it should not take too long
1331 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1332 class CompleterTaskSystemSlice (CompleterTask):
1333 def __init__ (self, test_node, dry_run):
1334 self.test_node=test_node
1335 self.dry_run=dry_run
1336 def actual_run (self):
1337 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1339 return "System slice %s @ %s"%(slicename, self.test_node.name())
1340 def failure_epilogue (self):
1341 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1342 timeout = timedelta(minutes=timeout_minutes)
1343 silent = timedelta (0)
1344 period = timedelta (seconds=period_seconds)
1345 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1346 for test_node in self.all_nodes() ]
1347 return Completer (tasks) . run (timeout, silent, period)
1349 def plcsh_stress_test (self):
1350 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1351 # install the stress-test in the plc image
1352 location = "/usr/share/plc_api/plcsh_stress_test.py"
1353 remote="%s/%s"%(self.vm_root_in_host(),location)
1354 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1356 command += " -- --check"
1357 if self.options.size == 1:
1358 command += " --tiny"
1359 return ( self.run_in_guest(command) == 0)
1361 # populate runs the same utility with slightly different options
1362 # in particular it runs with --preserve (don't clean up) and without --check
1363 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1365 def sfa_install_all (self):
1366 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1367 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1369 def sfa_install_core(self):
1371 return self.yum_install ("sfa")
1373 def sfa_install_plc(self):
1374 "yum install sfa-plc"
1375 return self.yum_install("sfa-plc")
1377 def sfa_install_sfatables(self):
1378 "yum install sfa-sfatables"
1379 return self.yum_install ("sfa-sfatables")
1381 # for some very odd reason, this sometimes fails with the following symptom
1382 # # yum install sfa-client
1383 # Setting up Install Process
1385 # Downloading Packages:
1386 # Running rpm_check_debug
1387 # Running Transaction Test
1388 # Transaction Test Succeeded
1389 # Running Transaction
1390 # Transaction couldn't start:
1391 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1392 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1393 # even though in the same context I have
1394 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1395 # Filesystem Size Used Avail Use% Mounted on
1396 # /dev/hdv1 806G 264G 501G 35% /
1397 # none 16M 36K 16M 1% /tmp
1399 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1400 def sfa_install_client(self):
1401 "yum install sfa-client"
1402 first_try=self.yum_install("sfa-client")
1403 if first_try: return True
1404 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1405 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1406 utils.header("rpm_path=<<%s>>"%rpm_path)
1408 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1409 return self.yum_check_installed ("sfa-client")
1411 def sfa_dbclean(self):
1412 "thoroughly wipes off the SFA database"
1413 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1414 self.run_in_guest("sfa-nuke.py")==0 or \
1415 self.run_in_guest("sfa-nuke-plc.py")==0
1417 def sfa_fsclean(self):
1418 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1419 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1422 def sfa_plcclean(self):
1423 "cleans the PLC entries that were created as a side effect of running the script"
1425 sfa_spec=self.plc_spec['sfa']
1427 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1428 login_base=auth_sfa_spec['login_base']
1429 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1430 except: print "Site %s already absent from PLC db"%login_base
1432 for spec_name in ['pi_spec','user_spec']:
1433 user_spec=auth_sfa_spec[spec_name]
1434 username=user_spec['email']
1435 try: self.apiserver.DeletePerson(self.auth_root(),username)
1437 # this in fact is expected as sites delete their members
1438 #print "User %s already absent from PLC db"%username
1441 print "REMEMBER TO RUN sfa_import AGAIN"
1444 def sfa_uninstall(self):
1445 "uses rpm to uninstall sfa - ignore result"
1446 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1447 self.run_in_guest("rm -rf /var/lib/sfa")
1448 self.run_in_guest("rm -rf /etc/sfa")
1449 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1451 self.run_in_guest("rpm -e --noscripts sfa-plc")
1454 ### run unit tests for SFA
1455 # NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1456 # Running Transaction
1457 # Transaction couldn't start:
1458 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1459 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1460 # no matter how many Gbs are available on the testplc
1461 # could not figure out what's wrong, so...
1462 # if the yum install phase fails, consider the test is successful
1463 # other combinations will eventually run it hopefully
1464 def sfa_utest(self):
1465 "yum install sfa-tests and run SFA unittests"
1466 self.run_in_guest("yum -y install sfa-tests")
1467 # failed to install - forget it
1468 if self.run_in_guest("rpm -q sfa-tests")!=0:
1469 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1471 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1475 dirname="conf.%s"%self.plc_spec['name']
1476 if not os.path.isdir(dirname):
1477 utils.system("mkdir -p %s"%dirname)
1478 if not os.path.isdir(dirname):
1479 raise Exception,"Cannot create config dir for plc %s"%self.name()
1482 def conffile(self,filename):
1483 return "%s/%s"%(self.confdir(),filename)
1484 def confsubdir(self,dirname,clean,dry_run=False):
1485 subdirname="%s/%s"%(self.confdir(),dirname)
1487 utils.system("rm -rf %s"%subdirname)
1488 if not os.path.isdir(subdirname):
1489 utils.system("mkdir -p %s"%subdirname)
1490 if not dry_run and not os.path.isdir(subdirname):
1491 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1494 def conffile_clean (self,filename):
1495 filename=self.conffile(filename)
1496 return utils.system("rm -rf %s"%filename)==0
1499 def sfa_configure(self):
1500 "run sfa-config-tty"
1501 tmpname=self.conffile("sfa-config-tty")
1502 fileconf=open(tmpname,'w')
1503 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1504 fileconf.write ('e %s\n%s\n'%(var,value))
1505 # # the way plc_config handles booleans just sucks..
1508 # if self.plc_spec['sfa'][var]: val='true'
1509 # fileconf.write ('e %s\n%s\n'%(var,val))
1510 fileconf.write('w\n')
1511 fileconf.write('R\n')
1512 fileconf.write('q\n')
1514 utils.system('cat %s'%tmpname)
1515 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1518 def aggregate_xml_line(self):
1519 port=self.plc_spec['sfa']['neighbours-port']
1520 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1521 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1523 def registry_xml_line(self):
1524 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1525 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1528 # a cross step that takes all other plcs as argument
1529 def cross_sfa_configure(self, other_plcs):
1530 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1531 # of course with a single plc, other_plcs is an empty list
1534 agg_fname=self.conffile("agg.xml")
1535 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1536 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1537 utils.header ("(Over)wrote %s"%agg_fname)
1538 reg_fname=self.conffile("reg.xml")
1539 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1540 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1541 utils.header ("(Over)wrote %s"%reg_fname)
1542 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1543 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
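# illustrative content (made-up values): with one peer plc, agg.xml would hold
# something like
#   <aggregates><aggregate addr="10.0.0.2" hrn="onelab" port="12346"/></aggregates>
# and with a single plc (other_plcs empty) both files carry empty element lists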
1545 def sfa_import(self):
1546 "use sfaadmin to import from plc"
1547 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1548 return self.run_in_guest('sfaadmin reg import_registry')==0
1550 def sfa_start(self):
1552 return self.start_service('sfa')
1555 def sfi_configure(self):
1556 "Create /root/sfi on the plc side for sfi client configuration"
1557 if self.options.dry_run:
1558 utils.header("DRY RUN - skipping step")
1560 sfa_spec=self.plc_spec['sfa']
1561 # cannot use auth_sfa_mapper to pass dir_name
1562 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1563 test_slice=TestAuthSfa(self,slice_spec)
1564 dir_basename=os.path.basename(test_slice.sfi_path())
1565 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1566 test_slice.sfi_configure(dir_name)
1567 # push into the remote /root/sfi area
1568 location = test_slice.sfi_path()
1569 remote="%s/%s"%(self.vm_root_in_host(),location)
1570 self.test_ssh.mkdir(remote,abs=True)
1571 # need to strip the last level of remote, otherwise we get an extra dir level
1572 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1576 def sfi_clean (self):
1577 "clean up /root/sfi on the plc side"
1578 self.run_in_guest("rm -rf /root/sfi")
1582 def sfa_register_site (self): pass
1584 def sfa_register_pi (self): pass
1586 def sfa_register_user(self): pass
1588 def sfa_update_user(self): pass
1590 def sfa_register_slice(self): pass
1592 def sfa_renew_slice(self): pass
1594 def sfa_get_expires(self): pass
1596 def sfa_discover(self): pass
1598 def sfa_rspec(self): pass
1600 def sfa_allocate(self): pass
1602 def sfa_provision(self): pass
1604 def sfa_check_slice_plc(self): pass
1606 def sfa_update_slice(self): pass
1608 def sfa_remove_user_from_slice(self): pass
1610 def sfa_insert_user_in_slice(self): pass
1612 def sfi_list(self): pass
1614 def sfi_show_site(self): pass
1616 def sfi_show_slice(self): pass
1618 def sfi_show_slice_researchers(self): pass
1620 def ssh_slice_sfa(self): pass
1622 def sfa_delete_user(self): pass
1624 def sfa_delete_slice(self): pass
1628 return self.stop_service ('sfa')
1630 def populate (self):
1631 "creates random entries in the PLCAPI"
1632 # install the stress-test in the plc image
1633 location = "/usr/share/plc_api/plcsh_stress_test.py"
1634 remote="%s/%s"%(self.vm_root_in_host(),location)
1635 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1637 command += " -- --preserve --short-names"
1638 local = (self.run_in_guest(command) == 0);
1639 # second run with --foreign
1640 command += ' --foreign'
1641 remote = (self.run_in_guest(command) == 0);
1642 return ( local and remote)
1644 def gather_logs (self):
1645 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1646 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1647 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1648 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1649 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1650 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1651 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1653 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1654 self.gather_var_logs ()
1656 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1657 self.gather_pgsql_logs ()
1659 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1660 self.gather_root_sfi ()
1662 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1663 for site_spec in self.plc_spec['sites']:
1664 test_site = TestSite (self,site_spec)
1665 for node_spec in site_spec['nodes']:
1666 test_node=TestNode(self,test_site,node_spec)
1667 test_node.gather_qemu_logs()
1669 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1670 self.gather_nodes_var_logs()
1672 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1673 self.gather_slivers_var_logs()
1676 def gather_slivers_var_logs(self):
1677 for test_sliver in self.all_sliver_objs():
1678 remote = test_sliver.tar_var_logs()
1679 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1680 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1681 utils.system(command)
1684 def gather_var_logs (self):
1685 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1686 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1687 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1688 utils.system(command)
1689 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1690 utils.system(command)
1692 def gather_pgsql_logs (self):
1693 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1694 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1695 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1696 utils.system(command)
1698 def gather_root_sfi (self):
1699 utils.system("mkdir -p logs/sfi.%s"%self.name())
1700 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1701 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1702 utils.system(command)
1704 def gather_nodes_var_logs (self):
1705 for site_spec in self.plc_spec['sites']:
1706 test_site = TestSite (self,site_spec)
1707 for node_spec in site_spec['nodes']:
1708 test_node=TestNode(self,test_site,node_spec)
1709 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1710 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1711 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1712 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1713 utils.system(command)
1716 # returns the filename to use for sql dump/restore, using options.dbname if set
1717 def dbfile (self, database):
1718 # uses options.dbname if it is found
1720 name=self.options.dbname
1721 if not isinstance(name,StringTypes):
1727 return "/root/%s-%s.sql"%(database,name)
1729 def plc_db_dump(self):
1730 'dump the planetlab5 DB in /root in the PLC - filename has time'
1731 dump=self.dbfile("planetlab5")
1732 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1733 utils.header('Dumped planetlab5 database in %s'%dump)
1736 def plc_db_restore(self):
1737 'restore the planetlab5 DB - looks broken, but run -n might help'
1738 dump=self.dbfile("planetlab5")
1739 ##stop httpd service
1740 self.run_in_guest('service httpd stop')
1741 # xxx - need another wrapper
1742 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1743 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1744 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1745 ##starting httpd service
1746 self.run_in_guest('service httpd start')
1748 utils.header('Database restored from ' + dump)
1751 def create_ignore_steps ():
1752 for step in TestPlc.default_steps + TestPlc.other_steps:
1753 # default step can have a plc qualifier
1754 if '@' in step: (step,qualifier)=step.split('@')
1755 # or be defined as forced or ignored by default
1756 for keyword in ['_ignore','_force']:
1757 if step.endswith (keyword): step=step.replace(keyword,'')
1758 if step == SEP or step == SEPSFA : continue
1759 method=getattr(TestPlc,step)
1761 wrapped=ignore_result(method)
1762 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1763 setattr(TestPlc, name, wrapped)
1766 # def ssh_slice_again_ignore (self): pass
1768 # def check_initscripts_ignore (self): pass
1770 def standby_1_through_20(self):
1771 """convenience function to wait for a specified number of minutes"""
1774 def standby_1(): pass
1776 def standby_2(): pass
1778 def standby_3(): pass
1780 def standby_4(): pass
1782 def standby_5(): pass
1784 def standby_6(): pass
1786 def standby_7(): pass
1788 def standby_8(): pass
1790 def standby_9(): pass
1792 def standby_10(): pass
1794 def standby_11(): pass
1796 def standby_12(): pass
1798 def standby_13(): pass
1800 def standby_14(): pass
1802 def standby_15(): pass
1804 def standby_16(): pass
1806 def standby_17(): pass
1808 def standby_18(): pass
1810 def standby_19(): pass
1812 def standby_20(): pass
1814 # convenience for debugging the test logic
1815 def yes (self): return True
1816 def no (self): return False