1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d minutes'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
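# Usage sketch (illustrative, not from the original file): these mappers are meant to
# decorate TestPlc step methods whose body is just 'pass'; the wrapper looks up the
# same-named method on TestNode / TestSlice and runs it over every node / slice, e.g.
#
#   @node_mapper
#   def qemu_start (self) : pass        # delegates to TestNode.qemu_start on all nodes
#
#   @slice_mapper
#   def ssh_slice_basics (self): pass   # delegates to TestSlice.ssh_slice_basics on all slices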
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run, to avoid waiting on each slice in turn
89 # especially useful when a test fails, of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks, message=method.__name__).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
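# Usage sketch (illustrative): the timing arguments are passed positionally, as done
# further down in this file, e.g.
#
#   @slice_mapper__tasks(20,10,15)      # timeout 20 min, silent 10 min, poll every 15 s
#   def ssh_slice(self): pass
#
# the wrapper then collects the CompleterTasks returned by TestSlice.ssh_slice__tasks
# for every slice and runs them all in a single Completer.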
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 # noinitscript branch: 'initscripts',
144 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
145 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
146 # keep this out of the way for now
147 'check_vsys_defaults_ignore', SEP,
148 # run this first off so it's easier to re-run on another qemu box
149 'qemu_kill_mine', SEP,
150 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
151 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
152 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
153 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
154 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
155 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
156 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
157 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
158 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
159 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
160 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
161 # but as the stress test might take a while, we sometimes missed the debug mode..
162 'probe_kvm_iptables',
163 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
164 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics',
165 # noinitscript branch: 'check_initscripts_ignore',
167 'ssh_slice_sfa@1', SEPSFA,
168 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
169 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
170 'cross_check_tcp@1', 'check_system_slice', SEP,
171 # for inspecting the slice while it runs the first time
173 # check slices are turned off properly
174 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
175 # check they are properly re-created with the same name
176 'fill_slices', 'ssh_slice_again', SEP,
177 'gather_logs_force', SEP,
180 'export', 'show_boxes', 'super_speed_up_slices', SEP,
181 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
182 # noinitscript branch: 'delete_initscripts',
183 'delete_nodegroups','delete_all_sites', SEP,
184 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
185 'delete_leases', 'list_leases', SEP,
187 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
188 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
189 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
190 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
191 'sfa_get_expires', SEPSFA,
192 'plc_db_dump' , 'plc_db_restore', SEP,
193 'check_netflow','check_drl', SEP,
194 'debug_nodemanager', 'slice_fs_present', SEP,
195 'standby_1_through_20','yes','no',SEP,
199 def printable_steps (steps):
200 single_line=" ".join(steps)+" "
201 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
203 def valid_step (step):
204 return step != SEP and step != SEPSFA
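# Example (illustrative): the SEP/SEPSFA markers act as visual line breaks when the
# step list gets printed, so something like
#   printable_steps(['plc_install','plc_configure',SEP,'plc_start'])
# comes out roughly as "plc_install plc_configure \" + newline + "plc_start "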
206 # turn off the sfa-related steps when build has skipped SFA
207 # this was originally for centos5 but is still valid
208 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
210 def _has_sfa_cached (rpms_url):
211 if os.path.isfile(has_sfa_cache_filename):
212 cached=file(has_sfa_cache_filename).read()=="yes"
213 utils.header("build provides SFA (cached):%s"%cached)
215 # warning, we're now building 'sface' so let's be a bit more picky
216 # full builds are expected to return with 0 here
217 utils.header ("Checking if build provides SFA package...")
218 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
219 encoded='yes' if retcod else 'no'
220 file(has_sfa_cache_filename,'w').write(encoded)
224 def check_whether_build_has_sfa (rpms_url):
225 has_sfa=TestPlc._has_sfa_cached(rpms_url)
227 utils.header("build does provide SFA")
229 # move all steps containing 'sfa' from default_steps to other_steps
230 utils.header("SFA package not found - removing steps with sfa or sfi")
231 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
232 TestPlc.other_steps += sfa_steps
233 for step in sfa_steps: TestPlc.default_steps.remove(step)
235 def __init__ (self,plc_spec,options):
236 self.plc_spec=plc_spec
238 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
239 self.vserverip=plc_spec['vserverip']
240 self.vservername=plc_spec['vservername']
241 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
242 self.apiserver=TestApiserver(self.url,options.dry_run)
243 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
244 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
246 def has_addresses_api (self):
247 return self.apiserver.has_method('AddIpAddress')
250 name=self.plc_spec['name']
251 return "%s.%s"%(name,self.vservername)
254 return self.plc_spec['host_box']
257 return self.test_ssh.is_local()
259 # define the API methods on this object through xmlrpc
260 # would help, but not strictly necessary
264 def actual_command_in_guest (self,command, backslash=False):
265 raw1=self.host_to_guest(command)
266 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
269 def start_guest (self):
270 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
272 def stop_guest (self):
273 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
275 def run_in_guest (self,command,backslash=False):
276 raw=self.actual_command_in_guest(command,backslash)
277 return utils.system(raw)
279 def run_in_host (self,command):
280 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
282 # backslashing turned out so awful at some point that I've turned off auto-backslashing
283 # see e.g. plc_start esp. the version for f14
284 # command gets run in the plc's vm
285 def host_to_guest(self,command):
286 vservername=self.vservername
287 personality=self.options.personality
288 raw="%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s"%locals()
289 # f14 still needs some extra help
290 if self.options.fcdistro == 'f14':
291 raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" %locals()
293 raw +=" -- /usr/bin/env %(command)s"%locals()
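# For reference (illustrative values - the vservername and personality are hypothetical):
#   host_to_guest("plc-config-tty")
# would build something like
#   linux64 virsh -c lxc:/// lxc-enter-namespace vplc01 -- /usr/bin/env plc-config-tty
# with the extra PATH= prefix only on f14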
296 # this /vservers thing is legacy...
297 def vm_root_in_host(self):
298 return "/vservers/%s/"%(self.vservername)
300 def vm_timestamp_path (self):
301 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
303 #start/stop the vserver
304 def start_guest_in_host(self):
305 return "virsh -c lxc:/// start %s"%(self.vservername)
307 def stop_guest_in_host(self):
308 return "virsh -c lxc:/// destroy %s"%(self.vservername)
311 def run_in_guest_piped (self,local,remote):
312 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
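# Usage sketch: run a local command and pipe its stdout into a command inside the
# guest, as plc_configure does below:
#   self.run_in_guest_piped('cat %s'%tmpname, 'plc-config-tty')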
314 def yum_check_installed (self, rpms):
315 if isinstance (rpms, list):
317 return self.run_in_guest("rpm -q %s"%rpms)==0
319 # does a yum install in the vs, ignore yum retcod, check with rpm
320 def yum_install (self, rpms):
321 if isinstance (rpms, list):
323 self.run_in_guest("yum -y install %s"%rpms)
324 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
325 self.run_in_guest("yum-complete-transaction -y")
326 return self.yum_check_installed (rpms)
328 def auth_root (self):
329 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
330 'AuthMethod':'password',
331 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
332 'Role' : self.plc_spec['role']
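# Usage sketch: the dict built by auth_root() is the auth argument expected by every
# PLCAPI call, e.g. (as used in delete_all_sites below):
#   sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])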
334 def locate_site (self,sitename):
335 for site in self.plc_spec['sites']:
336 if site['site_fields']['name'] == sitename:
338 if site['site_fields']['login_base'] == sitename:
340 raise Exception,"Cannot locate site %s"%sitename
342 def locate_node (self,nodename):
343 for site in self.plc_spec['sites']:
344 for node in site['nodes']:
345 if node['name'] == nodename:
347 raise Exception,"Cannot locate node %s"%nodename
349 def locate_hostname (self,hostname):
350 for site in self.plc_spec['sites']:
351 for node in site['nodes']:
352 if node['node_fields']['hostname'] == hostname:
354 raise Exception,"Cannot locate hostname %s"%hostname
356 def locate_key (self,key_name):
357 for key in self.plc_spec['keys']:
358 if key['key_name'] == key_name:
360 raise Exception,"Cannot locate key %s"%key_name
362 def locate_private_key_from_key_names (self, key_names):
363 # locate the first avail. key
365 for key_name in key_names:
366 key_spec=self.locate_key(key_name)
367 test_key=TestKey(self,key_spec)
368 publickey=test_key.publicpath()
369 privatekey=test_key.privatepath()
370 if os.path.isfile(publickey) and os.path.isfile(privatekey):
372 if found: return privatekey
375 def locate_slice (self, slicename):
376 for slice in self.plc_spec['slices']:
377 if slice['slice_fields']['name'] == slicename:
379 raise Exception,"Cannot locate slice %s"%slicename
381 def all_sliver_objs (self):
383 for slice_spec in self.plc_spec['slices']:
384 slicename = slice_spec['slice_fields']['name']
385 for nodename in slice_spec['nodenames']:
386 result.append(self.locate_sliver_obj (nodename,slicename))
389 def locate_sliver_obj (self,nodename,slicename):
390 (site,node) = self.locate_node(nodename)
391 slice = self.locate_slice (slicename)
393 test_site = TestSite (self, site)
394 test_node = TestNode (self, test_site,node)
395 # xxx the slice site is assumed to be the node site - mhh - probably harmless
396 test_slice = TestSlice (self, test_site, slice)
397 return TestSliver (self, test_node, test_slice)
399 def locate_first_node(self):
400 nodename=self.plc_spec['slices'][0]['nodenames'][0]
401 (site,node) = self.locate_node(nodename)
402 test_site = TestSite (self, site)
403 test_node = TestNode (self, test_site,node)
406 def locate_first_sliver (self):
407 slice_spec=self.plc_spec['slices'][0]
408 slicename=slice_spec['slice_fields']['name']
409 nodename=slice_spec['nodenames'][0]
410 return self.locate_sliver_obj(nodename,slicename)
412 # all different hostboxes used in this plc
413 def get_BoxNodes(self):
414 # maps on sites and nodes, return [ (host_box,test_node) ]
416 for site_spec in self.plc_spec['sites']:
417 test_site = TestSite (self,site_spec)
418 for node_spec in site_spec['nodes']:
419 test_node = TestNode (self, test_site, node_spec)
420 if not test_node.is_real():
421 tuples.append( (test_node.host_box(),test_node) )
422 # transform into a dict { 'host_box' -> [ test_node .. ] }
424 for (box,node) in tuples:
425 if not result.has_key(box):
428 result[box].append(node)
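# Shape of the result (illustrative, hostnames are hypothetical):
#   { 'qemubox1.example.org' : [ <TestNode vnode01>, <TestNode vnode02> ],
#     'qemubox2.example.org' : [ <TestNode vnode03> ] }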
431 # a step for checking this stuff
432 def show_boxes (self):
433 'print summary of nodes location'
434 for (box,nodes) in self.get_BoxNodes().iteritems():
435 print box,":"," + ".join( [ node.name() for node in nodes ] )
438 # make this a valid step
439 def qemu_kill_all(self):
440 'kill all qemu instances on the qemu boxes involved by this setup'
441 # this is the brute force version, kill all qemus on that host box
442 for (box,nodes) in self.get_BoxNodes().iteritems():
443 # pass the first nodename, as we don't push template-qemu on testboxes
444 nodedir=nodes[0].nodedir()
445 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
448 # make this a valid step
449 def qemu_list_all(self):
450 'list all qemu instances on the qemu boxes involved by this setup'
451 for (box,nodes) in self.get_BoxNodes().iteritems():
452 # this is the brute force version, list all qemus on that host box
453 TestBoxQemu(box,self.options.buildname).qemu_list_all()
456 # kill only the qemus related to this test
457 def qemu_list_mine(self):
458 'list qemu instances for our nodes'
459 for (box,nodes) in self.get_BoxNodes().iteritems():
460 # the fine-grain version
465 # kill only the qemus related to this test
466 def qemu_clean_mine(self):
467 'cleanup (rm -rf) qemu instances for our nodes'
468 for (box,nodes) in self.get_BoxNodes().iteritems():
469 # the fine-grain version
474 # kill only the right qemus
475 def qemu_kill_mine(self):
476 'kill the qemu instances for our nodes'
477 for (box,nodes) in self.get_BoxNodes().iteritems():
478 # the fine-grain version
483 #################### display config
485 "show test configuration after localization"
490 # ugly hack to make sure 'run export' only reports about the 1st plc
491 # to avoid confusion - also we use 'inri_slice1' in various aliases..
494 "print cut'n paste-able stuff to export env variables to your shell"
495 # guess local domain from hostname
496 if TestPlc.exported_id>1:
497 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
499 TestPlc.exported_id+=1
500 domain=socket.gethostname().split('.',1)[1]
501 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
502 print "export BUILD=%s"%self.options.buildname
503 print "export PLCHOSTLXC=%s"%fqdn
504 print "export GUESTNAME=%s"%self.plc_spec['vservername']
505 vplcname=self.plc_spec['vservername'].split('-')[-1]
506 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
507 # find hostname of first node
508 (hostname,qemubox) = self.all_node_infos()[0]
509 print "export KVMHOST=%s.%s"%(qemubox,domain)
510 print "export NODE=%s"%(hostname)
514 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
515 def show_pass (self,passno):
516 for (key,val) in self.plc_spec.iteritems():
517 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
521 self.display_site_spec(site)
522 for node in site['nodes']:
523 self.display_node_spec(node)
524 elif key=='initscripts':
525 for initscript in val:
526 self.display_initscript_spec (initscript)
529 self.display_slice_spec (slice)
532 self.display_key_spec (key)
534 if key not in ['sites','initscripts','slices','keys']:
535 print '+ ',key,':',val
537 def display_site_spec (self,site):
538 print '+ ======== site',site['site_fields']['name']
539 for (k,v) in site.iteritems():
540 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
543 print '+ ','nodes : ',
545 print node['node_fields']['hostname'],'',
551 print user['name'],'',
553 elif k == 'site_fields':
554 print '+ login_base',':',v['login_base']
555 elif k == 'address_fields':
561 def display_initscript_spec (self,initscript):
562 print '+ ======== initscript',initscript['initscript_fields']['name']
564 def display_key_spec (self,key):
565 print '+ ======== key',key['key_name']
567 def display_slice_spec (self,slice):
568 print '+ ======== slice',slice['slice_fields']['name']
569 for (k,v) in slice.iteritems():
582 elif k=='slice_fields':
583 print '+ fields',':',
584 print 'max_nodes=',v['max_nodes'],
589 def display_node_spec (self,node):
590 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
591 print "hostname=",node['node_fields']['hostname'],
592 print "ip=",node['interface_fields']['ip']
593 if self.options.verbose:
594 utils.pprint("node details",node,depth=3)
596 # another entry point for just showing the boxes involved
597 def display_mapping (self):
598 TestPlc.display_mapping_plc(self.plc_spec)
602 def display_mapping_plc (plc_spec):
603 print '+ MyPLC',plc_spec['name']
604 # WARNING this would not be right for lxc-based PLC's - should be harmless though
605 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
606 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
607 for site_spec in plc_spec['sites']:
608 for node_spec in site_spec['nodes']:
609 TestPlc.display_mapping_node(node_spec)
612 def display_mapping_node (node_spec):
613 print '+ NODE %s'%(node_spec['name'])
614 print '+\tqemu box %s'%node_spec['host_box']
615 print '+\thostname=%s'%node_spec['node_fields']['hostname']
617 # write a timestamp in /vservers/<>.timestamp
618 # cannot be inside the vserver, as that causes the vserver build to cough
619 def plcvm_timestamp (self):
620 "Create a timestamp to remember creation date for this plc"
622 # TODO-lxc check this one
623 # a first approx. is to store the timestamp close to the VM root like vs does
624 stamp_path=self.vm_timestamp_path ()
625 stamp_dir = os.path.dirname (stamp_path)
626 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
627 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
629 # this is called unconditionally at the beginning of the test sequence
630 # just in case this is a rerun, so if the vm is not running it's fine
631 def plcvm_delete(self):
632 "vserver delete the test myplc"
633 stamp_path=self.vm_timestamp_path()
634 self.run_in_host("rm -f %s"%stamp_path)
635 self.run_in_host("virsh -c lxc:// destroy %s"%self.vservername)
636 self.run_in_host("virsh -c lxc:// undefine %s"%self.vservername)
637 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
641 # historically the build was being fetched by the tests
642 # now the build pushes itself as a subdir of the tests workdir
643 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
644 def plcvm_create (self):
645 "vserver creation (no install done)"
646 # push the local build/ dir to the testplc box
648 # a full path for the local calls
649 build_dir=os.path.dirname(sys.argv[0])
650 # sometimes this is empty - set to "." in such a case
651 if not build_dir: build_dir="."
652 build_dir += "/build"
654 # use a standard name - will be relative to remote buildname
656 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
657 self.test_ssh.rmdir(build_dir)
658 self.test_ssh.copy(build_dir,recursive=True)
659 # the repo url is taken from arch-rpms-url
660 # with the last step (i386) removed
661 repo_url = self.options.arch_rpms_url
662 for level in [ 'arch' ]:
663 repo_url = os.path.dirname(repo_url)
665 # invoke initvm (drop support for vs)
666 script="lbuild-initvm.sh"
668 # pass the vbuild-nightly options to [lv]test-initvm
669 script_options += " -p %s"%self.options.personality
670 script_options += " -d %s"%self.options.pldistro
671 script_options += " -f %s"%self.options.fcdistro
672 script_options += " -r %s"%repo_url
673 vserver_name = self.vservername
675 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
676 script_options += " -n %s"%vserver_hostname
678 print "Cannot reverse lookup %s"%self.vserverip
679 print "This is considered fatal, as this might pollute the test results"
681 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
682 return self.run_in_host(create_vserver) == 0
685 def plc_install(self):
686 "yum install myplc, noderepo, and the plain bootstrapfs"
688 # workaround for getting pgsql8.2 on centos5
689 if self.options.fcdistro == "centos5":
690 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
693 if self.options.personality == "linux32":
695 elif self.options.personality == "linux64":
698 raise Exception, "Unsupported personality %r"%self.options.personality
699 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
702 pkgs_list.append ("slicerepo-%s"%nodefamily)
703 pkgs_list.append ("myplc")
704 pkgs_list.append ("noderepo-%s"%nodefamily)
705 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
706 pkgs_string=" ".join(pkgs_list)
707 return self.yum_install (pkgs_list)
710 def mod_python(self):
711 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
712 return self.yum_install ( [ 'mod_python' ] )
715 def plc_configure(self):
717 tmpname='%s.plc-config-tty'%(self.name())
718 fileconf=open(tmpname,'w')
719 for (var,value) in self.plc_spec['settings'].iteritems():
720 fileconf.write ('e %s\n%s\n'%(var,value))
721 fileconf.write('w\n')
722 fileconf.write('q\n')
724 utils.system('cat %s'%tmpname)
725 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
726 utils.system('rm %s'%tmpname)
729 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
730 # however using a vplc guest under f20 requires this trick
731 # the symptom is this: service plc start
732 # Starting plc (via systemctl): Failed to get D-Bus connection: \
733 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
734 # weird thing is the doc says f14 uses upstart by default and not systemd
735 # so this sounds kind of harmless
736 def start_service (self,service): return self.start_stop_service (service,'start')
737 def stop_service (self,service): return self.start_stop_service (service,'stop')
739 def start_stop_service (self, service,start_or_stop):
740 "utility to start/stop a service with the special trick for f14"
741 if self.options.fcdistro != 'f14':
742 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
744 # patch /sbin/service so it does not reset environment
745 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
746 # this is because our own scripts in turn call service
747 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
751 return self.start_service ('plc')
755 return self.stop_service ('plc')
757 def plcvm_start (self):
758 "start the PLC vserver"
762 def plcvm_stop (self):
763 "stop the PLC vserver"
767 # stores the keys from the config for further use
768 def keys_store(self):
769 "stores test users ssh keys in keys/"
770 for key_spec in self.plc_spec['keys']:
771 TestKey(self,key_spec).store_key()
774 def keys_clean(self):
775 "removes keys cached in keys/"
776 utils.system("rm -rf ./keys")
779 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
780 # for later direct access to the nodes
781 def keys_fetch(self):
782 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
784 if not os.path.isdir(dir):
786 vservername=self.vservername
787 vm_root=self.vm_root_in_host()
789 prefix = 'debug_ssh_key'
790 for ext in [ 'pub', 'rsa' ] :
791 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
792 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
793 if self.test_ssh.fetch(src,dst) != 0: overall=False
797 "create sites with PLCAPI"
798 return self.do_sites()
800 def delete_sites (self):
801 "delete sites with PLCAPI"
802 return self.do_sites(action="delete")
804 def do_sites (self,action="add"):
805 for site_spec in self.plc_spec['sites']:
806 test_site = TestSite (self,site_spec)
807 if (action != "add"):
808 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
809 test_site.delete_site()
810 # deleted with the site
811 #test_site.delete_users()
814 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
815 test_site.create_site()
816 test_site.create_users()
819 def delete_all_sites (self):
820 "Delete all sites in PLC, and related objects"
821 print 'auth_root',self.auth_root()
822 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
824 # keep the automatic site - otherwise we shoot ourselves in the foot, root_auth would not be valid anymore
825 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
826 site_id=site['site_id']
827 print 'Deleting site_id',site_id
828 self.apiserver.DeleteSite(self.auth_root(),site_id)
832 "create nodes with PLCAPI"
833 return self.do_nodes()
834 def delete_nodes (self):
835 "delete nodes with PLCAPI"
836 return self.do_nodes(action="delete")
838 def do_nodes (self,action="add"):
839 for site_spec in self.plc_spec['sites']:
840 test_site = TestSite (self,site_spec)
842 utils.header("Deleting nodes in site %s"%test_site.name())
843 for node_spec in site_spec['nodes']:
844 test_node=TestNode(self,test_site,node_spec)
845 utils.header("Deleting %s"%test_node.name())
846 test_node.delete_node()
848 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
849 for node_spec in site_spec['nodes']:
850 utils.pprint('Creating node %s'%node_spec,node_spec)
851 test_node = TestNode (self,test_site,node_spec)
852 test_node.create_node ()
855 def nodegroups (self):
856 "create nodegroups with PLCAPI"
857 return self.do_nodegroups("add")
858 def delete_nodegroups (self):
859 "delete nodegroups with PLCAPI"
860 return self.do_nodegroups("delete")
864 def translate_timestamp (start,grain,timestamp):
865 if timestamp < TestPlc.YEAR: return start+timestamp*grain
866 else: return timestamp
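# Worked example (illustrative, grain value assumed): with grain=1800s and 'start'
# aligned on the current half hour, a spec value below TestPlc.YEAR is taken as a
# relative offset, so t_from=2 becomes start+2*1800 (one hour after 'start');
# a full epoch timestamp is kept as-is.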
869 def timestamp_printable (timestamp):
870 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
873 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
875 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
876 print 'API answered grain=',grain
877 start=(now/grain)*grain
879 # find out all nodes that are reservable
880 nodes=self.all_reservable_nodenames()
882 utils.header ("No reservable node found - proceeding without leases")
885 # attach them to the leases as specified in plc_specs
886 # this is where the 'leases' field gets interpreted as relative or absolute
887 for lease_spec in self.plc_spec['leases']:
888 # skip the ones that come with a null slice id
889 if not lease_spec['slice']: continue
890 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
891 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
892 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
893 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
894 if lease_addition['errors']:
895 utils.header("Cannot create leases, %s"%lease_addition['errors'])
898 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
899 (nodes,lease_spec['slice'],
900 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
901 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
905 def delete_leases (self):
906 "remove all leases on the myplc side"
907 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
908 utils.header("Cleaning leases %r"%lease_ids)
909 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
912 def list_leases (self):
913 "list all leases known to the myplc"
914 leases = self.apiserver.GetLeases(self.auth_root())
917 current=l['t_until']>=now
918 if self.options.verbose or current:
919 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
920 TestPlc.timestamp_printable(l['t_from']),
921 TestPlc.timestamp_printable(l['t_until'])))
924 # create nodegroups if needed, and populate
925 def do_nodegroups (self, action="add"):
926 # 1st pass to scan contents
928 for site_spec in self.plc_spec['sites']:
929 test_site = TestSite (self,site_spec)
930 for node_spec in site_spec['nodes']:
931 test_node=TestNode (self,test_site,node_spec)
932 if node_spec.has_key('nodegroups'):
933 nodegroupnames=node_spec['nodegroups']
934 if isinstance(nodegroupnames,StringTypes):
935 nodegroupnames = [ nodegroupnames ]
936 for nodegroupname in nodegroupnames:
937 if not groups_dict.has_key(nodegroupname):
938 groups_dict[nodegroupname]=[]
939 groups_dict[nodegroupname].append(test_node.name())
940 auth=self.auth_root()
942 for (nodegroupname,group_nodes) in groups_dict.iteritems():
944 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
945 # first, check if the nodetagtype is here
946 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
948 tag_type_id = tag_types[0]['tag_type_id']
950 tag_type_id = self.apiserver.AddTagType(auth,
951 {'tagname':nodegroupname,
952 'description': 'for nodegroup %s'%nodegroupname,
954 print 'located tag (type)',nodegroupname,'as',tag_type_id
956 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
958 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
959 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
960 # set node tag on all nodes, value='yes'
961 for nodename in group_nodes:
963 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
965 traceback.print_exc()
966 print 'node',nodename,'seems to already have tag',nodegroupname
969 expect_yes = self.apiserver.GetNodeTags(auth,
970 {'hostname':nodename,
971 'tagname':nodegroupname},
972 ['value'])[0]['value']
973 if expect_yes != "yes":
974 print 'Mismatch node tag on node',nodename,'got',expect_yes
977 if not self.options.dry_run:
978 print 'Cannot find tag',nodegroupname,'on node',nodename
982 print 'cleaning nodegroup',nodegroupname
983 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
985 traceback.print_exc()
989 # a list of TestNode objs
990 def all_nodes (self):
992 for site_spec in self.plc_spec['sites']:
993 test_site = TestSite (self,site_spec)
994 for node_spec in site_spec['nodes']:
995 nodes.append(TestNode (self,test_site,node_spec))
998 # return a list of tuples (nodename,qemuname)
999 def all_node_infos (self) :
1001 for site_spec in self.plc_spec['sites']:
1002 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
1003 for node_spec in site_spec['nodes'] ]
1006 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
1007 def all_reservable_nodenames (self):
1009 for site_spec in self.plc_spec['sites']:
1010 for node_spec in site_spec['nodes']:
1011 node_fields=node_spec['node_fields']
1012 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1013 res.append(node_fields['hostname'])
1016 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1017 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1018 if self.options.dry_run:
1022 class CompleterTaskBootState (CompleterTask):
1023 def __init__ (self, test_plc,hostname):
1024 self.test_plc=test_plc
1025 self.hostname=hostname
1026 self.last_boot_state='undef'
1027 def actual_run (self):
1029 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1031 self.last_boot_state = node['boot_state']
1032 return self.last_boot_state == target_boot_state
1036 return "CompleterTaskBootState with node %s"%self.hostname
1037 def failure_epilogue (self):
1038 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1040 timeout = timedelta(minutes=timeout_minutes)
1041 graceout = timedelta(minutes=silent_minutes)
1042 period = timedelta(seconds=period_seconds)
1043 # the nodes that haven't checked yet - start with a full list and shrink over time
1044 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1045 tasks = [ CompleterTaskBootState (self,hostname) \
1046 for (hostname,_) in self.all_node_infos() ]
1047 message = 'check_boot_state={}'.format(target_boot_state)
1048 return Completer (tasks, message=message).run (timeout, graceout, period)
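# Note (hedged): Completer.run presumably re-runs each pending task every 'period'
# until 'timeout' expires, and stays silent during the initial 'graceout' interval;
# the same (timeout, silent, period) triple drives all the polling steps below.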
1050 def nodes_booted(self):
1051 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1053 def probe_kvm_iptables (self):
1054 (_,kvmbox) = self.all_node_infos()[0]
1055 TestSsh(kvmbox).run("iptables-save")
1059 def check_nodes_ping(self, timeout_seconds=240, period_seconds=10):
1060 class CompleterTaskPingNode(CompleterTask):
1061 def __init__ (self, hostname):
1062 self.hostname=hostname
1063 def run(self, silent):
1064 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1065 return utils.system (command, silent=silent)==0
1066 def failure_epilogue (self):
1067 print "Cannot ping node with name %s"%self.hostname
1068 timeout=timedelta (seconds=timeout_seconds)
1070 period=timedelta (seconds=period_seconds)
1071 node_infos = self.all_node_infos()
1072 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1073 return Completer (tasks, message='ping_node').run (timeout, graceout, period)
1075 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1076 def ping_node (self):
1078 return self.check_nodes_ping ()
1080 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1082 timeout = timedelta(minutes=timeout_minutes)
1083 graceout = timedelta(minutes=silent_minutes)
1084 period = timedelta(seconds=period_seconds)
1085 vservername=self.vservername
1088 completer_message = 'ssh_node_debug'
1089 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1092 completer_message = 'ssh_node_boot'
1093 local_key = "keys/key_admin.rsa"
1094 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1095 node_infos = self.all_node_infos()
1096 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
1097 for (nodename,qemuname) in node_infos ]
1098 return Completer (tasks, message=completer_message).run (timeout, graceout, period)
1100 def ssh_node_debug(self):
1101 "Tries to ssh into nodes in debug mode with the debug ssh key"
1102 return self.check_nodes_ssh(debug=True,
1103 timeout_minutes=self.ssh_node_debug_timeout,
1104 silent_minutes=self.ssh_node_debug_silent)
1106 def ssh_node_boot(self):
1107 "Tries to ssh into nodes in production mode with the root ssh key"
1108 return self.check_nodes_ssh(debug=False,
1109 timeout_minutes=self.ssh_node_boot_timeout,
1110 silent_minutes=self.ssh_node_boot_silent)
1112 def node_bmlogs(self):
1113 "Checks that there's a non-empty dir. /var/log/bm/raw"
1114 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1117 def qemu_local_init (self): pass
1119 def bootcd (self): pass
1121 def qemu_local_config (self): pass
1123 def nodestate_reinstall (self): pass
1125 def nodestate_safeboot (self): pass
1127 def nodestate_boot (self): pass
1129 def nodestate_show (self): pass
1131 def qemu_export (self): pass
1133 ### check hooks : invoke scripts from hooks/{node,slice}
1134 def check_hooks_node (self):
1135 return self.locate_first_node().check_hooks()
1136 def check_hooks_sliver (self) :
1137 return self.locate_first_sliver().check_hooks()
1139 def check_hooks (self):
1140 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1141 return self.check_hooks_node() and self.check_hooks_sliver()
1144 def do_check_initscripts(self):
1145 class CompleterTaskInitscript (CompleterTask):
1146 def __init__ (self, test_sliver, stamp):
1147 self.test_sliver=test_sliver
1149 def actual_run (self):
1150 return self.test_sliver.check_initscript_stamp (self.stamp)
1152 return "initscript checker for %s"%self.test_sliver.name()
1153 def failure_epilogue (self):
1154 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1157 for slice_spec in self.plc_spec['slices']:
1158 if not slice_spec.has_key('initscriptstamp'):
1160 stamp=slice_spec['initscriptstamp']
1161 slicename=slice_spec['slice_fields']['name']
1162 for nodename in slice_spec['nodenames']:
1163 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1164 (site,node) = self.locate_node (nodename)
1165 # xxx - passing the wrong site - probably harmless
1166 test_site = TestSite (self,site)
1167 test_slice = TestSlice (self,test_site,slice_spec)
1168 test_node = TestNode (self,test_site,node)
1169 test_sliver = TestSliver (self, test_node, test_slice)
1170 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1171 return Completer (tasks, message='check_initscripts').run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1173 def check_initscripts(self):
1174 "check that the initscripts have triggered"
1175 return self.do_check_initscripts()
1177 def initscripts (self):
1178 "create initscripts with PLCAPI"
1179 for initscript in self.plc_spec['initscripts']:
1180 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1181 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1184 def delete_initscripts (self):
1185 "delete initscripts with PLCAPI"
1186 for initscript in self.plc_spec['initscripts']:
1187 initscript_name = initscript['initscript_fields']['name']
1188 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1190 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1191 print initscript_name,'deleted'
1193 print 'deletion went wrong - probably did not exist'
1198 "create slices with PLCAPI"
1199 return self.do_slices(action="add")
1201 def delete_slices (self):
1202 "delete slices with PLCAPI"
1203 return self.do_slices(action="delete")
1205 def fill_slices (self):
1206 "add nodes in slices with PLCAPI"
1207 return self.do_slices(action="fill")
1209 def empty_slices (self):
1210 "remove nodes from slices with PLCAPI"
1211 return self.do_slices(action="empty")
1213 def do_slices (self, action="add"):
1214 for slice in self.plc_spec['slices']:
1215 site_spec = self.locate_site (slice['sitename'])
1216 test_site = TestSite(self,site_spec)
1217 test_slice=TestSlice(self,test_site,slice)
1218 if action == "delete":
1219 test_slice.delete_slice()
1220 elif action=="fill":
1221 test_slice.add_nodes()
1222 elif action=="empty":
1223 test_slice.delete_nodes()
1225 test_slice.create_slice()
1228 @slice_mapper__tasks(20,10,15)
1229 def ssh_slice(self): pass
1230 @slice_mapper__tasks(20,19,15)
1231 def ssh_slice_off (self): pass
1232 @slice_mapper__tasks(1,1,15)
1233 def slice_fs_present(self): pass
1234 @slice_mapper__tasks(1,1,15)
1235 def slice_fs_deleted(self): pass
1237 # use another name so we can exclude/ignore it from the tests on the nightly command line
1238 def ssh_slice_again(self): return self.ssh_slice()
1239 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1240 # but for some reason the ignore-wrapping thing would not
1243 def ssh_slice_basics(self): pass
1245 def check_vsys_defaults(self): pass
1248 def keys_clear_known_hosts (self): pass
1250 def plcapi_urls (self):
1251 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1253 def speed_up_slices (self):
1254 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1255 return self._speed_up_slices (30,10)
1256 def super_speed_up_slices (self):
1257 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1258 return self._speed_up_slices (5,1)
1260 def _speed_up_slices (self, p, r):
1261 # create the template on the server-side
1262 template="%s.nodemanager"%self.name()
1263 template_file = open (template,"w")
1264 template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
1265 template_file.close()
1266 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1267 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1268 self.test_ssh.copy_abs(template,remote)
1270 if not self.apiserver.GetConfFiles (self.auth_root(),
1271 {'dest':'/etc/sysconfig/nodemanager'}):
1272 self.apiserver.AddConfFile (self.auth_root(),
1273 {'dest':'/etc/sysconfig/nodemanager',
1274 'source':'PlanetLabConf/nodemanager',
1275 'postinstall_cmd':'service nm restart',})
1278 def debug_nodemanager (self):
1279 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1280 template="%s.nodemanager"%self.name()
1281 template_file = open (template,"w")
1282 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1283 template_file.close()
1284 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1285 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1286 self.test_ssh.copy_abs(template,remote)
1290 def qemu_start (self) : pass
1293 def qemu_timestamp (self) : pass
1295 # when a spec refers to a node possibly on another plc
1296 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1297 for plc in [ self ] + other_plcs:
1299 return plc.locate_sliver_obj (nodename, slicename)
1302 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1304 # implement this one as a cross step so that we can take advantage of different nodes
1305 # in multi-plcs mode
1306 def cross_check_tcp (self, other_plcs):
1307 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1308 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1309 utils.header ("check_tcp: no/empty config found")
1311 specs = self.plc_spec['tcp_specs']
1316 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1317 if not s_test_sliver.run_tcp_server(port,timeout=20):
1321 # idem for the client side
1322 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1323 # use nodename from located sliver, unless 'client_connect' is set
1324 if 'client_connect' in spec:
1325 destination = spec['client_connect']
1327 destination=s_test_sliver.test_node.name()
1328 if not c_test_sliver.run_tcp_client(destination,port):
1332 # painfully enough, we need to allow for some time as netflow might show up last
1333 def check_system_slice (self):
1334 "all nodes: check that a system slice is alive"
1335 # netflow currently not working in the lxc distro
1336 # drl not built at all in the wtx distro
1337 # if we find either of them we're happy
1338 return self.check_netflow() or self.check_drl()
1341 def check_netflow (self): return self._check_system_slice ('netflow')
1342 def check_drl (self): return self._check_system_slice ('drl')
1344 # we have the slices up already here, so it should not take too long
1345 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1346 class CompleterTaskSystemSlice (CompleterTask):
1347 def __init__ (self, test_node, dry_run):
1348 self.test_node=test_node
1349 self.dry_run=dry_run
1350 def actual_run (self):
1351 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1353 return "System slice %s @ %s"%(slicename, self.test_node.name())
1354 def failure_epilogue (self):
1355 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1356 timeout = timedelta(minutes=timeout_minutes)
1357 silent = timedelta (0)
1358 period = timedelta (seconds=period_seconds)
1359 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1360 for test_node in self.all_nodes() ]
1361 return Completer (tasks, message='_check_system_slice') . run (timeout, silent, period)
1363 def plcsh_stress_test (self):
1364 "runs the PLCAPI stress test, which checks Add/Update/Delete on all types - preserves contents"
1365 # install the stress-test in the plc image
1366 location = "/usr/share/plc_api/plcsh_stress_test.py"
1367 remote="%s/%s"%(self.vm_root_in_host(),location)
1368 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1370 command += " -- --check"
1371 if self.options.size == 1:
1372 command += " --tiny"
1373 return ( self.run_in_guest(command) == 0)
1375 # populate runs the same utility with slightly different options
1376 # in particular it runs with --preserve (don't cleanup) and without --check
1377 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1379 def sfa_install_all (self):
1380 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1381 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1383 def sfa_install_core(self):
1385 return self.yum_install ("sfa")
1387 def sfa_install_plc(self):
1388 "yum install sfa-plc"
1389 return self.yum_install("sfa-plc")
1391 def sfa_install_sfatables(self):
1392 "yum install sfa-sfatables"
1393 return self.yum_install ("sfa-sfatables")
1395 # for some very odd reason, this sometimes fails with the following symptom
1396 # # yum install sfa-client
1397 # Setting up Install Process
1399 # Downloading Packages:
1400 # Running rpm_check_debug
1401 # Running Transaction Test
1402 # Transaction Test Succeeded
1403 # Running Transaction
1404 # Transaction couldn't start:
1405 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1406 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1407 # even though in the same context I have
1408 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1409 # Filesystem Size Used Avail Use% Mounted on
1410 # /dev/hdv1 806G 264G 501G 35% /
1411 # none 16M 36K 16M 1% /tmp
1413 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1414 def sfa_install_client(self):
1415 "yum install sfa-client"
1416 first_try=self.yum_install("sfa-client")
1417 if first_try: return True
1418 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1419 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1420 utils.header("rpm_path=<<%s>>"%cached_rpm_path)
1422 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1423 return self.yum_check_installed ("sfa-client")
1425 def sfa_dbclean(self):
1426 "thoroughly wipes off the SFA database"
1427 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1428 self.run_in_guest("sfa-nuke.py")==0 or \
1429 self.run_in_guest("sfa-nuke-plc.py")==0
1431 def sfa_fsclean(self):
1432 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1433 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1436 def sfa_plcclean(self):
1437 "cleans the PLC entries that were created as a side effect of running the script"
1439 sfa_spec=self.plc_spec['sfa']
1441 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1442 login_base=auth_sfa_spec['login_base']
1443 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1444 except: print "Site %s already absent from PLC db"%login_base
1446 for spec_name in ['pi_spec','user_spec']:
1447 user_spec=auth_sfa_spec[spec_name]
1448 username=user_spec['email']
1449 try: self.apiserver.DeletePerson(self.auth_root(),username)
1451 # this in fact is expected as sites delete their members
1452 #print "User %s already absent from PLC db"%username
1455 print "REMEMBER TO RUN sfa_import AGAIN"
1458 def sfa_uninstall(self):
1459 "uses rpm to uninstall sfa - ignore result"
1460 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1461 self.run_in_guest("rm -rf /var/lib/sfa")
1462 self.run_in_guest("rm -rf /etc/sfa")
1463 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1465 self.run_in_guest("rpm -e --noscripts sfa-plc")
1468 ### run unit tests for SFA
1469 # NOTE: on f14/i386, yum install sfa-tests sometimes fails for no obvious reason
1470 # Running Transaction
1471 # Transaction couldn't start:
1472 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1473 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1474 # no matter how many Gbs are available on the testplc
1475 # could not figure out what's wrong, so...
1476 # if the yum install phase fails, consider the test successful
1477 # other combinations will eventually run it, hopefully
1478 def sfa_utest(self):
1479 "yum install sfa-tests and run SFA unittests"
1480 self.run_in_guest("yum -y install sfa-tests")
1481 # failed to install - forget it
1482 if self.run_in_guest("rpm -q sfa-tests")!=0:
1483 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1485 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1489 dirname="conf.%s"%self.plc_spec['name']
1490 if not os.path.isdir(dirname):
1491 utils.system("mkdir -p %s"%dirname)
1492 if not os.path.isdir(dirname):
1493 raise Exception,"Cannot create config dir for plc %s"%self.name()
1496 def conffile(self,filename):
1497 return "%s/%s"%(self.confdir(),filename)
1498 def confsubdir(self,dirname,clean,dry_run=False):
1499 subdirname="%s/%s"%(self.confdir(),dirname)
1501 utils.system("rm -rf %s"%subdirname)
1502 if not os.path.isdir(subdirname):
1503 utils.system("mkdir -p %s"%subdirname)
1504 if not dry_run and not os.path.isdir(subdirname):
1505 raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
1508 def conffile_clean (self,filename):
1509 filename=self.conffile(filename)
1510 return utils.system("rm -rf %s"%filename)==0
1513 def sfa_configure(self):
1514 "run sfa-config-tty"
1515 tmpname=self.conffile("sfa-config-tty")
1516 fileconf=open(tmpname,'w')
1517 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1518 fileconf.write ('e %s\n%s\n'%(var,value))
1519 # # the way plc_config handles booleans just sucks..
1522 # if self.plc_spec['sfa'][var]: val='true'
1523 # fileconf.write ('e %s\n%s\n'%(var,val))
1524 fileconf.write('w\n')
1525 fileconf.write('R\n')
1526 fileconf.write('q\n')
1528 utils.system('cat %s'%tmpname)
1529 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1532 def aggregate_xml_line(self):
1533 port=self.plc_spec['sfa']['neighbours-port']
1534 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1535 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1537 def registry_xml_line(self):
1538 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1539 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
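# Example output (illustrative values - the hrn comes from SFA_REGISTRY_ROOT_AUTH):
#   <aggregate addr="192.168.1.2" hrn="plctest" port="12346"/>
#   <registry addr="192.168.1.2" hrn="plctest" port="12345"/>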
1542 # a cross step that takes all other plcs as argument
1543 def cross_sfa_configure(self, other_plcs):
1544 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1545 # of course with a single plc, other_plcs is an empty list
1548 agg_fname=self.conffile("agg.xml")
1549 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1550 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1551 utils.header ("(Over)wrote %s"%agg_fname)
1552 reg_fname=self.conffile("reg.xml")
1553 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1554 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1555 utils.header ("(Over)wrote %s"%reg_fname)
1556 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1557 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1559 def sfa_import(self):
1560 "use sfaadmin to import from plc"
1561 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1562 return self.run_in_guest('sfaadmin reg import_registry')==0
1564 def sfa_start(self):
1566 return self.start_service('sfa')
1569 def sfi_configure(self):
1570 "Create /root/sfi on the plc side for sfi client configuration"
1571 if self.options.dry_run:
1572 utils.header("DRY RUN - skipping step")
1574 sfa_spec=self.plc_spec['sfa']
1575 # cannot use auth_sfa_mapper to pass dir_name
1576 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1577 test_slice=TestAuthSfa(self,slice_spec)
1578 dir_basename=os.path.basename(test_slice.sfi_path())
1579 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1580 test_slice.sfi_configure(dir_name)
1581 # push into the remote /root/sfi area
1582 location = test_slice.sfi_path()
1583 remote="%s/%s"%(self.vm_root_in_host(),location)
1584 self.test_ssh.mkdir(remote,abs=True)
1585 # need to strip the last level of remote, otherwise we get an extra dir level
1586 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1590 def sfi_clean (self):
1591 "clean up /root/sfi on the plc side"
1592 self.run_in_guest("rm -rf /root/sfi")
1595 def sfa_rspec_empty(self):
1596 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1597 filename="empty-rspec.xml"
1599 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1600 test_slice=TestAuthSfa(self,slice_spec)
1601 in_vm = test_slice.sfi_path()
1602 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1603 if self.test_ssh.copy_abs (filename, remote) !=0: overall=False
1607 def sfa_register_site (self): pass
1609 def sfa_register_pi (self): pass
1611 def sfa_register_user(self): pass
1613 def sfa_update_user(self): pass
1615 def sfa_register_slice(self): pass
1617 def sfa_renew_slice(self): pass
1619 def sfa_get_expires(self): pass
1621 def sfa_discover(self): pass
1623 def sfa_rspec(self): pass
1625 def sfa_allocate(self): pass
1627 def sfa_allocate_empty(self): pass
1629 def sfa_provision(self): pass
1631 def sfa_provision_empty(self): pass
1633 def sfa_check_slice_plc(self): pass
1635 def sfa_check_slice_plc_empty(self): pass
1637 def sfa_update_slice(self): pass
1639 def sfa_remove_user_from_slice(self): pass
1641 def sfa_insert_user_in_slice(self): pass
1643 def sfi_list(self): pass
1645 def sfi_show_site(self): pass
1647 def sfi_show_slice(self): pass
1649 def sfi_show_slice_researchers(self): pass
1651 def ssh_slice_sfa(self): pass
1653 def sfa_delete_user(self): pass
1655 def sfa_delete_slice(self): pass
1659 return self.stop_service ('sfa')
1661 def populate (self):
1662 "creates random entries in the PLCAPI"
1663 # install the stress-test in the plc image
1664 location = "/usr/share/plc_api/plcsh_stress_test.py"
1665 remote="%s/%s"%(self.vm_root_in_host(),location)
1666 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1668 command += " -- --preserve --short-names"
1669 local = (self.run_in_guest(command) == 0);
1670 # second run with --foreign
1671 command += ' --foreign'
1672 remote = (self.run_in_guest(command) == 0);
1673 return ( local and remote)
1675 def gather_logs (self):
1676 "gets all possible logs from the plc, the qemu nodes and the slices, for future reference"
1677 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1678 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1679 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1680 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1681 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1682 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1684 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1685 self.gather_var_logs ()
1687 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1688 self.gather_pgsql_logs ()
1690 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1691 self.gather_root_sfi ()
1693 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1694 for site_spec in self.plc_spec['sites']:
1695 test_site = TestSite (self,site_spec)
1696 for node_spec in site_spec['nodes']:
1697 test_node=TestNode(self,test_site,node_spec)
1698 test_node.gather_qemu_logs()
1700 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1701 self.gather_nodes_var_logs()
1703 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1704 self.gather_slivers_var_logs()
1707 def gather_slivers_var_logs(self):
1708 for test_sliver in self.all_sliver_objs():
1709 remote = test_sliver.tar_var_logs()
1710 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1711 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1712 utils.system(command)
1715 def gather_var_logs (self):
1716 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1717 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1718 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1719 utils.system(command)
1720 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1721 utils.system(command)
1723 def gather_pgsql_logs (self):
1724 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1725 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1726 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1727 utils.system(command)
1729 def gather_root_sfi (self):
1730 utils.system("mkdir -p logs/sfi.%s"%self.name())
1731 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1732 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1733 utils.system(command)
1735 def gather_nodes_var_logs (self):
1736 for site_spec in self.plc_spec['sites']:
1737 test_site = TestSite (self,site_spec)
1738 for node_spec in site_spec['nodes']:
1739 test_node=TestNode(self,test_site,node_spec)
1740 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1741 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1742 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1743 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1744 utils.system(command)
1747 # returns the filename to use for sql dump/restore, using options.dbname if set
1748 def dbfile (self, database):
1749 # uses options.dbname if it is found
1751 name=self.options.dbname
1752 if not isinstance(name,StringTypes):
1758 return "/root/%s-%s.sql"%(database,name)
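# Example (illustrative): with options.dbname set to 'nightly' (hypothetical value),
# dbfile('planetlab5') returns '/root/planetlab5-nightly.sql'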
1760 def plc_db_dump(self):
1761 'dump the planetlab5 DB in /root in the PLC - filename has time'
1762 dump=self.dbfile("planetlab5")
1763 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1764 utils.header('Dumped planetlab5 database in %s'%dump)
1767 def plc_db_restore(self):
1768 'restore the planetlab5 DB - looks broken, but run -n might help'
1769 dump=self.dbfile("planetlab5")
1770 # stop the httpd service
1771 self.run_in_guest('service httpd stop')
1772 # xxx - need another wrapper
1773 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1774 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1775 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1776 # start the httpd service again
1777 self.run_in_guest('service httpd start')
1779 utils.header('Database restored from ' + dump)
1782 def create_ignore_steps ():
1783 for step in TestPlc.default_steps + TestPlc.other_steps:
1784 # default step can have a plc qualifier
1785 if '@' in step: (step,qualifier)=step.split('@')
1786 # or be defined as forced or ignored by default
1787 for keyword in ['_ignore','_force']:
1788 if step.endswith (keyword): step=step.replace(keyword,'')
1789 if step == SEP or step == SEPSFA : continue
1790 method=getattr(TestPlc,step)
1792 wrapped=ignore_result(method)
1793 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1794 setattr(TestPlc, name, wrapped)
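# Effect (sketch): for a step such as 'check_vsys_defaults_ignore' in the lists above,
# this generates a TestPlc.check_vsys_defaults_ignore method that runs
# check_vsys_defaults, prints its actual result, and returns an Ignored(...) wrapper
# so the overall run is not marked as failed.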
1797 # def ssh_slice_again_ignore (self): pass
1799 # def check_initscripts_ignore (self): pass
1801 def standby_1_through_20(self):
1802 """convenience function to wait for a specified number of minutes"""
1805 def standby_1(): pass
1807 def standby_2(): pass
1809 def standby_3(): pass
1811 def standby_4(): pass
1813 def standby_5(): pass
1815 def standby_6(): pass
1817 def standby_7(): pass
1819 def standby_8(): pass
1821 def standby_9(): pass
1823 def standby_10(): pass
1825 def standby_11(): pass
1827 def standby_12(): pass
1829 def standby_13(): pass
1831 def standby_14(): pass
1833 def standby_15(): pass
1835 def standby_16(): pass
1837 def standby_17(): pass
1839 def standby_18(): pass
1841 def standby_19(): pass
1843 def standby_20(): pass
1845 # convenience for debugging the test logic
1846 def yes (self): return True
1847 def no (self): return False
1848 def fail (self): return False