1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from Completer import Completer, CompleterTask
14 from TestSite import TestSite
15 from TestNode import TestNode, CompleterTaskNodeSsh
16 from TestUser import TestUser
17 from TestKey import TestKey
18 from TestSlice import TestSlice
19 from TestSliver import TestSliver
20 from TestBoxQemu import TestBoxQemu
21 from TestSsh import TestSsh
22 from TestApiserver import TestApiserver
23 from TestAuthSfa import TestAuthSfa
24 from PlcapiUrlScanner import PlcapiUrlScanner
26 has_sfa_cache_filename="sfa-cache"
28 # step methods must take (self) and return a boolean (options is a member of the class)
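# purely illustrative sketch (hypothetical step name, not part of the step lists below):
#     def my_step (self):
#         "one-line description - this docstring is what gets shown when listing steps"
#         return self.run_in_guest("some-command") == 0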
30 def standby(minutes,dry_run):
31 utils.header('Entering StandBy for %d mn'%minutes)
35 time.sleep(60*minutes)
38 def standby_generic (func):
40 minutes=int(func.__name__.split("_")[1])
41 return standby(minutes,self.options.dry_run)
44 def node_mapper (method):
45 def map_on_nodes(self,*args, **kwds):
47 node_method = TestNode.__dict__[method.__name__]
48 for test_node in self.all_nodes():
49 if not node_method(test_node, *args, **kwds): overall=False
51 # maintain __name__ for ignore_result
52 map_on_nodes.__name__=method.__name__
53 # restore the doc text
54 map_on_nodes.__doc__=TestNode.__dict__[method.__name__].__doc__
57 def slice_mapper (method):
58 def map_on_slices(self):
60 slice_method = TestSlice.__dict__[method.__name__]
61 for slice_spec in self.plc_spec['slices']:
62 site_spec = self.locate_site (slice_spec['sitename'])
63 test_site = TestSite(self,site_spec)
64 test_slice=TestSlice(self,test_site,slice_spec)
65 if not slice_method(test_slice,self.options): overall=False
67 # maintain __name__ for ignore_result
68 map_on_slices.__name__=method.__name__
69 # restore the doc text
70 map_on_slices.__doc__=TestSlice.__dict__[method.__name__].__doc__
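# illustration of how these mappers are meant to be used - as plain decorators on
# TestPlc step methods, e.g. (hedged sketch, mirroring the pass-through steps further down):
#     @node_mapper
#     def qemu_local_init (self): pass
# the wrapper then loops over all nodes (resp. all slices) and ANDs the individual results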
73 # run a step but return True so that we can go on
74 def ignore_result (method):
76 # ssh_slice_ignore->ssh_slice
77 ref_name=method.__name__.replace('_ignore','').replace('force_','')
78 ref_method=TestPlc.__dict__[ref_name]
79 result=ref_method(self)
80 print "Actual (but ignored) result for %(ref_name)s is %(result)s"%locals()
81 return Ignored (result)
82 name=method.__name__.replace('_ignore','').replace('force_','')
83 ignoring.__name__=name
84 ignoring.__doc__="ignored version of " + name
87 # a variant that expects the TestSlice method to return a list of CompleterTasks that
88 # are then merged into a single Completer run to avoid waiting for all the slices
89 # esp. useful when a test fails, of course
90 # because we need to pass arguments we use a class instead..
91 class slice_mapper__tasks (object):
92 # could not get this to work with named arguments
93 def __init__ (self,timeout_minutes,silent_minutes,period_seconds):
94 self.timeout=timedelta(minutes=timeout_minutes)
95 self.silent=timedelta(minutes=silent_minutes)
96 self.period=timedelta(seconds=period_seconds)
97 def __call__ (self, method):
99 # compute augmented method name
100 method_name = method.__name__ + "__tasks"
101 # locate in TestSlice
102 slice_method = TestSlice.__dict__[ method_name ]
105 for slice_spec in self.plc_spec['slices']:
106 site_spec = self.locate_site (slice_spec['sitename'])
107 test_site = TestSite(self,site_spec)
108 test_slice=TestSlice(self,test_site,slice_spec)
109 tasks += slice_method (test_slice, self.options)
110 return Completer (tasks, message=method.__name__).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
111 # restore the doc text from the TestSlice method even if a bit odd
112 wrappee.__name__ = method.__name__
113 wrappee.__doc__ = slice_method.__doc__
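# for illustration: this class-based decorator is applied further down with positional
# arguments, e.g. @slice_mapper__tasks(20,10,15) on ssh_slice, meaning a 20-minute timeout,
# 10 silent minutes, and a 15-second polling period for the merged Completer run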
116 def auth_sfa_mapper (method):
119 auth_method = TestAuthSfa.__dict__[method.__name__]
120 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
121 test_auth=TestAuthSfa(self,auth_spec)
122 if not auth_method(test_auth,self.options): overall=False
124 # restore the doc text
125 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
129 def __init__ (self,result):
139 'plcvm_delete','plcvm_timestamp','plcvm_create', SEP,
140 'plc_install', 'plc_configure', 'plc_start', SEP,
141 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
142 'plcapi_urls','speed_up_slices', SEP,
143 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
144 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
145 # keep this out of the way for now
146 'check_vsys_defaults_ignore', SEP,
147 # run this first off so it's easier to re-run on another qemu box
148 'qemu_kill_mine', SEP,
149 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
150 'qemu_clean_mine', 'qemu_export', 'qemu_start', 'qemu_timestamp', SEP,
151 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
152 'sfi_configure@1', 'sfa_register_site@1','sfa_register_pi@1', SEPSFA,
153 'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
154 'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
155 'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
156 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
157 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
158 'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
159 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
160 # but as the stress test might take a while, we sometimes missed the debug mode..
161 'probe_kvm_iptables',
162 'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
163 'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
164 'ssh_slice_sfa@1', SEPSFA,
165 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
166 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
167 'cross_check_tcp@1', 'check_system_slice', SEP,
168 # for inspecting the slice while it runs the first time
170 # check slices are turned off properly
171 'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
172 # check they are properly re-created with the same name
173 'fill_slices', 'ssh_slice_again', SEP,
174 'gather_logs_force', SEP,
177 'export', 'show_boxes', 'super_speed_up_slices', SEP,
178 'check_hooks', 'plc_stop', 'plcvm_start', 'plcvm_stop', SEP,
179 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
180 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
181 'delete_leases', 'list_leases', SEP,
183 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
184 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
185 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
186 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
187 'sfa_get_expires', SEPSFA,
188 'plc_db_dump' , 'plc_db_restore', SEP,
189 'check_netflow','check_drl', SEP,
190 'debug_nodemanager', 'slice_fs_present', SEP,
191 'standby_1_through_20','yes','no',SEP,
195 def printable_steps (list):
196 single_line=" ".join(list)+" "
197 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
199 def valid_step (step):
200 return step != SEP and step != SEPSFA
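# example (illustrative): printable_steps(['plc_install','plc_configure',SEP,'plc_start'])
# returns "plc_install plc_configure \" + newline + "plc_start " - i.e. the SEP/SEPSFA
# markers are rendered as shell-style line continuations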
202 # turn off the sfa-related steps when build has skipped SFA
203 # this was originally for centos5 but is still valid
204 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
206 def _has_sfa_cached (rpms_url):
207 if os.path.isfile(has_sfa_cache_filename):
208 cached=file(has_sfa_cache_filename).read()=="yes"
209 utils.header("build provides SFA (cached):%s"%cached)
211 # warning, we're now building 'sface' so let's be a bit more picky
212 # full builds are expected to return with 0 here
213 utils.header ("Checking if build provides SFA package...")
214 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)==0
215 encoded='yes' if retcod else 'no'
216 file(has_sfa_cache_filename,'w').write(encoded)
220 def check_whether_build_has_sfa (rpms_url):
221 has_sfa=TestPlc._has_sfa_cached(rpms_url)
223 utils.header("build does provide SFA")
225 # move all steps containing 'sfa' from default_steps to other_steps
226 utils.header("SFA package not found - removing steps with sfa or sfi")
227 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
228 TestPlc.other_steps += sfa_steps
229 for step in sfa_steps: TestPlc.default_steps.remove(step)
231 def __init__ (self,plc_spec,options):
232 self.plc_spec=plc_spec
234 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
235 self.vserverip=plc_spec['vserverip']
236 self.vservername=plc_spec['vservername']
237 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
238 self.apiserver=TestApiserver(self.url,options.dry_run)
239 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
240 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
242 def has_addresses_api (self):
243 return self.apiserver.has_method('AddIpAddress')
246 name=self.plc_spec['name']
247 return "%s.%s"%(name,self.vservername)
250 return self.plc_spec['host_box']
253 return self.test_ssh.is_local()
255 # define the API methods on this object through xmlrpc
256 # would help, but not strictly necessary
260 def actual_command_in_guest (self,command, backslash=False):
261 raw1=self.host_to_guest(command)
262 raw2=self.test_ssh.actual_command(raw1,dry_run=self.options.dry_run, backslash=backslash)
265 def start_guest (self):
266 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host(),dry_run=self.options.dry_run))
268 def stop_guest (self):
269 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host(),dry_run=self.options.dry_run))
271 def run_in_guest (self,command,backslash=False):
272 raw=self.actual_command_in_guest(command,backslash)
273 return utils.system(raw)
275 def run_in_host (self,command):
276 return self.test_ssh.run_in_buildname(command, dry_run=self.options.dry_run)
278 # backslashing turned out so awful at some point that I've turned off auto-backslashing
279 # see e.g. plc_start esp. the version for f14
280 # command gets run in the plc's vm
281 def host_to_guest(self,command):
282 vservername=self.vservername
283 personality=self.options.personality
284 raw="%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s"%locals()
285 # f14 still needs some extra help
286 if self.options.fcdistro == 'f14':
287 raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" %locals()
289 raw +=" -- /usr/bin/env %(command)s"%locals()
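# for illustration (assumed sample values): with personality 'linux64' and vservername
# 'vplc01', host_to_guest('date') would roughly yield
#   linux64 virsh -c lxc:/// lxc-enter-namespace vplc01 -- /usr/bin/env date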
292 # this /vservers thing is legacy...
293 def vm_root_in_host(self):
294 return "/vservers/%s/"%(self.vservername)
296 def vm_timestamp_path (self):
297 return "/vservers/%s/%s.timestamp"%(self.vservername,self.vservername)
299 # start/stop the vserver
300 def start_guest_in_host(self):
301 return "virsh -c lxc:/// start %s"%(self.vservername)
303 def stop_guest_in_host(self):
304 return "virsh -c lxc:/// destroy %s"%(self.vservername)
307 def run_in_guest_piped (self,local,remote):
308 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
310 def yum_check_installed (self, rpms):
311 if isinstance (rpms, list):
313 return self.run_in_guest("rpm -q %s"%rpms)==0
315 # does a yum install in the vserver; ignore the yum retcod, check with rpm
316 def yum_install (self, rpms):
317 if isinstance (rpms, list):
319 self.run_in_guest("yum -y install %s"%rpms)
320 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
321 self.run_in_guest("yum-complete-transaction -y")
322 return self.yum_check_installed (rpms)
324 def auth_root (self):
325 return {'Username':self.plc_spec['settings']['PLC_ROOT_USER'],
326 'AuthMethod':'password',
327 'AuthString':self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
328 'Role' : self.plc_spec['role']
330 def locate_site (self,sitename):
331 for site in self.plc_spec['sites']:
332 if site['site_fields']['name'] == sitename:
334 if site['site_fields']['login_base'] == sitename:
336 raise Exception,"Cannot locate site %s"%sitename
338 def locate_node (self,nodename):
339 for site in self.plc_spec['sites']:
340 for node in site['nodes']:
341 if node['name'] == nodename:
343 raise Exception,"Cannot locate node %s"%nodename
345 def locate_hostname (self,hostname):
346 for site in self.plc_spec['sites']:
347 for node in site['nodes']:
348 if node['node_fields']['hostname'] == hostname:
350 raise Exception,"Cannot locate hostname %s"%hostname
352 def locate_key (self,key_name):
353 for key in self.plc_spec['keys']:
354 if key['key_name'] == key_name:
356 raise Exception,"Cannot locate key %s"%key_name
358 def locate_private_key_from_key_names (self, key_names):
359 # locate the first avail. key
361 for key_name in key_names:
362 key_spec=self.locate_key(key_name)
363 test_key=TestKey(self,key_spec)
364 publickey=test_key.publicpath()
365 privatekey=test_key.privatepath()
366 if os.path.isfile(publickey) and os.path.isfile(privatekey):
368 if found: return privatekey
371 def locate_slice (self, slicename):
372 for slice in self.plc_spec['slices']:
373 if slice['slice_fields']['name'] == slicename:
375 raise Exception,"Cannot locate slice %s"%slicename
377 def all_sliver_objs (self):
379 for slice_spec in self.plc_spec['slices']:
380 slicename = slice_spec['slice_fields']['name']
381 for nodename in slice_spec['nodenames']:
382 result.append(self.locate_sliver_obj (nodename,slicename))
385 def locate_sliver_obj (self,nodename,slicename):
386 (site,node) = self.locate_node(nodename)
387 slice = self.locate_slice (slicename)
389 test_site = TestSite (self, site)
390 test_node = TestNode (self, test_site,node)
391 # xxx the slice site is assumed to be the node site - mhh - probably harmless
392 test_slice = TestSlice (self, test_site, slice)
393 return TestSliver (self, test_node, test_slice)
395 def locate_first_node(self):
396 nodename=self.plc_spec['slices'][0]['nodenames'][0]
397 (site,node) = self.locate_node(nodename)
398 test_site = TestSite (self, site)
399 test_node = TestNode (self, test_site,node)
402 def locate_first_sliver (self):
403 slice_spec=self.plc_spec['slices'][0]
404 slicename=slice_spec['slice_fields']['name']
405 nodename=slice_spec['nodenames'][0]
406 return self.locate_sliver_obj(nodename,slicename)
408 # all different hostboxes used in this plc
409 def get_BoxNodes(self):
410 # maps on sites and nodes, return [ (host_box,test_node) ]
412 for site_spec in self.plc_spec['sites']:
413 test_site = TestSite (self,site_spec)
414 for node_spec in site_spec['nodes']:
415 test_node = TestNode (self, test_site, node_spec)
416 if not test_node.is_real():
417 tuples.append( (test_node.host_box(),test_node) )
418 # transform into a dict { 'host_box' -> [ test_node .. ] }
420 for (box,node) in tuples:
421 if not result.has_key(box):
424 result[box].append(node)
427 # a step for checking this stuff
428 def show_boxes (self):
429 'print summary of nodes location'
430 for (box,nodes) in self.get_BoxNodes().iteritems():
431 print box,":"," + ".join( [ node.name() for node in nodes ] )
434 # make this a valid step
435 def qemu_kill_all(self):
436 'kill all qemu instances on the qemu boxes involved in this setup'
437 # this is the brute force version, kill all qemus on that host box
438 for (box,nodes) in self.get_BoxNodes().iteritems():
439 # pass the first nodename, as we don't push template-qemu on testboxes
440 nodedir=nodes[0].nodedir()
441 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
444 # make this a valid step
445 def qemu_list_all(self):
446 'list all qemu instances on the qemu boxes involved in this setup'
447 for (box,nodes) in self.get_BoxNodes().iteritems():
448 # this is the brute force version, list all qemus on that host box
449 TestBoxQemu(box,self.options.buildname).qemu_list_all()
452 # kill only the qemus related to this test
453 def qemu_list_mine(self):
454 'list qemu instances for our nodes'
455 for (box,nodes) in self.get_BoxNodes().iteritems():
456 # the fine-grain version
461 # kill only the qemus related to this test
462 def qemu_clean_mine(self):
463 'cleanup (rm -rf) qemu instances for our nodes'
464 for (box,nodes) in self.get_BoxNodes().iteritems():
465 # the fine-grain version
470 # kill only the right qemus
471 def qemu_kill_mine(self):
472 'kill the qemu instances for our nodes'
473 for (box,nodes) in self.get_BoxNodes().iteritems():
474 # the fine-grain version
479 #################### display config
481 "show test configuration after localization"
486 # ugly hack to make sure 'run export' only reports about the 1st plc
487 # to avoid confusion - also we use 'inri_slice1' in various aliases..
490 "print cut'n paste-able stuff to export env variables to your shell"
491 # guess local domain from hostname
492 if TestPlc.exported_id>1:
493 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
495 TestPlc.exported_id+=1
496 domain=socket.gethostname().split('.',1)[1]
497 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
498 print "export BUILD=%s"%self.options.buildname
499 print "export PLCHOSTLXC=%s"%fqdn
500 print "export GUESTNAME=%s"%self.plc_spec['vservername']
501 vplcname=self.plc_spec['vservername'].split('-')[-1]
502 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
503 # find hostname of first node
504 (hostname,qemubox) = self.all_node_infos()[0]
505 print "export KVMHOST=%s.%s"%(qemubox,domain)
506 print "export NODE=%s"%(hostname)
510 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
511 def show_pass (self,passno):
512 for (key,val) in self.plc_spec.iteritems():
513 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
517 self.display_site_spec(site)
518 for node in site['nodes']:
519 self.display_node_spec(node)
520 elif key=='initscripts':
521 for initscript in val:
522 self.display_initscript_spec (initscript)
525 self.display_slice_spec (slice)
528 self.display_key_spec (key)
530 if key not in ['sites','initscripts','slices','keys']:
531 print '+ ',key,':',val
533 def display_site_spec (self,site):
534 print '+ ======== site',site['site_fields']['name']
535 for (k,v) in site.iteritems():
536 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
539 print '+ ','nodes : ',
541 print node['node_fields']['hostname'],'',
547 print user['name'],'',
549 elif k == 'site_fields':
550 print '+ login_base',':',v['login_base']
551 elif k == 'address_fields':
557 def display_initscript_spec (self,initscript):
558 print '+ ======== initscript',initscript['initscript_fields']['name']
560 def display_key_spec (self,key):
561 print '+ ======== key',key['key_name']
563 def display_slice_spec (self,slice):
564 print '+ ======== slice',slice['slice_fields']['name']
565 for (k,v) in slice.iteritems():
578 elif k=='slice_fields':
579 print '+ fields',':',
580 print 'max_nodes=',v['max_nodes'],
585 def display_node_spec (self,node):
586 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
587 print "hostname=",node['node_fields']['hostname'],
588 print "ip=",node['interface_fields']['ip']
589 if self.options.verbose:
590 utils.pprint("node details",node,depth=3)
592 # another entry point for just showing the boxes involved
593 def display_mapping (self):
594 TestPlc.display_mapping_plc(self.plc_spec)
598 def display_mapping_plc (plc_spec):
599 print '+ MyPLC',plc_spec['name']
600 # WARNING this would not be right for lxc-based PLC's - should be harmless though
601 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
602 print '+\tIP = %s/%s'%(plc_spec['settings']['PLC_API_HOST'],plc_spec['vserverip'])
603 for site_spec in plc_spec['sites']:
604 for node_spec in site_spec['nodes']:
605 TestPlc.display_mapping_node(node_spec)
608 def display_mapping_node (node_spec):
609 print '+ NODE %s'%(node_spec['name'])
610 print '+\tqemu box %s'%node_spec['host_box']
611 print '+\thostname=%s'%node_spec['node_fields']['hostname']
613 # write a timestamp in /vservers/<>.timestamp
614 # cannot be inside the vserver, that causes vserver .. build to cough
615 def plcvm_timestamp (self):
616 "Create a timestamp to remember creation date for this plc"
618 # TODO-lxc check this one
619 # a first approx. is to store the timestamp close to the VM root like vs does
620 stamp_path=self.vm_timestamp_path ()
621 stamp_dir = os.path.dirname (stamp_path)
622 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
623 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
625 # this is called unconditionally at the beginning of the test sequence
626 # just in case this is a rerun, so if the vm is not running it's fine
627 def plcvm_delete(self):
628 "vserver delete the test myplc"
629 stamp_path=self.vm_timestamp_path()
630 self.run_in_host("rm -f %s"%stamp_path)
631 self.run_in_host("virsh -c lxc:/// destroy %s"%self.vservername)
632 self.run_in_host("virsh -c lxc:/// undefine %s"%self.vservername)
633 self.run_in_host("rm -fr /vservers/%s"%self.vservername)
637 # historically the build was being fetched by the tests
638 # now the build pushes itself as a subdir of the tests workdir
639 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
640 def plcvm_create (self):
641 "vserver creation (no install done)"
642 # push the local build/ dir to the testplc box
644 # a full path for the local calls
645 build_dir=os.path.dirname(sys.argv[0])
646 # sometimes this is empty - set to "." in such a case
647 if not build_dir: build_dir="."
648 build_dir += "/build"
650 # use a standard name - will be relative to remote buildname
652 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
653 self.test_ssh.rmdir(build_dir)
654 self.test_ssh.copy(build_dir,recursive=True)
655 # the repo url is taken from arch-rpms-url
656 # with the last step (i386) removed
657 repo_url = self.options.arch_rpms_url
658 for level in [ 'arch' ]:
659 repo_url = os.path.dirname(repo_url)
661 # invoke initvm (drop support for vs)
662 script="lbuild-initvm.sh"
664 # pass the vbuild-nightly options to [lv]test-initvm
665 script_options += " -p %s"%self.options.personality
666 script_options += " -d %s"%self.options.pldistro
667 script_options += " -f %s"%self.options.fcdistro
668 script_options += " -r %s"%repo_url
669 vserver_name = self.vservername
671 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
672 script_options += " -n %s"%vserver_hostname
674 print "Cannot reverse lookup %s"%self.vserverip
675 print "This is considered fatal, as this might pollute the test results"
677 create_vserver="%(build_dir)s/%(script)s %(script_options)s %(vserver_name)s"%locals()
678 return self.run_in_host(create_vserver) == 0
681 def plc_install(self):
682 "yum install myplc, noderepo, and the plain bootstrapfs"
684 # workaround for getting pgsql8.2 on centos5
685 if self.options.fcdistro == "centos5":
686 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
689 if self.options.personality == "linux32":
691 elif self.options.personality == "linux64":
694 raise Exception, "Unsupported personality %r"%self.options.personality
695 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
698 pkgs_list.append ("slicerepo-%s"%nodefamily)
699 pkgs_list.append ("myplc")
700 pkgs_list.append ("noderepo-%s"%nodefamily)
701 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
702 pkgs_string=" ".join(pkgs_list)
703 return self.yum_install (pkgs_list)
706 def mod_python(self):
707 """yum install mod_python, useful on f18 and above so as to avoid broken wsgi"""
708 return self.yum_install ( [ 'mod_python' ] )
711 def plc_configure(self):
713 tmpname='%s.plc-config-tty'%(self.name())
714 fileconf=open(tmpname,'w')
715 for (var,value) in self.plc_spec['settings'].iteritems():
716 fileconf.write ('e %s\n%s\n'%(var,value))
717 fileconf.write('w\n')
718 fileconf.write('q\n')
720 utils.system('cat %s'%tmpname)
721 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
722 utils.system('rm %s'%tmpname)
725 # f14 is a bit odd in this respect, although this worked fine in guests up to f18
726 # however using a vplc guest under f20 requires this trick
727 # the symptom is this: service plc start
728 # Starting plc (via systemctl): Failed to get D-Bus connection: \
729 # Failed to connect to socket /org/freedesktop/systemd1/private: Connection refused
730 # weird thing is the doc says f14 uses upstart by default and not systemd
731 # so this sounds kind of harmless
732 def start_service (self,service): return self.start_stop_service (service,'start')
733 def stop_service (self,service): return self.start_stop_service (service,'stop')
735 def start_stop_service (self, service,start_or_stop):
736 "utility to start/stop a service with the special trick for f14"
737 if self.options.fcdistro != 'f14':
738 return self.run_in_guest ("service %s %s"%(service,start_or_stop))==0
740 # patch /sbin/service so it does not reset environment
741 self.run_in_guest ('sed -i -e \\"s,env -i,env,\\" /sbin/service')
742 # this is because our own scripts in turn call service
743 return self.run_in_guest("SYSTEMCTL_SKIP_REDIRECT=true service %s %s"%(service,start_or_stop))==0
747 return self.start_service ('plc')
751 return self.stop_service ('plc')
753 def plcvm_start (self):
754 "start the PLC vserver"
758 def plcvm_stop (self):
759 "stop the PLC vserver"
763 # stores the keys from the config for further use
764 def keys_store(self):
765 "stores test users ssh keys in keys/"
766 for key_spec in self.plc_spec['keys']:
767 TestKey(self,key_spec).store_key()
770 def keys_clean(self):
771 "removes keys cached in keys/"
772 utils.system("rm -rf ./keys")
775 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
776 # for later direct access to the nodes
777 def keys_fetch(self):
778 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
780 if not os.path.isdir(dir):
782 vservername=self.vservername
783 vm_root=self.vm_root_in_host()
785 prefix = 'debug_ssh_key'
786 for ext in [ 'pub', 'rsa' ] :
787 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
788 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
789 if self.test_ssh.fetch(src,dst) != 0: overall=False
793 "create sites with PLCAPI"
794 return self.do_sites()
796 def delete_sites (self):
797 "delete sites with PLCAPI"
798 return self.do_sites(action="delete")
800 def do_sites (self,action="add"):
801 for site_spec in self.plc_spec['sites']:
802 test_site = TestSite (self,site_spec)
803 if (action != "add"):
804 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
805 test_site.delete_site()
806 # deleted with the site
807 #test_site.delete_users()
810 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
811 test_site.create_site()
812 test_site.create_users()
815 def delete_all_sites (self):
816 "Delete all sites in PLC, and related objects"
817 print 'auth_root',self.auth_root()
818 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
820 # keep the automatic site - otherwise we shoot ourselves in the foot, root_auth is not valid anymore
821 if site['login_base']==self.plc_spec['settings']['PLC_SLICE_PREFIX']: continue
822 site_id=site['site_id']
823 print 'Deleting site_id',site_id
824 self.apiserver.DeleteSite(self.auth_root(),site_id)
828 "create nodes with PLCAPI"
829 return self.do_nodes()
830 def delete_nodes (self):
831 "delete nodes with PLCAPI"
832 return self.do_nodes(action="delete")
834 def do_nodes (self,action="add"):
835 for site_spec in self.plc_spec['sites']:
836 test_site = TestSite (self,site_spec)
838 utils.header("Deleting nodes in site %s"%test_site.name())
839 for node_spec in site_spec['nodes']:
840 test_node=TestNode(self,test_site,node_spec)
841 utils.header("Deleting %s"%test_node.name())
842 test_node.delete_node()
844 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
845 for node_spec in site_spec['nodes']:
846 utils.pprint('Creating node %s'%node_spec,node_spec)
847 test_node = TestNode (self,test_site,node_spec)
848 test_node.create_node ()
851 def nodegroups (self):
852 "create nodegroups with PLCAPI"
853 return self.do_nodegroups("add")
854 def delete_nodegroups (self):
855 "delete nodegroups with PLCAPI"
856 return self.do_nodegroups("delete")
860 def translate_timestamp (start,grain,timestamp):
861 if timestamp < TestPlc.YEAR: return start+timestamp*grain
862 else: return timestamp
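# example (illustrative): timestamps below one year's worth of seconds are taken as relative -
# translate_timestamp(start,grain,2) returns start+2*grain, whereas a full epoch value
# like 1400000000 is above YEAR and is returned unchanged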
865 def timestamp_printable (timestamp):
866 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
869 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
871 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
872 print 'API answered grain=',grain
873 start=(now/grain)*grain
875 # find out all nodes that are reservable
876 nodes=self.all_reservable_nodenames()
878 utils.header ("No reservable node found - proceeding without leases")
881 # attach them to the leases as specified in plc_specs
882 # this is where the 'leases' field gets interpreted as relative or absolute
883 for lease_spec in self.plc_spec['leases']:
884 # skip the ones that come with a null slice id
885 if not lease_spec['slice']: continue
886 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
887 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
888 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
889 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
890 if lease_addition['errors']:
891 utils.header("Cannot create leases, %s"%lease_addition['errors'])
894 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
895 (nodes,lease_spec['slice'],
896 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
897 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
901 def delete_leases (self):
902 "remove all leases in the myplc side"
903 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
904 utils.header("Cleaning leases %r"%lease_ids)
905 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
908 def list_leases (self):
909 "list all leases known to the myplc"
910 leases = self.apiserver.GetLeases(self.auth_root())
913 current=l['t_until']>=now
914 if self.options.verbose or current:
915 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
916 TestPlc.timestamp_printable(l['t_from']),
917 TestPlc.timestamp_printable(l['t_until'])))
920 # create nodegroups if needed, and populate
921 def do_nodegroups (self, action="add"):
922 # 1st pass to scan contents
924 for site_spec in self.plc_spec['sites']:
925 test_site = TestSite (self,site_spec)
926 for node_spec in site_spec['nodes']:
927 test_node=TestNode (self,test_site,node_spec)
928 if node_spec.has_key('nodegroups'):
929 nodegroupnames=node_spec['nodegroups']
930 if isinstance(nodegroupnames,StringTypes):
931 nodegroupnames = [ nodegroupnames ]
932 for nodegroupname in nodegroupnames:
933 if not groups_dict.has_key(nodegroupname):
934 groups_dict[nodegroupname]=[]
935 groups_dict[nodegroupname].append(test_node.name())
936 auth=self.auth_root()
938 for (nodegroupname,group_nodes) in groups_dict.iteritems():
940 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
941 # first, check if the nodetagtype is here
942 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
944 tag_type_id = tag_types[0]['tag_type_id']
946 tag_type_id = self.apiserver.AddTagType(auth,
947 {'tagname':nodegroupname,
948 'description': 'for nodegroup %s'%nodegroupname,
950 print 'located tag (type)',nodegroupname,'as',tag_type_id
952 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
954 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
955 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
956 # set node tag on all nodes, value='yes'
957 for nodename in group_nodes:
959 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
961 traceback.print_exc()
962 print 'node',nodename,'seems to already have tag',nodegroupname
965 expect_yes = self.apiserver.GetNodeTags(auth,
966 {'hostname':nodename,
967 'tagname':nodegroupname},
968 ['value'])[0]['value']
969 if expect_yes != "yes":
970 print 'Mismatch node tag on node',nodename,'got',expect_yes
973 if not self.options.dry_run:
974 print 'Cannot find tag',nodegroupname,'on node',nodename
978 print 'cleaning nodegroup',nodegroupname
979 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
981 traceback.print_exc()
985 # a list of TestNode objs
986 def all_nodes (self):
988 for site_spec in self.plc_spec['sites']:
989 test_site = TestSite (self,site_spec)
990 for node_spec in site_spec['nodes']:
991 nodes.append(TestNode (self,test_site,node_spec))
994 # return a list of tuples (nodename,qemuname)
995 def all_node_infos (self) :
997 for site_spec in self.plc_spec['sites']:
998 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
999 for node_spec in site_spec['nodes'] ]
1002 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
1003 def all_reservable_nodenames (self):
1005 for site_spec in self.plc_spec['sites']:
1006 for node_spec in site_spec['nodes']:
1007 node_fields=node_spec['node_fields']
1008 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
1009 res.append(node_fields['hostname'])
1012 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
1013 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
1014 if self.options.dry_run:
1018 class CompleterTaskBootState (CompleterTask):
1019 def __init__ (self, test_plc,hostname):
1020 self.test_plc=test_plc
1021 self.hostname=hostname
1022 self.last_boot_state='undef'
1023 def actual_run (self):
1025 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
1027 self.last_boot_state = node['boot_state']
1028 return self.last_boot_state == target_boot_state
1032 return "CompleterTaskBootState with node %s"%self.hostname
1033 def failure_epilogue (self):
1034 print "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
1036 timeout = timedelta(minutes=timeout_minutes)
1037 graceout = timedelta(minutes=silent_minutes)
1038 period = timedelta(seconds=period_seconds)
1039 # the nodes that haven't checked yet - start with a full list and shrink over time
1040 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
1041 tasks = [ CompleterTaskBootState (self,hostname) \
1042 for (hostname,_) in self.all_node_infos() ]
1043 message = 'check_boot_state={}'.format(target_boot_state)
1044 return Completer (tasks, message=message).run (timeout, graceout, period)
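# note: this illustrates the Completer pattern used throughout this class - each
# CompleterTask exposes actual_run() returning a boolean plus failure_epilogue() for
# diagnostics, and Completer(tasks,...).run(timeout, graceout, period) presumably retries
# every <period> until all tasks succeed or <timeout> expires, printing nothing during <graceout>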
1046 def nodes_booted(self):
1047 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
1049 def probe_kvm_iptables (self):
1050 (_,kvmbox) = self.all_node_infos()[0]
1051 TestSsh(kvmbox).run("iptables-save")
1055 def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
1056 class CompleterTaskPingNode(CompleterTask):
1057 def __init__ (self, hostname):
1058 self.hostname=hostname
1059 def run(self, silent):
1060 command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
1061 return utils.system (command, silent=silent)==0
1062 def failure_epilogue (self):
1063 print "Cannot ping node with name %s"%self.hostname
1064 timeout=timedelta (seconds=timeout_seconds)
1066 period=timedelta (seconds=period_seconds)
1067 node_infos = self.all_node_infos()
1068 tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
1069 return Completer (tasks, message='ping_node').run (timeout, graceout, period)
1071 # ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
1072 def ping_node (self):
1074 return self.check_nodes_ping ()
1076 def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
1078 timeout = timedelta(minutes=timeout_minutes)
1079 graceout = timedelta(minutes=silent_minutes)
1080 period = timedelta(seconds=period_seconds)
1081 vservername=self.vservername
1084 completer_message = 'ssh_node_debug'
1085 local_key = "keys/%(vservername)s-debug.rsa"%locals()
1088 completer_message = 'ssh_node_boot'
1089 local_key = "keys/key_admin.rsa"
1090 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
1091 node_infos = self.all_node_infos()
1092 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key,
1093 boot_state=message, dry_run=self.options.dry_run) \
1094 for (nodename,qemuname) in node_infos ]
1095 return Completer (tasks, message=completer_message).run (timeout, graceout, period)
1097 def ssh_node_debug(self):
1098 "Tries to ssh into nodes in debug mode with the debug ssh key"
1099 return self.check_nodes_ssh(debug=True,
1100 timeout_minutes=self.ssh_node_debug_timeout,
1101 silent_minutes=self.ssh_node_debug_silent)
1103 def ssh_node_boot(self):
1104 "Tries to ssh into nodes in production mode with the root ssh key"
1105 return self.check_nodes_ssh(debug=False,
1106 timeout_minutes=self.ssh_node_boot_timeout,
1107 silent_minutes=self.ssh_node_boot_silent)
1109 def node_bmlogs(self):
1110 "Checks that there's a non-empty dir. /var/log/bm/raw"
1111 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1114 def qemu_local_init (self): pass
1116 def bootcd (self): pass
1118 def qemu_local_config (self): pass
1120 def nodestate_reinstall (self): pass
1122 def nodestate_safeboot (self): pass
1124 def nodestate_boot (self): pass
1126 def nodestate_show (self): pass
1128 def qemu_export (self): pass
1130 ### check hooks : invoke scripts from hooks/{node,slice}
1131 def check_hooks_node (self):
1132 return self.locate_first_node().check_hooks()
1133 def check_hooks_sliver (self) :
1134 return self.locate_first_sliver().check_hooks()
1136 def check_hooks (self):
1137 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1138 return self.check_hooks_node() and self.check_hooks_sliver()
1141 def do_check_initscripts(self):
1142 class CompleterTaskInitscript (CompleterTask):
1143 def __init__ (self, test_sliver, stamp):
1144 self.test_sliver=test_sliver
1146 def actual_run (self):
1147 return self.test_sliver.check_initscript_stamp (self.stamp)
1149 return "initscript checker for %s"%self.test_sliver.name()
1150 def failure_epilogue (self):
1151 print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1154 for slice_spec in self.plc_spec['slices']:
1155 if not slice_spec.has_key('initscriptstamp'):
1157 stamp=slice_spec['initscriptstamp']
1158 slicename=slice_spec['slice_fields']['name']
1159 for nodename in slice_spec['nodenames']:
1160 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1161 (site,node) = self.locate_node (nodename)
1162 # xxx - passing the wrong site - probably harmless
1163 test_site = TestSite (self,site)
1164 test_slice = TestSlice (self,test_site,slice_spec)
1165 test_node = TestNode (self,test_site,node)
1166 test_sliver = TestSliver (self, test_node, test_slice)
1167 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1168 return Completer (tasks, message='check_initscripts').run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1170 def check_initscripts(self):
1171 "check that the initscripts have triggered"
1172 return self.do_check_initscripts()
1174 def initscripts (self):
1175 "create initscripts with PLCAPI"
1176 for initscript in self.plc_spec['initscripts']:
1177 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1178 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1181 def delete_initscripts (self):
1182 "delete initscripts with PLCAPI"
1183 for initscript in self.plc_spec['initscripts']:
1184 initscript_name = initscript['initscript_fields']['name']
1185 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1187 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1188 print initscript_name,'deleted'
1190 print 'deletion went wrong - probably did not exist'
1195 "create slices with PLCAPI"
1196 return self.do_slices(action="add")
1198 def delete_slices (self):
1199 "delete slices with PLCAPI"
1200 return self.do_slices(action="delete")
1202 def fill_slices (self):
1203 "add nodes in slices with PLCAPI"
1204 return self.do_slices(action="fill")
1206 def empty_slices (self):
1207 "remove nodes from slices with PLCAPI"
1208 return self.do_slices(action="empty")
1210 def do_slices (self, action="add"):
1211 for slice in self.plc_spec['slices']:
1212 site_spec = self.locate_site (slice['sitename'])
1213 test_site = TestSite(self,site_spec)
1214 test_slice=TestSlice(self,test_site,slice)
1215 if action == "delete":
1216 test_slice.delete_slice()
1217 elif action=="fill":
1218 test_slice.add_nodes()
1219 elif action=="empty":
1220 test_slice.delete_nodes()
1222 test_slice.create_slice()
1225 @slice_mapper__tasks(20,10,15)
1226 def ssh_slice(self): pass
1227 @slice_mapper__tasks(20,19,15)
1228 def ssh_slice_off (self): pass
1229 @slice_mapper__tasks(1,1,15)
1230 def slice_fs_present(self): pass
1231 @slice_mapper__tasks(1,1,15)
1232 def slice_fs_deleted(self): pass
1234 # use another name so we can exclude/ignore it from the tests on the nightly command line
1235 def ssh_slice_again(self): return self.ssh_slice()
1236 # note that simply doing ssh_slice_again=ssh_slice would kind of work too
1237 # but for some reason the ignore-wrapping thing would not
1240 def ssh_slice_basics(self): pass
1242 def check_vsys_defaults(self): pass
1245 def keys_clear_known_hosts (self): pass
1247 def plcapi_urls (self):
1248 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1250 def speed_up_slices (self):
1251 "tweak nodemanager cycle (wait time) to 30+/-10 s"
1252 return self._speed_up_slices (30,10)
1253 def super_speed_up_slices (self):
1254 "dev mode: tweak nodemanager cycle (wait time) to 5+/-1 s"
1255 return self._speed_up_slices (5,1)
1257 def _speed_up_slices (self, p, r):
1258 # create the template on the server-side
1259 template="%s.nodemanager"%self.name()
1260 template_file = open (template,"w")
1261 template_file.write('OPTIONS="-p %s -r %s -d"\n'%(p,r))
1262 template_file.close()
1263 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1264 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1265 self.test_ssh.copy_abs(template,remote)
1267 if not self.apiserver.GetConfFiles (self.auth_root(),
1268 {'dest':'/etc/sysconfig/nodemanager'}):
1269 self.apiserver.AddConfFile (self.auth_root(),
1270 {'dest':'/etc/sysconfig/nodemanager',
1271 'source':'PlanetLabConf/nodemanager',
1272 'postinstall_cmd':'service nm restart',})
1275 def debug_nodemanager (self):
1276 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1277 template="%s.nodemanager"%self.name()
1278 template_file = open (template,"w")
1279 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1280 template_file.close()
1281 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1282 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1283 self.test_ssh.copy_abs(template,remote)
1287 def qemu_start (self) : pass
1290 def qemu_timestamp (self) : pass
1292 # when a spec refers to a node possibly on another plc
1293 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1294 for plc in [ self ] + other_plcs:
1296 return plc.locate_sliver_obj (nodename, slicename)
1299 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1301 # implement this one as a cross step so that we can take advantage of different nodes
1302 # in multi-plcs mode
1303 def cross_check_tcp (self, other_plcs):
1304 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1305 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1306 utils.header ("check_tcp: no/empty config found")
1308 specs = self.plc_spec['tcp_specs']
1313 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1314 if not s_test_sliver.run_tcp_server(port,timeout=20):
1318 # idem for the client side
1319 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1320 # use nodename from the located sliver, unless 'client_connect' is set
1321 if 'client_connect' in spec:
1322 destination = spec['client_connect']
1324 destination=s_test_sliver.test_node.name()
1325 if not c_test_sliver.run_tcp_client(destination,port):
1329 # painfully enough, we need to allow for some time as netflow might show up last
1330 def check_system_slice (self):
1331 "all nodes: check that a system slice is alive"
1332 # netflow currently not working in the lxc distro
1333 # drl not built at all in the wtx distro
1334 # if we find either of them we're happy
1335 return self.check_netflow() or self.check_drl()
1338 def check_netflow (self): return self._check_system_slice ('netflow')
1339 def check_drl (self): return self._check_system_slice ('drl')
1341 # we have the slices up already here, so it should not take too long
1342 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1343 class CompleterTaskSystemSlice (CompleterTask):
1344 def __init__ (self, test_node, dry_run):
1345 self.test_node=test_node
1346 self.dry_run=dry_run
1347 def actual_run (self):
1348 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1350 return "System slice %s @ %s"%(slicename, self.test_node.name())
1351 def failure_epilogue (self):
1352 print "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1353 timeout = timedelta(minutes=timeout_minutes)
1354 silent = timedelta (0)
1355 period = timedelta (seconds=period_seconds)
1356 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1357 for test_node in self.all_nodes() ]
1358 return Completer (tasks, message='_check_system_slice') . run (timeout, silent, period)
1360 def plcsh_stress_test (self):
1361 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1362 # install the stress-test in the plc image
1363 location = "/usr/share/plc_api/plcsh_stress_test.py"
1364 remote="%s/%s"%(self.vm_root_in_host(),location)
1365 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1367 command += " -- --check"
1368 if self.options.size == 1:
1369 command += " --tiny"
1370 return ( self.run_in_guest(command) == 0)
1372 # populate runs the same utility with slightly different options
1373 # in particular it runs with --preserve (don't cleanup) and without --check
1374 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1376 def sfa_install_all (self):
1377 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1378 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1380 def sfa_install_core(self):
1382 return self.yum_install ("sfa")
1384 def sfa_install_plc(self):
1385 "yum install sfa-plc"
1386 return self.yum_install("sfa-plc")
1388 def sfa_install_sfatables(self):
1389 "yum install sfa-sfatables"
1390 return self.yum_install ("sfa-sfatables")
1392 # for some very odd reason, this sometimes fails with the following symptom
1393 # # yum install sfa-client
1394 # Setting up Install Process
1396 # Downloading Packages:
1397 # Running rpm_check_debug
1398 # Running Transaction Test
1399 # Transaction Test Succeeded
1400 # Running Transaction
1401 # Transaction couldn't start:
1402 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1403 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1404 # even though in the same context I have
1405 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1406 # Filesystem Size Used Avail Use% Mounted on
1407 # /dev/hdv1 806G 264G 501G 35% /
1408 # none 16M 36K 16M 1% /tmp
1410 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1411 def sfa_install_client(self):
1412 "yum install sfa-client"
1413 first_try=self.yum_install("sfa-client")
1414 if first_try: return True
1415 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1416 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1417 utils.header("rpm_path=<<%s>>"%cached_rpm_path)
1419 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1420 return self.yum_check_installed ("sfa-client")
1422 def sfa_dbclean(self):
1423 "thoroughly wipes off the SFA database"
1424 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1425 self.run_in_guest("sfa-nuke.py")==0 or \
1426 self.run_in_guest("sfa-nuke-plc.py")==0
1428 def sfa_fsclean(self):
1429 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1430 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1433 def sfa_plcclean(self):
1434 "cleans the PLC entries that were created as a side effect of running the script"
1436 sfa_spec=self.plc_spec['sfa']
1438 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1439 login_base=auth_sfa_spec['login_base']
1440 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1441 except: print "Site %s already absent from PLC db"%login_base
1443 for spec_name in ['pi_spec','user_spec']:
1444 user_spec=auth_sfa_spec[spec_name]
1445 username=user_spec['email']
1446 try: self.apiserver.DeletePerson(self.auth_root(),username)
1448 # this in fact is expected as sites delete their members
1449 #print "User %s already absent from PLC db"%username
1452 print "REMEMBER TO RUN sfa_import AGAIN"
1455 def sfa_uninstall(self):
1456 "uses rpm to uninstall sfa - ignore result"
1457 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1458 self.run_in_guest("rm -rf /var/lib/sfa")
1459 self.run_in_guest("rm -rf /etc/sfa")
1460 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1462 self.run_in_guest("rpm -e --noscripts sfa-plc")
1465 ### run unit tests for SFA
1466 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1467 # Running Transaction
1468 # Transaction couldn't start:
1469 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1470 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1471 # no matter how many Gbs are available on the testplc
1472 # could not figure out what's wrong, so...
1473 # if the yum install phase fails, consider the test is successful
1474 # other combinations will eventually run it hopefully
1475 def sfa_utest(self):
1476 "yum install sfa-tests and run SFA unittests"
1477 self.run_in_guest("yum -y install sfa-tests")
1478 # failed to install - forget it
1479 if self.run_in_guest("rpm -q sfa-tests")!=0:
1480 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1482 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1486 dirname="conf.%s"%self.plc_spec['name']
1487 if not os.path.isdir(dirname):
1488 utils.system("mkdir -p %s"%dirname)
1489 if not os.path.isdir(dirname):
1490 raise Exception,"Cannot create config dir for plc %s"%self.name()
1493 def conffile(self,filename):
1494 return "%s/%s"%(self.confdir(),filename)
1495 def confsubdir(self,dirname,clean,dry_run=False):
1496 subdirname="%s/%s"%(self.confdir(),dirname)
1498 utils.system("rm -rf %s"%subdirname)
1499 if not os.path.isdir(subdirname):
1500 utils.system("mkdir -p %s"%subdirname)
1501 if not dry_run and not os.path.isdir(subdirname):
1502 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1505 def conffile_clean (self,filename):
1506 filename=self.conffile(filename)
1507 return utils.system("rm -rf %s"%filename)==0
1510 def sfa_configure(self):
1511 "run sfa-config-tty"
1512 tmpname=self.conffile("sfa-config-tty")
1513 fileconf=open(tmpname,'w')
1514 for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
1515 fileconf.write ('e %s\n%s\n'%(var,value))
1516 # # the way plc_config handles booleans just sucks..
1519 # if self.plc_spec['sfa'][var]: val='true'
1520 # fileconf.write ('e %s\n%s\n'%(var,val))
1521 fileconf.write('w\n')
1522 fileconf.write('R\n')
1523 fileconf.write('q\n')
1525 utils.system('cat %s'%tmpname)
1526 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1529 def aggregate_xml_line(self):
1530 port=self.plc_spec['sfa']['neighbours-port']
1531 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1532 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'],port)
1534 def registry_xml_line(self):
1535 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1536 (self.vserverip,self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH'])
1539 # a cross step that takes all other plcs in argument
1540 def cross_sfa_configure(self, other_plcs):
1541 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1542 # of course with a single plc, other_plcs is an empty list
1545 agg_fname=self.conffile("agg.xml")
1546 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1547 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1548 utils.header ("(Over)wrote %s"%agg_fname)
1549 reg_fname=self.conffile("reg.xml")
1550 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1551 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1552 utils.header ("(Over)wrote %s"%reg_fname)
1553 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1554 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
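# illustrative only: with at least one peer plc, the generated aggregates.xml contains one
#   <aggregate addr="<peer vserverip>" hrn="<peer SFA_REGISTRY_ROOT_AUTH>" port="<neighbours-port>"/>
# entry per peer, and registries.xml one <registry .../> entry with the hardwired port 12345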
1556 def sfa_import(self):
1557 "use sfaadmin to import from plc"
1558 auth=self.plc_spec['sfa']['settings']['SFA_REGISTRY_ROOT_AUTH']
1559 return self.run_in_guest('sfaadmin reg import_registry')==0
1561 def sfa_start(self):
1563 return self.start_service('sfa')
1566 def sfi_configure(self):
1567 "Create /root/sfi on the plc side for sfi client configuration"
1568 if self.options.dry_run:
1569 utils.header("DRY RUN - skipping step")
1571 sfa_spec=self.plc_spec['sfa']
1572 # cannot use auth_sfa_mapper to pass dir_name
1573 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1574 test_slice=TestAuthSfa(self,slice_spec)
1575 dir_basename=os.path.basename(test_slice.sfi_path())
1576 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1577 test_slice.sfi_configure(dir_name)
1578 # push into the remote /root/sfi area
1579 location = test_slice.sfi_path()
1580 remote="%s/%s"%(self.vm_root_in_host(),location)
1581 self.test_ssh.mkdir(remote,abs=True)
1582 # need to strip the last level of remote, otherwise we get an extra dir level
1583 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1587 def sfi_clean (self):
1588 "clean up /root/sfi on the plc side"
1589 self.run_in_guest("rm -rf /root/sfi")
1592 def sfa_rspec_empty(self):
1593 "expose a static empty rspec (ships with the tests module) in the sfi directory"
1594 filename="empty-rspec.xml"
1596 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1597 test_slice=TestAuthSfa(self,slice_spec)
1598 in_vm = test_slice.sfi_path()
1599 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1600 if self.test_ssh.copy_abs (filename, remote) !=0: overall=False
1604 def sfa_register_site (self): pass
1606 def sfa_register_pi (self): pass
1608 def sfa_register_user(self): pass
1610 def sfa_update_user(self): pass
1612 def sfa_register_slice(self): pass
1614 def sfa_renew_slice(self): pass
1616 def sfa_get_expires(self): pass
1618 def sfa_discover(self): pass
1620 def sfa_rspec(self): pass
1622 def sfa_allocate(self): pass
1624 def sfa_allocate_empty(self): pass
1626 def sfa_provision(self): pass
1628 def sfa_provision_empty(self): pass
1630 def sfa_check_slice_plc(self): pass
1632 def sfa_check_slice_plc_empty(self): pass
1634 def sfa_update_slice(self): pass
1636 def sfa_remove_user_from_slice(self): pass
1638 def sfa_insert_user_in_slice(self): pass
1640 def sfi_list(self): pass
1642 def sfi_show_site(self): pass
1644 def sfi_show_slice(self): pass
1646 def sfi_show_slice_researchers(self): pass
1648 def ssh_slice_sfa(self): pass
1650 def sfa_delete_user(self): pass
1652 def sfa_delete_slice(self): pass
1656 return self.stop_service ('sfa')
1658 def populate (self):
1659 "creates random entries in the PLCAPI"
1660 # install the stress-test in the plc image
1661 location = "/usr/share/plc_api/plcsh_stress_test.py"
1662 remote="%s/%s"%(self.vm_root_in_host(),location)
1663 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1665 command += " -- --preserve --short-names"
1666 local = (self.run_in_guest(command) == 0);
1667 # second run with --foreign
1668 command += ' --foreign'
1669 remote = (self.run_in_guest(command) == 0);
1670 return ( local and remote)
1672 def gather_logs (self):
1673 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1674 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1675 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1676 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1677 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1678 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1679 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1681 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1682 self.gather_var_logs ()
1684 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1685 self.gather_pgsql_logs ()
1687 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1688 self.gather_root_sfi ()
1690 print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
1691 for site_spec in self.plc_spec['sites']:
1692 test_site = TestSite (self,site_spec)
1693 for node_spec in site_spec['nodes']:
1694 test_node=TestNode(self,test_site,node_spec)
1695 test_node.gather_qemu_logs()
1697 print "-------------------- TestPlc.gather_logs : nodes' /var/log"
1698 self.gather_nodes_var_logs()
1700 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1701 self.gather_slivers_var_logs()
1704 def gather_slivers_var_logs(self):
1705 for test_sliver in self.all_sliver_objs():
1706 remote = test_sliver.tar_var_logs()
1707 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1708 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1709 utils.system(command)
1712 def gather_var_logs (self):
1713 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1714 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1715 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1716 utils.system(command)
1717 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1718 utils.system(command)
1720 def gather_pgsql_logs (self):
1721 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1722 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1723 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1724 utils.system(command)
1726 def gather_root_sfi (self):
1727 utils.system("mkdir -p logs/sfi.%s"%self.name())
1728 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1729 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1730 utils.system(command)
1732 def gather_nodes_var_logs (self):
1733 for site_spec in self.plc_spec['sites']:
1734 test_site = TestSite (self,site_spec)
1735 for node_spec in site_spec['nodes']:
1736 test_node=TestNode(self,test_site,node_spec)
1737 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1738 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1739 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1740 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1741 utils.system(command)
1744 # returns the filename to use for sql dump/restore, using options.dbname if set
1745 def dbfile (self, database):
1746 # uses options.dbname if it is found
1748 name=self.options.dbname
1749 if not isinstance(name,StringTypes):
1755 return "/root/%s-%s.sql"%(database,name)
1757 def plc_db_dump(self):
1758 'dump the planetlab5 DB in /root in the PLC - filename has time'
1759 dump=self.dbfile("planetlab5")
1760 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1761 utils.header('Dumped planetlab5 database in %s'%dump)
1764 def plc_db_restore(self):
1765 'restore the planetlab5 DB - looks broken, but run -n might help'
1766 dump=self.dbfile("planetlab5")
1767 ## stop httpd service
1768 self.run_in_guest('service httpd stop')
1769 # xxx - need another wrapper
1770 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1771 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1772 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1773 ## starting httpd service
1774 self.run_in_guest('service httpd start')
1776 utils.header('Database restored from ' + dump)
1779 def create_ignore_steps ():
1780 for step in TestPlc.default_steps + TestPlc.other_steps:
1781 # default step can have a plc qualifier
1782 if '@' in step: (step,qualifier)=step.split('@')
1783 # or be defined as forced or ignored by default
1784 for keyword in ['_ignore','_force']:
1785 if step.endswith (keyword): step=step.replace(keyword,'')
1786 if step == SEP or step == SEPSFA : continue
1787 method=getattr(TestPlc,step)
1789 wrapped=ignore_result(method)
1790 # wrapped.__doc__ = method.__doc__ + " (run in ignore-result mode)"
1791 setattr(TestPlc, name, wrapped)
1794 # def ssh_slice_again_ignore (self): pass
1796 # def check_initscripts_ignore (self): pass
1798 def standby_1_through_20(self):
1799 """convenience function to wait for a specified number of minutes"""
1802 def standby_1(): pass
1804 def standby_2(): pass
1806 def standby_3(): pass
1808 def standby_4(): pass
1810 def standby_5(): pass
1812 def standby_6(): pass
1814 def standby_7(): pass
1816 def standby_8(): pass
1818 def standby_9(): pass
1820 def standby_10(): pass
1822 def standby_11(): pass
1824 def standby_12(): pass
1826 def standby_13(): pass
1828 def standby_14(): pass
1830 def standby_15(): pass
1832 def standby_16(): pass
1834 def standby_17(): pass
1836 def standby_18(): pass
1838 def standby_19(): pass
1840 def standby_20(): pass
1842 # convenience for debugging the test logic
1843 def yes (self): return True
1844 def no (self): return False
1845 def fail (self): return False