1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
65 def auth_sfa_mapper (method):
68 slice_method = TestAuthSfa.__dict__[method.__name__]
69 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
70 test_slice=TestAuthSfa(self,slice_spec)
71 if not slice_method(test_slice,self.options): overall=False
73 # restore the doc text
74 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
89 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
90 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
91 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
92 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
93 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
94 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
95 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
96 # but as the stress test might take a while, we sometimes missed the debug mode..
97 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
98 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
99 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
100 'check_tcp', 'check_sys_slice', SEP,
101 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'standby_1_through_20',SEP,
120 def printable_steps (list):
121 single_line=" ".join(list)+" "
122 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
124 def valid_step (step):
125 return step != SEP and step != SEPSFA
127 # turn off the sfa-related steps when build has skipped SFA
128 # this was originally for centos5 but is still valid
129 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
131 def check_whether_build_has_sfa (rpms_url):
132 utils.header ("Checking if build provides SFA package...")
133 # warning, we're now building 'sface' so let's be a bit more picky
134 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
135 # full builds are expected to return with 0 here
137 utils.header("build does provide SFA")
139 # move all steps containing 'sfa' from default_steps to other_steps
140 utils.header("SFA package not found - removing steps with sfa or sfi")
141 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
142 TestPlc.other_steps += sfa_steps
143 for step in sfa_steps: TestPlc.default_steps.remove(step)
145 def __init__ (self,plc_spec,options):
146 self.plc_spec=plc_spec
148 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
149 self.vserverip=plc_spec['vserverip']
150 self.vservername=plc_spec['vservername']
151 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
152 self.apiserver=TestApiserver(self.url,options.dry_run)
154 def has_addresses_api (self):
155 return self.apiserver.has_method('AddIpAddress')
158 name=self.plc_spec['name']
159 return "%s.%s"%(name,self.vservername)
162 return self.plc_spec['host_box']
165 return self.test_ssh.is_local()
167 # define the API methods on this object through xmlrpc
168 # would help, but not strictly necessary
172 def actual_command_in_guest (self,command):
173 return self.test_ssh.actual_command(self.host_to_guest(command))
175 def start_guest (self):
176 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
178 def stop_guest (self):
179 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
181 def run_in_guest (self,command):
182 return utils.system(self.actual_command_in_guest(command))
184 def run_in_host (self,command):
185 return self.test_ssh.run_in_buildname(command)
187 #command gets run in the plc's vm
188 def host_to_guest(self,command):
189 if self.options.plcs_use_lxc:
190 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
192 return "vserver %s exec %s"%(self.vservername,command)
194 def vm_root_in_host(self):
195 if self.options.plcs_use_lxc:
196 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
198 return "/vservers/%s"%(self.vservername)
200 def vm_timestamp_path (self):
201 if self.options.plcs_use_lxc:
202 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
204 return "/vservers/%s.timestamp"%(self.vservername)
206 #start/stop the vserver
207 def start_guest_in_host(self):
208 if self.options.plcs_use_lxc:
209 return "lxc-start --daemon --name=%s"%(self.vservername)
211 return "vserver %s start"%(self.vservername)
213 def stop_guest_in_host(self):
214 if self.options.plcs_use_lxc:
215 return "lxc-stop --name=%s"%(self.vservername)
217 return "vserver %s stop"%(self.vservername)
220 def run_in_guest_piped (self,local,remote):
221 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
223 def yum_check_installed (self, rpms):
224 if isinstance (rpms, list):
226 return self.run_in_guest("rpm -q %s"%rpms)==0
228 # does a yum install in the vs, ignore yum retcod, check with rpm
229 def yum_install (self, rpms):
230 if isinstance (rpms, list):
232 self.run_in_guest("yum -y install %s"%rpms)
233 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
234 self.run_in_guest("yum-complete-transaction -y")
235 return self.yum_check_installed (rpms)
237 def auth_root (self):
238 return {'Username':self.plc_spec['PLC_ROOT_USER'],
239 'AuthMethod':'password',
240 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
241 'Role' : self.plc_spec['role']
243 def locate_site (self,sitename):
244 for site in self.plc_spec['sites']:
245 if site['site_fields']['name'] == sitename:
247 if site['site_fields']['login_base'] == sitename:
249 raise Exception,"Cannot locate site %s"%sitename
251 def locate_node (self,nodename):
252 for site in self.plc_spec['sites']:
253 for node in site['nodes']:
254 if node['name'] == nodename:
256 raise Exception,"Cannot locate node %s"%nodename
258 def locate_hostname (self,hostname):
259 for site in self.plc_spec['sites']:
260 for node in site['nodes']:
261 if node['node_fields']['hostname'] == hostname:
263 raise Exception,"Cannot locate hostname %s"%hostname
265 def locate_key (self,key_name):
266 for key in self.plc_spec['keys']:
267 if key['key_name'] == key_name:
269 raise Exception,"Cannot locate key %s"%key_name
271 def locate_private_key_from_key_names (self, key_names):
272 # locate the first avail. key
274 for key_name in key_names:
275 key_spec=self.locate_key(key_name)
276 test_key=TestKey(self,key_spec)
277 publickey=test_key.publicpath()
278 privatekey=test_key.privatepath()
279 if os.path.isfile(publickey) and os.path.isfile(privatekey):
281 if found: return privatekey
284 def locate_slice (self, slicename):
285 for slice in self.plc_spec['slices']:
286 if slice['slice_fields']['name'] == slicename:
288 raise Exception,"Cannot locate slice %s"%slicename
290 def all_sliver_objs (self):
292 for slice_spec in self.plc_spec['slices']:
293 slicename = slice_spec['slice_fields']['name']
294 for nodename in slice_spec['nodenames']:
295 result.append(self.locate_sliver_obj (nodename,slicename))
298 def locate_sliver_obj (self,nodename,slicename):
299 (site,node) = self.locate_node(nodename)
300 slice = self.locate_slice (slicename)
302 test_site = TestSite (self, site)
303 test_node = TestNode (self, test_site,node)
304 # xxx the slice site is assumed to be the node site - mhh - probably harmless
305 test_slice = TestSlice (self, test_site, slice)
306 return TestSliver (self, test_node, test_slice)
308 def locate_first_node(self):
309 nodename=self.plc_spec['slices'][0]['nodenames'][0]
310 (site,node) = self.locate_node(nodename)
311 test_site = TestSite (self, site)
312 test_node = TestNode (self, test_site,node)
315 def locate_first_sliver (self):
316 slice_spec=self.plc_spec['slices'][0]
317 slicename=slice_spec['slice_fields']['name']
318 nodename=slice_spec['nodenames'][0]
319 return self.locate_sliver_obj(nodename,slicename)
321 # all different hostboxes used in this plc
322 def gather_hostBoxes(self):
323 # maps on sites and nodes, return [ (host_box,test_node) ]
325 for site_spec in self.plc_spec['sites']:
326 test_site = TestSite (self,site_spec)
327 for node_spec in site_spec['nodes']:
328 test_node = TestNode (self, test_site, node_spec)
329 if not test_node.is_real():
330 tuples.append( (test_node.host_box(),test_node) )
331 # transform into a dict { 'host_box' -> [ test_node .. ] }
333 for (box,node) in tuples:
334 if not result.has_key(box):
337 result[box].append(node)
340 # a step for checking this stuff
341 def show_boxes (self):
342 'print summary of nodes location'
343 for (box,nodes) in self.gather_hostBoxes().iteritems():
344 print box,":"," + ".join( [ node.name() for node in nodes ] )
347 # make this a valid step
348 def qemu_kill_all(self):
349 'kill all qemu instances on the qemu boxes involved by this setup'
350 # this is the brute force version, kill all qemus on that host box
351 for (box,nodes) in self.gather_hostBoxes().iteritems():
352 # pass the first nodename, as we don't push template-qemu on testboxes
353 nodedir=nodes[0].nodedir()
354 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
357 # make this a valid step
358 def qemu_list_all(self):
359 'list all qemu instances on the qemu boxes involved by this setup'
360 for (box,nodes) in self.gather_hostBoxes().iteritems():
361 # this is the brute force version, kill all qemus on that host box
362 TestBoxQemu(box,self.options.buildname).qemu_list_all()
365 # kill only the right qemus
366 def qemu_list_mine(self):
367 'list qemu instances for our nodes'
368 for (box,nodes) in self.gather_hostBoxes().iteritems():
369 # the fine-grain version
374 # kill only the right qemus
375 def qemu_kill_mine(self):
376 'kill the qemu instances for our nodes'
377 for (box,nodes) in self.gather_hostBoxes().iteritems():
378 # the fine-grain version
383 #################### display config
385 "show test configuration after localization"
391 "print cut'n paste-able stuff to export env variables to your shell"
392 # guess local domain from hostname
393 domain=socket.gethostname().split('.',1)[1]
394 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
395 print "export BUILD=%s"%self.options.buildname
396 if self.options.plcs_use_lxc:
397 print "export PLCHOSTLXC=%s"%fqdn
399 print "export PLCHOSTVS=%s"%fqdn
400 print "export GUESTNAME=%s"%self.plc_spec['vservername']
401 vplcname=self.plc_spec['vservername'].split('-')[-1]
402 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
403 # find hostname of first node
404 (hostname,qemubox) = self.all_node_infos()[0]
405 print "export KVMHOST=%s.%s"%(qemubox,domain)
406 print "export NODE=%s"%(hostname)
410 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
411 def show_pass (self,passno):
412 for (key,val) in self.plc_spec.iteritems():
413 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
417 self.display_site_spec(site)
418 for node in site['nodes']:
419 self.display_node_spec(node)
420 elif key=='initscripts':
421 for initscript in val:
422 self.display_initscript_spec (initscript)
425 self.display_slice_spec (slice)
428 self.display_key_spec (key)
430 if key not in ['sites','initscripts','slices','keys', 'sfa']:
431 print '+ ',key,':',val
433 def display_site_spec (self,site):
434 print '+ ======== site',site['site_fields']['name']
435 for (k,v) in site.iteritems():
436 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
439 print '+ ','nodes : ',
441 print node['node_fields']['hostname'],'',
447 print user['name'],'',
449 elif k == 'site_fields':
450 print '+ login_base',':',v['login_base']
451 elif k == 'address_fields':
457 def display_initscript_spec (self,initscript):
458 print '+ ======== initscript',initscript['initscript_fields']['name']
460 def display_key_spec (self,key):
461 print '+ ======== key',key['key_name']
463 def display_slice_spec (self,slice):
464 print '+ ======== slice',slice['slice_fields']['name']
465 for (k,v) in slice.iteritems():
478 elif k=='slice_fields':
479 print '+ fields',':',
480 print 'max_nodes=',v['max_nodes'],
485 def display_node_spec (self,node):
486 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
487 print "hostname=",node['node_fields']['hostname'],
488 print "ip=",node['interface_fields']['ip']
489 if self.options.verbose:
490 utils.pprint("node details",node,depth=3)
492 # another entry point for just showing the boxes involved
493 def display_mapping (self):
494 TestPlc.display_mapping_plc(self.plc_spec)
498 def display_mapping_plc (plc_spec):
499 print '+ MyPLC',plc_spec['name']
500 # WARNING this would not be right for lxc-based PLC's - should be harmless though
501 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
502 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
503 for site_spec in plc_spec['sites']:
504 for node_spec in site_spec['nodes']:
505 TestPlc.display_mapping_node(node_spec)
508 def display_mapping_node (node_spec):
509 print '+ NODE %s'%(node_spec['name'])
510 print '+\tqemu box %s'%node_spec['host_box']
511 print '+\thostname=%s'%node_spec['node_fields']['hostname']
513 # write a timestamp in /vservers/<>.timestamp
514 # cannot be inside the vserver, that causes vserver .. build to cough
515 def timestamp_vs (self):
516 "Create a timestamp to remember creation date for this plc"
518 # TODO-lxc check this one
519 # a first approx. is to store the timestamp close to the VM root like vs does
520 stamp_path=self.vm_timestamp_path ()
521 stamp_dir = os.path.dirname (stamp_path)
522 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
523 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
525 # this is called inconditionnally at the beginning of the test sequence
526 # just in case this is a rerun, so if the vm is not running it's fine
528 "vserver delete the test myplc"
529 stamp_path=self.vm_timestamp_path()
530 self.run_in_host("rm -f %s"%stamp_path)
531 if self.options.plcs_use_lxc:
532 self.run_in_host("lxc-stop --name %s"%self.vservername)
533 self.run_in_host("lxc-destroy --name %s"%self.vservername)
536 self.run_in_host("vserver --silent %s delete"%self.vservername)
540 # historically the build was being fetched by the tests
541 # now the build pushes itself as a subdir of the tests workdir
542 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
543 def vs_create (self):
544 "vserver creation (no install done)"
545 # push the local build/ dir to the testplc box
547 # a full path for the local calls
548 build_dir=os.path.dirname(sys.argv[0])
549 # sometimes this is empty - set to "." in such a case
550 if not build_dir: build_dir="."
551 build_dir += "/build"
553 # use a standard name - will be relative to remote buildname
555 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
556 self.test_ssh.rmdir(build_dir)
557 self.test_ssh.copy(build_dir,recursive=True)
558 # the repo url is taken from arch-rpms-url
559 # with the last step (i386) removed
560 repo_url = self.options.arch_rpms_url
561 for level in [ 'arch' ]:
562 repo_url = os.path.dirname(repo_url)
563 # pass the vbuild-nightly options to vtest-init-vserver
565 test_env_options += " -p %s"%self.options.personality
566 test_env_options += " -d %s"%self.options.pldistro
567 test_env_options += " -f %s"%self.options.fcdistro
568 if self.options.plcs_use_lxc:
569 script="vtest-init-lxc.sh"
571 script="vtest-init-vserver.sh"
572 vserver_name = self.vservername
573 vserver_options="--netdev eth0 --interface %s"%self.vserverip
575 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
576 vserver_options += " --hostname %s"%vserver_hostname
578 print "Cannot reverse lookup %s"%self.vserverip
579 print "This is considered fatal, as this might pollute the test results"
581 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
582 return self.run_in_host(create_vserver) == 0
585 def plc_install(self):
586 "yum install myplc, noderepo, and the plain bootstrapfs"
588 # workaround for getting pgsql8.2 on centos5
589 if self.options.fcdistro == "centos5":
590 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
593 if self.options.personality == "linux32":
595 elif self.options.personality == "linux64":
598 raise Exception, "Unsupported personality %r"%self.options.personality
599 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
602 pkgs_list.append ("slicerepo-%s"%nodefamily)
603 pkgs_list.append ("myplc")
604 pkgs_list.append ("noderepo-%s"%nodefamily)
605 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
606 pkgs_string=" ".join(pkgs_list)
607 return self.yum_install (pkgs_list)
610 def plc_configure(self):
612 tmpname='%s.plc-config-tty'%(self.name())
613 fileconf=open(tmpname,'w')
614 for var in [ 'PLC_NAME',
619 'PLC_MAIL_SUPPORT_ADDRESS',
622 # Above line was added for integrating SFA Testing
628 'PLC_RESERVATION_GRANULARITY',
630 'PLC_OMF_XMPP_SERVER',
632 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
633 fileconf.write('w\n')
634 fileconf.write('q\n')
636 utils.system('cat %s'%tmpname)
637 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
638 utils.system('rm %s'%tmpname)
643 self.run_in_guest('service plc start')
648 self.run_in_guest('service plc stop')
652 "start the PLC vserver"
657 "stop the PLC vserver"
661 # stores the keys from the config for further use
662 def keys_store(self):
663 "stores test users ssh keys in keys/"
664 for key_spec in self.plc_spec['keys']:
665 TestKey(self,key_spec).store_key()
668 def keys_clean(self):
669 "removes keys cached in keys/"
670 utils.system("rm -rf ./keys")
673 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
674 # for later direct access to the nodes
675 def keys_fetch(self):
676 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
678 if not os.path.isdir(dir):
680 vservername=self.vservername
681 vm_root=self.vm_root_in_host()
683 prefix = 'debug_ssh_key'
684 for ext in [ 'pub', 'rsa' ] :
685 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
686 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
687 if self.test_ssh.fetch(src,dst) != 0: overall=False
691 "create sites with PLCAPI"
692 return self.do_sites()
694 def delete_sites (self):
695 "delete sites with PLCAPI"
696 return self.do_sites(action="delete")
698 def do_sites (self,action="add"):
699 for site_spec in self.plc_spec['sites']:
700 test_site = TestSite (self,site_spec)
701 if (action != "add"):
702 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
703 test_site.delete_site()
704 # deleted with the site
705 #test_site.delete_users()
708 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
709 test_site.create_site()
710 test_site.create_users()
713 def delete_all_sites (self):
714 "Delete all sites in PLC, and related objects"
715 print 'auth_root',self.auth_root()
716 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
718 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
719 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
720 site_id=site['site_id']
721 print 'Deleting site_id',site_id
722 self.apiserver.DeleteSite(self.auth_root(),site_id)
726 "create nodes with PLCAPI"
727 return self.do_nodes()
728 def delete_nodes (self):
729 "delete nodes with PLCAPI"
730 return self.do_nodes(action="delete")
732 def do_nodes (self,action="add"):
733 for site_spec in self.plc_spec['sites']:
734 test_site = TestSite (self,site_spec)
736 utils.header("Deleting nodes in site %s"%test_site.name())
737 for node_spec in site_spec['nodes']:
738 test_node=TestNode(self,test_site,node_spec)
739 utils.header("Deleting %s"%test_node.name())
740 test_node.delete_node()
742 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
743 for node_spec in site_spec['nodes']:
744 utils.pprint('Creating node %s'%node_spec,node_spec)
745 test_node = TestNode (self,test_site,node_spec)
746 test_node.create_node ()
749 def nodegroups (self):
750 "create nodegroups with PLCAPI"
751 return self.do_nodegroups("add")
752 def delete_nodegroups (self):
753 "delete nodegroups with PLCAPI"
754 return self.do_nodegroups("delete")
758 def translate_timestamp (start,grain,timestamp):
759 if timestamp < TestPlc.YEAR: return start+timestamp*grain
760 else: return timestamp
763 def timestamp_printable (timestamp):
764 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
767 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
769 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
770 print 'API answered grain=',grain
771 start=(now/grain)*grain
773 # find out all nodes that are reservable
774 nodes=self.all_reservable_nodenames()
776 utils.header ("No reservable node found - proceeding without leases")
779 # attach them to the leases as specified in plc_specs
780 # this is where the 'leases' field gets interpreted as relative of absolute
781 for lease_spec in self.plc_spec['leases']:
782 # skip the ones that come with a null slice id
783 if not lease_spec['slice']: continue
784 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
785 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
786 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
787 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
788 if lease_addition['errors']:
789 utils.header("Cannot create leases, %s"%lease_addition['errors'])
792 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
793 (nodes,lease_spec['slice'],
794 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
795 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
799 def delete_leases (self):
800 "remove all leases in the myplc side"
801 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
802 utils.header("Cleaning leases %r"%lease_ids)
803 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
806 def list_leases (self):
807 "list all leases known to the myplc"
808 leases = self.apiserver.GetLeases(self.auth_root())
811 current=l['t_until']>=now
812 if self.options.verbose or current:
813 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
814 TestPlc.timestamp_printable(l['t_from']),
815 TestPlc.timestamp_printable(l['t_until'])))
818 # create nodegroups if needed, and populate
819 def do_nodegroups (self, action="add"):
820 # 1st pass to scan contents
822 for site_spec in self.plc_spec['sites']:
823 test_site = TestSite (self,site_spec)
824 for node_spec in site_spec['nodes']:
825 test_node=TestNode (self,test_site,node_spec)
826 if node_spec.has_key('nodegroups'):
827 nodegroupnames=node_spec['nodegroups']
828 if isinstance(nodegroupnames,StringTypes):
829 nodegroupnames = [ nodegroupnames ]
830 for nodegroupname in nodegroupnames:
831 if not groups_dict.has_key(nodegroupname):
832 groups_dict[nodegroupname]=[]
833 groups_dict[nodegroupname].append(test_node.name())
834 auth=self.auth_root()
836 for (nodegroupname,group_nodes) in groups_dict.iteritems():
838 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
839 # first, check if the nodetagtype is here
840 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
842 tag_type_id = tag_types[0]['tag_type_id']
844 tag_type_id = self.apiserver.AddTagType(auth,
845 {'tagname':nodegroupname,
846 'description': 'for nodegroup %s'%nodegroupname,
848 print 'located tag (type)',nodegroupname,'as',tag_type_id
850 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
852 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
853 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
854 # set node tag on all nodes, value='yes'
855 for nodename in group_nodes:
857 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
859 traceback.print_exc()
860 print 'node',nodename,'seems to already have tag',nodegroupname
863 expect_yes = self.apiserver.GetNodeTags(auth,
864 {'hostname':nodename,
865 'tagname':nodegroupname},
866 ['value'])[0]['value']
867 if expect_yes != "yes":
868 print 'Mismatch node tag on node',nodename,'got',expect_yes
871 if not self.options.dry_run:
872 print 'Cannot find tag',nodegroupname,'on node',nodename
876 print 'cleaning nodegroup',nodegroupname
877 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
879 traceback.print_exc()
883 # a list of TestNode objs
884 def all_nodes (self):
886 for site_spec in self.plc_spec['sites']:
887 test_site = TestSite (self,site_spec)
888 for node_spec in site_spec['nodes']:
889 nodes.append(TestNode (self,test_site,node_spec))
892 # return a list of tuples (nodename,qemuname)
893 def all_node_infos (self) :
895 for site_spec in self.plc_spec['sites']:
896 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
897 for node_spec in site_spec['nodes'] ]
900 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
901 def all_reservable_nodenames (self):
903 for site_spec in self.plc_spec['sites']:
904 for node_spec in site_spec['nodes']:
905 node_fields=node_spec['node_fields']
906 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
907 res.append(node_fields['hostname'])
910 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
911 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
912 if self.options.dry_run:
916 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
917 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
918 # the nodes that haven't checked yet - start with a full list and shrink over time
919 tocheck = self.all_hostnames()
920 utils.header("checking nodes %r"%tocheck)
921 # create a dict hostname -> status
922 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
925 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
927 for array in tocheck_status:
928 hostname=array['hostname']
929 boot_state=array['boot_state']
930 if boot_state == target_boot_state:
931 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
933 # if it's a real node, never mind
934 (site_spec,node_spec)=self.locate_hostname(hostname)
935 if TestNode.is_real_model(node_spec['node_fields']['model']):
936 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
938 boot_state = target_boot_state
939 elif datetime.datetime.now() > graceout:
940 utils.header ("%s still in '%s' state"%(hostname,boot_state))
941 graceout=datetime.datetime.now()+datetime.timedelta(1)
942 status[hostname] = boot_state
944 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
947 if datetime.datetime.now() > timeout:
948 for hostname in tocheck:
949 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
951 # otherwise, sleep for a while
953 # only useful in empty plcs
956 def nodes_booted(self):
957 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
959 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
961 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
962 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
963 vservername=self.vservername
966 local_key = "keys/%(vservername)s-debug.rsa"%locals()
969 local_key = "keys/key_admin.rsa"
970 node_infos = self.all_node_infos()
971 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
972 for (nodename,qemuname) in node_infos:
973 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
974 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
975 (timeout_minutes,silent_minutes,period))
977 for node_info in node_infos:
978 (hostname,qemuname) = node_info
979 # try to run 'hostname' in the node
980 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
981 # don't spam logs - show the command only after the grace period
982 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
984 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
986 node_infos.remove(node_info)
988 # we will have tried real nodes once, in case they're up - but if not, just skip
989 (site_spec,node_spec)=self.locate_hostname(hostname)
990 if TestNode.is_real_model(node_spec['node_fields']['model']):
991 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
992 node_infos.remove(node_info)
995 if datetime.datetime.now() > timeout:
996 for (hostname,qemuname) in node_infos:
997 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
999 # otherwise, sleep for a while
1001 # only useful in empty plcs
1004 def ssh_node_debug(self):
1005 "Tries to ssh into nodes in debug mode with the debug ssh key"
1006 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
1008 def ssh_node_boot(self):
1009 "Tries to ssh into nodes in production mode with the root ssh key"
1010 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
1013 def qemu_local_init (self): pass
1015 def bootcd (self): pass
1017 def qemu_local_config (self): pass
1019 def nodestate_reinstall (self): pass
1021 def nodestate_safeboot (self): pass
1023 def nodestate_boot (self): pass
1025 def nodestate_show (self): pass
1027 def qemu_export (self): pass
1029 ### check hooks : invoke scripts from hooks/{node,slice}
1030 def check_hooks_node (self):
1031 return self.locate_first_node().check_hooks()
1032 def check_hooks_sliver (self) :
1033 return self.locate_first_sliver().check_hooks()
1035 def check_hooks (self):
1036 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1037 return self.check_hooks_node() and self.check_hooks_sliver()
1040 def do_check_initscripts(self):
1042 for slice_spec in self.plc_spec['slices']:
1043 if not slice_spec.has_key('initscriptstamp'):
1045 stamp=slice_spec['initscriptstamp']
1046 for nodename in slice_spec['nodenames']:
1047 (site,node) = self.locate_node (nodename)
1048 # xxx - passing the wrong site - probably harmless
1049 test_site = TestSite (self,site)
1050 test_slice = TestSlice (self,test_site,slice_spec)
1051 test_node = TestNode (self,test_site,node)
1052 test_sliver = TestSliver (self, test_node, test_slice)
1053 if not test_sliver.check_initscript_stamp(stamp):
1057 def check_initscripts(self):
1058 "check that the initscripts have triggered"
1059 return self.do_check_initscripts()
1061 def initscripts (self):
1062 "create initscripts with PLCAPI"
1063 for initscript in self.plc_spec['initscripts']:
1064 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1065 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1068 def delete_initscripts (self):
1069 "delete initscripts with PLCAPI"
1070 for initscript in self.plc_spec['initscripts']:
1071 initscript_name = initscript['initscript_fields']['name']
1072 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1074 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1075 print initscript_name,'deleted'
1077 print 'deletion went wrong - probably did not exist'
1082 "create slices with PLCAPI"
1083 return self.do_slices(action="add")
1085 def delete_slices (self):
1086 "delete slices with PLCAPI"
1087 return self.do_slices(action="delete")
1089 def fill_slices (self):
1090 "add nodes in slices with PLCAPI"
1091 return self.do_slices(action="fill")
1093 def empty_slices (self):
1094 "remove nodes from slices with PLCAPI"
1095 return self.do_slices(action="empty")
1097 def do_slices (self, action="add"):
1098 for slice in self.plc_spec['slices']:
1099 site_spec = self.locate_site (slice['sitename'])
1100 test_site = TestSite(self,site_spec)
1101 test_slice=TestSlice(self,test_site,slice)
1102 if action == "delete":
1103 test_slice.delete_slice()
1104 elif action=="fill":
1105 test_slice.add_nodes()
1106 elif action=="empty":
1107 test_slice.delete_nodes()
1109 test_slice.create_slice()
1113 def ssh_slice(self): pass
1115 def ssh_slice_off (self): pass
1118 def keys_clear_known_hosts (self): pass
1120 def speed_up_slices (self):
1121 "tweak nodemanager settings on all nodes using a conf file"
1122 # create the template on the server-side
1123 template="%s.nodemanager"%self.name()
1124 template_file = open (template,"w")
1125 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1126 template_file.close()
1127 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1128 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1129 self.test_ssh.copy_abs(template,remote)
1131 self.apiserver.AddConfFile (self.auth_root(),
1132 {'dest':'/etc/sysconfig/nodemanager',
1133 'source':'PlanetLabConf/nodemanager',
1134 'postinstall_cmd':'service nm restart',})
1138 def qemu_start (self) : pass
1141 def timestamp_qemu (self) : pass
1143 def check_tcp (self):
1144 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1145 specs = self.plc_spec['tcp_test']
1150 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1151 if not s_test_sliver.run_tcp_server(port,timeout=10):
1155 # idem for the client side
1156 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1157 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1161 # painfully enough, we need to allow for some time as netflow might show up last
1162 def check_sys_slice (self):
1163 "all nodes: check that a system slice is alive"
1164 # would probably make more sense to check for netflow,
1165 # but that one is currently not working in the lxc distro
1166 # return self.check_systemslice ('netflow')
1167 return self.check_systemslice ('drl')
1169 # we have the slices up already here, so it should not take too long
1170 def check_systemslice (self, slicename, timeout_minutes=5, period=15):
1171 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
1172 test_nodes=self.all_nodes()
1174 for test_node in test_nodes:
1175 if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
1177 test_nodes.remove(test_node)
1182 if datetime.datetime.now () > timeout:
1183 for test_node in test_nodes:
1184 utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
1189 def plcsh_stress_test (self):
1190 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1191 # install the stress-test in the plc image
1192 location = "/usr/share/plc_api/plcsh_stress_test.py"
1193 remote="%s/%s"%(self.vm_root_in_host(),location)
1194 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1196 command += " -- --check"
1197 if self.options.size == 1:
1198 command += " --tiny"
1199 return ( self.run_in_guest(command) == 0)
1201 # populate runs the same utility without slightly different options
1202 # in particular runs with --preserve (dont cleanup) and without --check
1203 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1205 def sfa_install_all (self):
1206 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1207 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1209 def sfa_install_core(self):
1211 return self.yum_install ("sfa")
1213 def sfa_install_plc(self):
1214 "yum install sfa-plc"
1215 return self.yum_install("sfa-plc")
1217 def sfa_install_sfatables(self):
1218 "yum install sfa-sfatables"
1219 return self.yum_install ("sfa-sfatables")
1221 # for some very odd reason, this sometimes fails with the following symptom
1222 # # yum install sfa-client
1223 # Setting up Install Process
1225 # Downloading Packages:
1226 # Running rpm_check_debug
1227 # Running Transaction Test
1228 # Transaction Test Succeeded
1229 # Running Transaction
1230 # Transaction couldn't start:
1231 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1232 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1233 # even though in the same context I have
1234 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1235 # Filesystem Size Used Avail Use% Mounted on
1236 # /dev/hdv1 806G 264G 501G 35% /
1237 # none 16M 36K 16M 1% /tmp
1239 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1240 def sfa_install_client(self):
1241 "yum install sfa-client"
1242 first_try=self.yum_install("sfa-client")
1243 if first_try: return True
1244 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1245 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1246 utils.header("rpm_path=<<%s>>"%rpm_path)
1248 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1249 return self.yum_check_installed ("sfa-client")
1251 def sfa_dbclean(self):
1252 "thoroughly wipes off the SFA database"
1253 return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
1254 self.run_in_guest("sfa-nuke.py")==0 or \
1255 self.run_in_guest("sfa-nuke-plc.py")==0
1257 def sfa_plcclean(self):
1258 "cleans the PLC entries that were created as a side effect of running the script"
1260 sfa_spec=self.plc_spec['sfa']
1262 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1263 login_base=auth_sfa_spec['login_base']
1264 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1265 except: print "Site %s already absent from PLC db"%login_base
1267 for spec_name in ['pi_spec','user_spec']:
1268 user_spec=auth_sfa_spec[spec_name]
1269 username=user_spec['email']
1270 try: self.apiserver.DeletePerson(self.auth_root(),username)
1272 # this in fact is expected as sites delete their members
1273 #print "User %s already absent from PLC db"%username
1276 print "REMEMBER TO RUN sfa_import AGAIN"
1279 def sfa_uninstall(self):
1280 "uses rpm to uninstall sfa - ignore result"
1281 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1282 self.run_in_guest("rm -rf /var/lib/sfa")
1283 self.run_in_guest("rm -rf /etc/sfa")
1284 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1286 self.run_in_guest("rpm -e --noscripts sfa-plc")
1289 ### run unit tests for SFA
1290 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1291 # Running Transaction
1292 # Transaction couldn't start:
1293 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1294 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1295 # no matter how many Gbs are available on the testplc
1296 # could not figure out what's wrong, so...
1297 # if the yum install phase fails, consider the test is successful
1298 # other combinations will eventually run it hopefully
1299 def sfa_utest(self):
1300 "yum install sfa-tests and run SFA unittests"
1301 self.run_in_guest("yum -y install sfa-tests")
1302 # failed to install - forget it
1303 if self.run_in_guest("rpm -q sfa-tests")!=0:
1304 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1306 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1310 dirname="conf.%s"%self.plc_spec['name']
1311 if not os.path.isdir(dirname):
1312 utils.system("mkdir -p %s"%dirname)
1313 if not os.path.isdir(dirname):
1314 raise "Cannot create config dir for plc %s"%self.name()
1317 def conffile(self,filename):
1318 return "%s/%s"%(self.confdir(),filename)
1319 def confsubdir(self,dirname,clean,dry_run=False):
1320 subdirname="%s/%s"%(self.confdir(),dirname)
1322 utils.system("rm -rf %s"%subdirname)
1323 if not os.path.isdir(subdirname):
1324 utils.system("mkdir -p %s"%subdirname)
1325 if not dry_run and not os.path.isdir(subdirname):
1326 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1329 def conffile_clean (self,filename):
1330 filename=self.conffile(filename)
1331 return utils.system("rm -rf %s"%filename)==0
1334 def sfa_configure(self):
1335 "run sfa-config-tty"
1336 tmpname=self.conffile("sfa-config-tty")
1337 fileconf=open(tmpname,'w')
1338 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1339 'SFA_INTERFACE_HRN',
1340 'SFA_REGISTRY_LEVEL1_AUTH',
1341 'SFA_REGISTRY_HOST',
1342 'SFA_AGGREGATE_HOST',
1352 'SFA_GENERIC_FLAVOUR',
1353 'SFA_AGGREGATE_ENABLED',
1355 if self.plc_spec['sfa'].has_key(var):
1356 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1357 # the way plc_config handles booleans just sucks..
1360 if self.plc_spec['sfa'][var]: val='true'
1361 fileconf.write ('e %s\n%s\n'%(var,val))
1362 fileconf.write('w\n')
1363 fileconf.write('R\n')
1364 fileconf.write('q\n')
1366 utils.system('cat %s'%tmpname)
1367 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1370 def aggregate_xml_line(self):
1371 port=self.plc_spec['sfa']['neighbours-port']
1372 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1373 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1375 def registry_xml_line(self):
1376 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1377 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1380 # a cross step that takes all other plcs in argument
1381 def cross_sfa_configure(self, other_plcs):
1382 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1383 # of course with a single plc, other_plcs is an empty list
1386 agg_fname=self.conffile("agg.xml")
1387 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1388 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1389 utils.header ("(Over)wrote %s"%agg_fname)
1390 reg_fname=self.conffile("reg.xml")
1391 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1392 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1393 utils.header ("(Over)wrote %s"%reg_fname)
1394 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1395 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1397 def sfa_import(self):
1399 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1401 self.run_in_guest('sfaadmin.py reg import_registry')==0
1402 # not needed anymore
1403 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1405 def sfa_start(self):
1407 return self.run_in_guest('service sfa start')==0
1409 def sfi_configure(self):
1410 "Create /root/sfi on the plc side for sfi client configuration"
1411 if self.options.dry_run:
1412 utils.header("DRY RUN - skipping step")
1414 sfa_spec=self.plc_spec['sfa']
1415 # cannot use auth_sfa_mapper to pass dir_name
1416 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1417 test_slice=TestAuthSfa(self,slice_spec)
1418 dir_basename=os.path.basename(test_slice.sfi_path())
1419 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1420 test_slice.sfi_configure(dir_name)
1421 # push into the remote /root/sfi area
1422 location = test_slice.sfi_path()
1423 remote="%s/%s"%(self.vm_root_in_host(),location)
1424 self.test_ssh.mkdir(remote,abs=True)
1425 # need to strip last level or remote otherwise we get an extra dir level
1426 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1430 def sfi_clean (self):
1431 "clean up /root/sfi on the plc side"
1432 self.run_in_guest("rm -rf /root/sfi")
1436 def sfa_add_site (self): pass
1438 def sfa_add_pi (self): pass
1440 def sfa_add_user(self): pass
1442 def sfa_update_user(self): pass
1444 def sfa_add_slice(self): pass
1446 def sfa_discover(self): pass
1448 def sfa_create_slice(self): pass
1450 def sfa_check_slice_plc(self): pass
1452 def sfa_update_slice(self): pass
1454 def sfi_list(self): pass
1456 def sfi_show(self): pass
1458 def sfi_slices(self): pass
1460 def ssh_slice_sfa(self): pass
1462 def sfa_delete_user(self): pass
1464 def sfa_delete_slice(self): pass
1468 self.run_in_guest('service sfa stop')==0
1471 def populate (self):
1472 "creates random entries in the PLCAPI"
1473 # install the stress-test in the plc image
1474 location = "/usr/share/plc_api/plcsh_stress_test.py"
1475 remote="%s/%s"%(self.vm_root_in_host(),location)
1476 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1478 command += " -- --preserve --short-names"
1479 local = (self.run_in_guest(command) == 0);
1480 # second run with --foreign
1481 command += ' --foreign'
1482 remote = (self.run_in_guest(command) == 0);
1483 return ( local and remote)
1485 def gather_logs (self):
1486 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1487 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1488 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1489 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1490 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1491 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1492 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1494 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1495 self.gather_var_logs ()
1497 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1498 self.gather_pgsql_logs ()
1500 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1501 self.gather_root_sfi ()
1503 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1504 for site_spec in self.plc_spec['sites']:
1505 test_site = TestSite (self,site_spec)
1506 for node_spec in site_spec['nodes']:
1507 test_node=TestNode(self,test_site,node_spec)
1508 test_node.gather_qemu_logs()
1510 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1511 self.gather_nodes_var_logs()
1513 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1514 self.gather_slivers_var_logs()
1517 def gather_slivers_var_logs(self):
1518 for test_sliver in self.all_sliver_objs():
1519 remote = test_sliver.tar_var_logs()
1520 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1521 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1522 utils.system(command)
1525 def gather_var_logs (self):
1526 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1527 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1528 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1529 utils.system(command)
1530 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1531 utils.system(command)
1533 def gather_pgsql_logs (self):
1534 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1535 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1536 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1537 utils.system(command)
1539 def gather_root_sfi (self):
1540 utils.system("mkdir -p logs/sfi.%s"%self.name())
1541 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1542 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1543 utils.system(command)
1545 def gather_nodes_var_logs (self):
1546 for site_spec in self.plc_spec['sites']:
1547 test_site = TestSite (self,site_spec)
1548 for node_spec in site_spec['nodes']:
1549 test_node=TestNode(self,test_site,node_spec)
1550 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1551 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1552 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1553 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1554 utils.system(command)
1557 # returns the filename to use for sql dump/restore, using options.dbname if set
1558 def dbfile (self, database):
1559 # uses options.dbname if it is found
1561 name=self.options.dbname
1562 if not isinstance(name,StringTypes):
1565 t=datetime.datetime.now()
1568 return "/root/%s-%s.sql"%(database,name)
1570 def plc_db_dump(self):
1571 'dump the planetlab5 DB in /root in the PLC - filename has time'
1572 dump=self.dbfile("planetab5")
1573 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1574 utils.header('Dumped planetlab5 database in %s'%dump)
1577 def plc_db_restore(self):
1578 'restore the planetlab5 DB - looks broken, but run -n might help'
1579 dump=self.dbfile("planetab5")
1580 ##stop httpd service
1581 self.run_in_guest('service httpd stop')
1582 # xxx - need another wrapper
1583 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1584 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1585 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1586 ##starting httpd service
1587 self.run_in_guest('service httpd start')
1589 utils.header('Database restored from ' + dump)
1591 def standby_1_through_20(self):
1592 """convenience function to wait for a specified number of minutes"""
1595 def standby_1(): pass
1597 def standby_2(): pass
1599 def standby_3(): pass
1601 def standby_4(): pass
1603 def standby_5(): pass
1605 def standby_6(): pass
1607 def standby_7(): pass
1609 def standby_8(): pass
1611 def standby_9(): pass
1613 def standby_10(): pass
1615 def standby_11(): pass
1617 def standby_12(): pass
1619 def standby_13(): pass
1621 def standby_14(): pass
1623 def standby_15(): pass
1625 def standby_16(): pass
1627 def standby_17(): pass
1629 def standby_18(): pass
1631 def standby_19(): pass
1633 def standby_20(): pass