# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import sys
import os, os.path
import time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d minutes'%minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    actual.__name__=func.__name__
    return actual
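# e.g. a step named 'standby_10', once decorated with standby_generic, parses
# the '10' out of its own name and waits for ten minutes - see the standby_*
# steps at the end of this file.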
def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
    return actual
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
    return actual
def auth_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestAuthSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
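# A minimal usage sketch for these mappers: a step is declared in TestPlc as a
# plain stub, and the real work is the same-named method on TestNode (or
# TestSlice / TestAuthSfa respectively). For instance, further down:
#
#   @node_mapper
#   def qemu_start (self): pass
#
# so that plc.qemu_start() runs TestNode.qemu_start on every node and returns
# False if any of them failed.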
# separators used when rendering step lists - see printable_steps below
SEP='<sep>'
SEPSFA='<sep_sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_sys_slice', SEP,
        'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20', SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
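    # For instance printable_steps(TestPlc.default_steps) renders one group of
    # steps per line, roughly:
    #   vs_delete timestamp_vs vs_create \
    #   plc_install plc_configure plc_start \
    #   ...
    # while valid_step is used to filter the separators out of such a list.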
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        utils.header ("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod==0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')
    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # defining the API methods on this object through xmlrpc
    # would help, but is not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
        else:
            return "/vservers/%s.timestamp"%(self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
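    # A quick sketch of what the helpers above produce, with a made-up guest
    # named 'vplc01' at 192.168.122.101:
    #   host_to_guest('ls') -> "ssh -o StrictHostKeyChecking=no 192.168.122.101 ls"  (lxc)
    #                       -> "vserver vplc01 exec ls"                              (vserver)
    # and run_in_guest_piped('cat dump.sql','psql') feeds the local file into a
    # psql process running inside the guest.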
    def yum_check_installed (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        return self.run_in_guest("rpm -q %s"%rpms)==0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed (rpms)
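    # e.g. self.yum_install(['myplc','noderepo-onelab-f14-i386']) runs
    #   yum -y install myplc noderepo-onelab-f14-i386
    # in the guest, then trusts 'rpm -q' rather than yum's exit code to decide
    # whether the install really succeeded (package names only illustrative).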
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception,"Cannot locate key %s"%key_name

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
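    # sample output, with made-up boxes and nodes:
    #   testbox1.onelab.eu : vnode01 + vnode02
    #   testbox2.onelab.eu : vnode03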
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass (1)
        self.show_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        else:
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['key_name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True
    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-stop --name %s"%self.vservername)
            self.run_in_host("lxc-destroy --name %s"%self.vservername)
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
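    # with illustrative values, the assembled command looks like:
    #   build/vtest-init-lxc.sh -p linux64 -d onelab -f f14 vplc01 \
    #     http://build.onelab.eu/onelab/2012.05 -- \
    #     --netdev eth0 --interface 192.168.122.101 --hostname vplc01.onelab.eu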
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
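    # the generated input for plc-config-tty looks like (values made up):
    #   e PLC_NAME
    #   TestLab
    #   e PLC_RESERVATION_GRANULARITY
    #   1800
    #   w
    #   q
    # i.e. 'e <var>' enters a value, 'w' writes the config and 'q' quits.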
    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
    def sites (self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
        for site in sites:
            # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id=site['site_id']
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()
    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")
    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
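    # the heuristic: a 'leases' timestamp smaller than a year's worth of
    # seconds is relative, counted in grains from 'start'; anything larger is
    # taken as an absolute epoch timestamp. E.g. with start=1337760000 and
    # grain=1800, a spec value of 2 translates into 1337763600, while a full
    # epoch value like 1340000000 is kept as is.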
    def leases(self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now=int(time.time())
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        start += grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        if not nodes:
            utils.header ("No reservable node found - proceeding without leases")
            return True
        ok=True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                ok=False
            else:
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall=True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall=False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_hostnames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key_admin.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
    @node_mapper
    def qemu_local_init (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def qemu_local_config (self): pass
    @node_mapper
    def nodestate_reinstall (self): pass
    @node_mapper
    def nodestate_safeboot (self): pass
    @node_mapper
    def nodestate_boot (self): pass
    @node_mapper
    def nodestate_show (self): pass
    @node_mapper
    def qemu_export (self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
    def slices (self):
        "create slices with PLCAPI"
        return self.do_slices(action="add")

    def delete_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices(action="delete")

    def fill_slices (self):
        "add nodes in slices with PLCAPI"
        return self.do_slices(action="fill")

    def empty_slices (self):
        "remove nodes from slices with PLCAPI"
        return self.do_slices(action="empty")
    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action=="fill":
                test_slice.add_nodes()
            elif action=="empty":
                test_slice.delete_nodes()
            else:
                test_slice.create_slice()
        return True
    @slice_mapper
    def ssh_slice(self): pass
    @slice_mapper
    def ssh_slice_off (self): pass

    @node_mapper
    def keys_clear_known_hosts (self): pass
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
        return True
    @node_mapper
    def qemu_start (self) : pass

    @node_mapper
    def timestamp_qemu (self) : pass
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_sys_slice (self):
        "all nodes: check that a system slice is alive"
        # would probably make more sense to check for netflow,
        # but that one is currently not working in the lxc distro
        # return self.check_systemslice ('netflow')
        return self.check_systemslice ('drl')
    # we have the slices up already here, so it should not take too long
    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes=self.all_nodes()
        while test_nodes:
            for test_node in test_nodes:
                if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now () > timeout:
                for test_node in test_nodes:
                    utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
                return False
            time.sleep(period)
        return True
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)
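    # so with a size-1 config the guest ends up running
    #   /usr/share/plc_api/plcsh_stress_test.py -- --check --tiny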
    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try=self.yum_install("sfa-client")
        if first_try: return True
        utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
        (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>"%cached_rpm_path)
        self.run_in_guest("rpm -i %s"%cached_rpm_path)
        return self.yum_check_installed ("sfa-client")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
               self.run_in_guest("sfa-nuke.py")==0 or \
               self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base=auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception("Cannot create config dir for plc %s"%self.name())
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
               (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
               (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
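    # with made-up values these render lines like
    #   <aggregate addr="192.168.122.101" hrn="onelab" port="12346"/>
    #   <registry addr="192.168.122.101" hrn="onelab" port="12345"/>
    # which cross_sfa_configure below wraps into aggregates.xml/registries.xml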
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import(self):
        "runs sfaadmin.py reg import_registry"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return \
            self.run_in_guest('sfaadmin.py reg import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip the last level of remote, otherwise we'd get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @auth_sfa_mapper
    def sfa_add_site (self): pass
    @auth_sfa_mapper
    def sfa_add_pi (self): pass
    @auth_sfa_mapper
    def sfa_add_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_add_slice(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_create_slice(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show(self): pass
    @auth_sfa_mapper
    def sfi_slices(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return ( local and remote )
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes' qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes' /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
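    # e.g. dbfile('planetlab5') yields /root/planetlab5-nightly.sql when
    # options.dbname is set to 'nightly', and falls back to the current date
    # otherwise, as in /root/planetlab5-2012-05-23.sql (values illustrative)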
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ## stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ## restart httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass