# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import os, os.path
import sys
import time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
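# illustration only (not a test step): with standby_generic, a stub like
#    @standby_generic
#    def standby_5(): pass
# (see the end of this file) becomes a step whose duration is encoded in its
# name - invoking it sleeps for 5 minutes, or does nothing under --dry-run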
def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
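# illustration only: a per-node step is declared in TestPlc as an empty,
# documented method, e.g.
#    @node_mapper
#    def qemu_start (self): pass
# and the decorator replaces the body with a loop that invokes
# TestNode.qemu_start on every node of every site, ANDing the results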
SEP='<sep>'
SEPSFA='<sep_sfa>'

class TestPlc:

    default_steps = [
        'show', SEP,
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_netflow', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'populate', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
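    # example: printable_steps(['vs_delete','vs_create',SEP,'plc_install'])
    # returns 'vs_delete vs_create \' + newline + 'plc_install ' - i.e. the
    # SEP/SEPSFA markers become shell-style line continuations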
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
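    # example: for a full build, rpms_url would be something like
    #   http://build.example.com/2011.11.20--f14/RPMS/x86_64   (hypothetical URL)
    # and the curl|grep probe succeeds iff the repo index lists an sfa- package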
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.hostname(),command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_guest(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)
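    # example: with vservername 'vplc01' (an illustrative name), the VM root
    # as seen from the host box is
    #   /var/lib/lxc/vplc01/rootfs/   in lxc mode
    #   /vservers/vplc01              in vserver mode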
    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-start --name=%s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
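    # to picture the layering: in vserver mode, run_in_guest('service plc start')
    # amounts to running, through test_ssh on the host box,
    #   vserver <vservername> exec service plc start
    # and run_in_guest_piped additionally feeds the remote command's stdin
    # from a local command's stdout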
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.run_in_guest("rpm -q %s"%rpms)==0
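    # usage sketch: yum_install(['myplc','noderepo-onelab-f14-x86_64'])
    # issues a single 'yum -y install ...' with both packages, then decides
    # success from 'rpm -q' rather than from yum's exit code
    # (the noderepo name here is only an example)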
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result
    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)

        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
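    # sample output (hypothetical box and node names):
    #   testbox1.example.com : vnode01 + vnode02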
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True
    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        print "export PLCHOST=%s"%fqdn
        print "export GUEST=%s"%self.plc_spec['vservername']
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]

    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ ','nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ ','users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-destroy --name %s"%self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
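    # for the record, the command assembled by vs_create looks like this
    # (all values are illustrative):
    #   build/vtest-init-vserver.sh -p linux64 -d onelab -f f14 vplc01 \
    #      http://build.example.com/2011.11.20--f14 -- \
    #      --netdev eth0 --interface 192.168.0.10 --hostname vplc01.example.com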
    ### install_rpm
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
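    # example: personality=linux64, pldistro=onelab, fcdistro=f14 yield
    # nodefamily 'onelab-f14-x86_64', hence packages like
    # noderepo-onelab-f14-x86_64 and nodeimage-onelab-f14-x86_64-plain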
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_guest()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
650 "create sites with PLCAPI"
651 return self.do_sites()
653 def delete_sites (self):
654 "delete sites with PLCAPI"
655 return self.do_sites(action="delete")
657 def do_sites (self,action="add"):
658 for site_spec in self.plc_spec['sites']:
659 test_site = TestSite (self,site_spec)
660 if (action != "add"):
661 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
662 test_site.delete_site()
663 # deleted with the site
664 #test_site.delete_users()
667 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
668 test_site.create_site()
669 test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
682 "create nodes with PLCAPI"
683 return self.do_nodes()
684 def delete_nodes (self):
685 "delete nodes with PLCAPI"
686 return self.do_nodes(action="delete")
688 def do_nodes (self,action="add"):
689 for site_spec in self.plc_spec['sites']:
690 test_site = TestSite (self,site_spec)
692 utils.header("Deleting nodes in site %s"%test_site.name())
693 for node_spec in site_spec['nodes']:
694 test_node=TestNode(self,test_site,node_spec)
695 utils.header("Deleting %s"%test_node.name())
696 test_node.delete_node()
698 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
699 for node_spec in site_spec['nodes']:
700 utils.pprint('Creating node %s'%node_spec,node_spec)
701 test_node = TestNode (self,test_site,node_spec)
702 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
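    # example: with grain=1800 (30-minute grains) and start on the current
    # grain boundary, a spec timestamp of 2 is below YEAR, so it is read as
    # relative and translates to start+3600; a large value like 1321900000
    # is kept as an absolute UNIX timestamp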
723 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
725 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
726 print 'API answered grain=',grain
727 start=(now/grain)*grain
729 # find out all nodes that are reservable
730 nodes=self.all_reservable_nodenames()
732 utils.header ("No reservable node found - proceeding without leases")
735 # attach them to the leases as specified in plc_specs
736 # this is where the 'leases' field gets interpreted as relative of absolute
737 for lease_spec in self.plc_spec['leases']:
738 # skip the ones that come with a null slice id
739 if not lease_spec['slice']: continue
740 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
741 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
742 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
743 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
744 if lease_addition['errors']:
745 utils.header("Cannot create leases, %s"%lease_addition['errors'])
748 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
749 (nodes,lease_spec['slice'],
750 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
751 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then the nodegroup itself
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall=False
            else:
                print 'cleaning nodegroup',nodegroupname
                try:
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos
    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's cheat
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass
    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass
    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass
    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass
    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass
    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass
    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        pass
    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1054 "create slices with PLCAPI"
1055 return self.do_slices()
1057 def delete_slices (self):
1058 "delete slices with PLCAPI"
1059 return self.do_slices("delete")
1061 def do_slices (self, action="add"):
1062 for slice in self.plc_spec['slices']:
1063 site_spec = self.locate_site (slice['sitename'])
1064 test_site = TestSite(self,site_spec)
1065 test_slice=TestSlice(self,test_site,slice)
1067 utils.header("Deleting slices in site %s"%test_site.name())
1068 test_slice.delete_slice()
1070 utils.pprint("Creating slice",slice)
1071 test_slice.create_slice()
1072 utils.header('Created Slice %s'%slice['slice_fields']['name'])
    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
        pass

    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    @node_mapper
    def timestamp_qemu (self) :
        "all nodes: write a timestamp in the node's nodedir on the qemu box"
        pass
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_netflow (self):
        "all nodes: check that the netflow slice is alive"
        return self.check_systemslice ('netflow')

    # we have the slices up already here, so it should not take too long
    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes=self.all_nodes()
        while test_nodes:
            for test_node in test_nodes:
                if test_node.check_systemslice (slicename):
                    test_nodes.remove(test_node)
                else:
                    print '.',
            if not test_nodes:
                return True
            if datetime.datetime.now () > timeout:
                for test_node in test_nodes:
                    utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
                return False
            time.sleep(period)
        return True
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)
    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_client(self):
        "yum install sfa-client"
        return self.yum_install("sfa-client")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        self.run_in_guest("sfa-nuke.py")==0 or \
        self.run_in_guest("sfa-nuke-plc.py")==0 or \
        self.run_in_guest("sfaadmin.py registry nuke")==0
        return True

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # ignore result
        sfa_spec=self.plc_spec['sfa']

        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename

            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # xxx tmp
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in [ ]:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
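    # sample registry line, assuming vserverip 192.168.0.10 and a root auth
    # named 'plctest' (both illustrative):
    #   <registry addr="192.168.0.10" hrn="plctest" port="12345"/>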
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0
    def sfa_import(self):
        "sfa-import-plc"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
               self.run_in_guest('sfa-import-plc.py')==0 or \
               self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_guest(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True
    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @slice_sfa_mapper
    def sfa_add_user(self):
        "run sfi.py add"
        pass

    @slice_sfa_mapper
    def sfa_update_user(self):
        "run sfi.py update"
        pass

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        pass

    @slice_sfa_mapper
    def sfa_view(self):
        "various registry-related calls"
        pass

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    @slice_sfa_mapper
    def sfa_delete_user(self):
        "run sfi.py delete"
        pass

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
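    # example: without --dbname, a dump taken on 2011-11-20 would go to
    # /root/planetlab5-2011-11-20.sql (per the date-based fallback above)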
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass