1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=method.__doc__
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=method.__doc__
65 def slice_sfa_mapper (method):
68 slice_method = TestSliceSfa.__dict__[method.__name__]
69 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
70 site_spec = self.locate_site (slice_spec['sitename'])
71 test_site = TestSite(self,site_spec)
72 test_slice=TestSliceSfa(self,test_site,slice_spec)
73 if not slice_method(test_slice,self.options): overall=False
75 # restore the doc text
76 actual.__doc__=method.__doc__
86 'vs_delete','timestamp_vs','vs_create', SEP,
87 'plc_install', 'plc_configure', 'plc_start', SEP,
88 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
89 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
90 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
91 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
92 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
93 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
94 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
95 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
96 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
97 # but as the stress test might take a while, we sometimes missed the debug mode..
98 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
99 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
100 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
101 'check_tcp', 'check_netflow', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'standby_1_through_20',SEP,
120 def printable_steps (list):
121 single_line=" ".join(list)+" "
122 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
124 def valid_step (step):
125 return step != SEP and step != SEPSFA
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platform
130 def check_whether_build_has_sfa (rpms_url):
131 # warning, we're now building 'sface' so let's be a bit more picky
132 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
133 # full builds are expected to return with 0 here
135 # move all steps containing 'sfa' from default_steps to other_steps
136 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
137 TestPlc.other_steps += sfa_steps
138 for step in sfa_steps: TestPlc.default_steps.remove(step)
140 def __init__ (self,plc_spec,options):
141 self.plc_spec=plc_spec
143 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
144 self.vserverip=plc_spec['vserverip']
145 self.vservername=plc_spec['vservername']
146 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
147 self.apiserver=TestApiserver(self.url,options.dry_run)
150 name=self.plc_spec['name']
151 return "%s.%s"%(name,self.vservername)
154 return self.plc_spec['host_box']
157 return self.test_ssh.is_local()
159 # define the API methods on this object through xmlrpc
160 # would help, but not strictly necessary
164 def actual_command_in_guest (self,command):
165 return self.test_ssh.actual_command(self.host_to_guest(command))
167 def start_guest (self):
168 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
170 def stop_guest (self):
171 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
173 def run_in_guest (self,command):
174 return utils.system(self.actual_command_in_guest(command))
176 def run_in_host (self,command):
177 return self.test_ssh.run_in_buildname(command)
179 #command gets run in the plc's vm
180 def host_to_guest(self,command):
181 if self.options.plcs_use_lxc:
182 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.hostname(),command)
184 return "vserver %s exec %s"%(self.vservername,command)
186 def vm_root_in_host(self):
187 if self.options.plcs_use_lxc:
188 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
190 return "/vservers/%s"%(self.vservername)
192 def vm_timestamp_path (self):
193 if self.options.plcs_use_lxc:
194 return "/var/lib/lxc/%s.timestamp"%(self.vservername)
196 return "/vservers/%s.timestamp"%(self.vservername)
198 #start/stop the vserver
199 def start_guest_in_host(self):
200 if self.options.plcs_use_lxc:
201 return "lxc-start --name=%s"%(self.vservername)
203 return "vserver %s start"%(self.vservername)
205 def stop_guest_in_host(self):
206 if self.options.plcs_use_lxc:
207 return "lxc-stop --name=%s"%(self.vservername)
209 return "vserver %s stop"%(self.vservername)
212 def run_in_guest_piped (self,local,remote):
213 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
215 # does a yum install in the vs, ignore yum retcod, check with rpm
216 def yum_install (self, rpms):
217 if isinstance (rpms, list):
219 self.run_in_guest("yum -y install %s"%rpms)
220 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
221 self.run_in_guest("yum-complete-transaction -y")
222 return self.run_in_guest("rpm -q %s"%rpms)==0
224 def auth_root (self):
225 return {'Username':self.plc_spec['PLC_ROOT_USER'],
226 'AuthMethod':'password',
227 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
228 'Role' : self.plc_spec['role']
230 def locate_site (self,sitename):
231 for site in self.plc_spec['sites']:
232 if site['site_fields']['name'] == sitename:
234 if site['site_fields']['login_base'] == sitename:
236 raise Exception,"Cannot locate site %s"%sitename
238 def locate_node (self,nodename):
239 for site in self.plc_spec['sites']:
240 for node in site['nodes']:
241 if node['name'] == nodename:
243 raise Exception,"Cannot locate node %s"%nodename
245 def locate_hostname (self,hostname):
246 for site in self.plc_spec['sites']:
247 for node in site['nodes']:
248 if node['node_fields']['hostname'] == hostname:
250 raise Exception,"Cannot locate hostname %s"%hostname
252 def locate_key (self,keyname):
253 for key in self.plc_spec['keys']:
254 if key['name'] == keyname:
256 raise Exception,"Cannot locate key %s"%keyname
258 def locate_slice (self, slicename):
259 for slice in self.plc_spec['slices']:
260 if slice['slice_fields']['name'] == slicename:
262 raise Exception,"Cannot locate slice %s"%slicename
264 def all_sliver_objs (self):
266 for slice_spec in self.plc_spec['slices']:
267 slicename = slice_spec['slice_fields']['name']
268 for nodename in slice_spec['nodenames']:
269 result.append(self.locate_sliver_obj (nodename,slicename))
272 def locate_sliver_obj (self,nodename,slicename):
273 (site,node) = self.locate_node(nodename)
274 slice = self.locate_slice (slicename)
276 test_site = TestSite (self, site)
277 test_node = TestNode (self, test_site,node)
278 # xxx the slice site is assumed to be the node site - mhh - probably harmless
279 test_slice = TestSlice (self, test_site, slice)
280 return TestSliver (self, test_node, test_slice)
282 def locate_first_node(self):
283 nodename=self.plc_spec['slices'][0]['nodenames'][0]
284 (site,node) = self.locate_node(nodename)
285 test_site = TestSite (self, site)
286 test_node = TestNode (self, test_site,node)
289 def locate_first_sliver (self):
290 slice_spec=self.plc_spec['slices'][0]
291 slicename=slice_spec['slice_fields']['name']
292 nodename=slice_spec['nodenames'][0]
293 return self.locate_sliver_obj(nodename,slicename)
295 # all different hostboxes used in this plc
296 def gather_hostBoxes(self):
297 # maps on sites and nodes, return [ (host_box,test_node) ]
299 for site_spec in self.plc_spec['sites']:
300 test_site = TestSite (self,site_spec)
301 for node_spec in site_spec['nodes']:
302 test_node = TestNode (self, test_site, node_spec)
303 if not test_node.is_real():
304 tuples.append( (test_node.host_box(),test_node) )
305 # transform into a dict { 'host_box' -> [ test_node .. ] }
307 for (box,node) in tuples:
308 if not result.has_key(box):
311 result[box].append(node)
314 # a step for checking this stuff
315 def show_boxes (self):
316 'print summary of nodes location'
317 for (box,nodes) in self.gather_hostBoxes().iteritems():
318 print box,":"," + ".join( [ node.name() for node in nodes ] )
321 # make this a valid step
322 def qemu_kill_all(self):
323 'kill all qemu instances on the qemu boxes involved by this setup'
324 # this is the brute force version, kill all qemus on that host box
325 for (box,nodes) in self.gather_hostBoxes().iteritems():
326 # pass the first nodename, as we don't push template-qemu on testboxes
327 nodedir=nodes[0].nodedir()
328 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
331 # make this a valid step
332 def qemu_list_all(self):
333 'list all qemu instances on the qemu boxes involved by this setup'
334 for (box,nodes) in self.gather_hostBoxes().iteritems():
335 # this is the brute force version, kill all qemus on that host box
336 TestBoxQemu(box,self.options.buildname).qemu_list_all()
339 # kill only the right qemus
340 def qemu_list_mine(self):
341 'list qemu instances for our nodes'
342 for (box,nodes) in self.gather_hostBoxes().iteritems():
343 # the fine-grain version
348 # kill only the right qemus
349 def qemu_kill_mine(self):
350 'kill the qemu instances for our nodes'
351 for (box,nodes) in self.gather_hostBoxes().iteritems():
352 # the fine-grain version
357 #################### display config
359 "show test configuration after localization"
360 self.display_pass (1)
361 self.display_pass (2)
365 "print cut'n paste-able stuff to export env variables to your shell"
366 # guess local domain from hostname
367 domain=socket.gethostname().split('.',1)[1]
368 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
369 print "export BUILD=%s"%self.options.buildname
370 print "export PLCHOST=%s"%fqdn
371 print "export GUEST=%s"%self.plc_spec['vservername']
372 # find hostname of first node
373 (hostname,qemubox) = self.all_node_infos()[0]
374 print "export KVMHOST=%s.%s"%(qemubox,domain)
375 print "export NODE=%s"%(hostname)
379 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
380 def display_pass (self,passno):
381 for (key,val) in self.plc_spec.iteritems():
382 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
386 self.display_site_spec(site)
387 for node in site['nodes']:
388 self.display_node_spec(node)
389 elif key=='initscripts':
390 for initscript in val:
391 self.display_initscript_spec (initscript)
394 self.display_slice_spec (slice)
397 self.display_key_spec (key)
399 if key not in ['sites','initscripts','slices','keys', 'sfa']:
400 print '+ ',key,':',val
402 def display_site_spec (self,site):
403 print '+ ======== site',site['site_fields']['name']
404 for (k,v) in site.iteritems():
405 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
408 print '+ ','nodes : ',
410 print node['node_fields']['hostname'],'',
416 print user['name'],'',
418 elif k == 'site_fields':
419 print '+ login_base',':',v['login_base']
420 elif k == 'address_fields':
426 def display_initscript_spec (self,initscript):
427 print '+ ======== initscript',initscript['initscript_fields']['name']
429 def display_key_spec (self,key):
430 print '+ ======== key',key['name']
432 def display_slice_spec (self,slice):
433 print '+ ======== slice',slice['slice_fields']['name']
434 for (k,v) in slice.iteritems():
447 elif k=='slice_fields':
448 print '+ fields',':',
449 print 'max_nodes=',v['max_nodes'],
454 def display_node_spec (self,node):
455 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
456 print "hostname=",node['node_fields']['hostname'],
457 print "ip=",node['interface_fields']['ip']
458 if self.options.verbose:
459 utils.pprint("node details",node,depth=3)
461 # another entry point for just showing the boxes involved
462 def display_mapping (self):
463 TestPlc.display_mapping_plc(self.plc_spec)
467 def display_mapping_plc (plc_spec):
468 print '+ MyPLC',plc_spec['name']
469 # WARNING this would not be right for lxc-based PLC's - should be harmless though
470 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
471 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
472 for site_spec in plc_spec['sites']:
473 for node_spec in site_spec['nodes']:
474 TestPlc.display_mapping_node(node_spec)
477 def display_mapping_node (node_spec):
478 print '+ NODE %s'%(node_spec['name'])
479 print '+\tqemu box %s'%node_spec['host_box']
480 print '+\thostname=%s'%node_spec['node_fields']['hostname']
482 # write a timestamp in /vservers/<>.timestamp
483 # cannot be inside the vserver, that causes vserver .. build to cough
484 def timestamp_vs (self):
486 # TODO-lxc check this one
487 # a first approx. is to store the timestamp close to the VM root like vs does
488 stamp_path=self.vm_timestamp_path ()
489 stamp_dir = os.path.dirname (stamp_path)
490 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
491 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
493 # this is called inconditionnally at the beginning of the test sequence
494 # just in case this is a rerun, so if the vm is not running it's fine
496 "vserver delete the test myplc"
497 stamp_path=self.vm_timestamp_path()
498 self.run_in_host("rm -f %s"%stamp_path)
499 if self.options.plcs_use_lxc:
500 self.run_in_host("lxc-destroy --name %s"%self.vservername)
503 self.run_in_host("vserver --silent %s delete"%self.vservername)
507 # historically the build was being fetched by the tests
508 # now the build pushes itself as a subdir of the tests workdir
509 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
510 def vs_create (self):
511 "vserver creation (no install done)"
512 # push the local build/ dir to the testplc box
514 # a full path for the local calls
515 build_dir=os.path.dirname(sys.argv[0])
516 # sometimes this is empty - set to "." in such a case
517 if not build_dir: build_dir="."
518 build_dir += "/build"
520 # use a standard name - will be relative to remote buildname
522 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
523 self.test_ssh.rmdir(build_dir)
524 self.test_ssh.copy(build_dir,recursive=True)
525 # the repo url is taken from arch-rpms-url
526 # with the last step (i386) removed
527 repo_url = self.options.arch_rpms_url
528 for level in [ 'arch' ]:
529 repo_url = os.path.dirname(repo_url)
530 # pass the vbuild-nightly options to vtest-init-vserver
532 test_env_options += " -p %s"%self.options.personality
533 test_env_options += " -d %s"%self.options.pldistro
534 test_env_options += " -f %s"%self.options.fcdistro
535 if self.options.plcs_use_lxc:
536 script="vtest-init-lxc.sh"
538 script="vtest-init-vserver.sh"
539 vserver_name = self.vservername
540 vserver_options="--netdev eth0 --interface %s"%self.vserverip
542 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
543 vserver_options += " --hostname %s"%vserver_hostname
545 print "Cannot reverse lookup %s"%self.vserverip
546 print "This is considered fatal, as this might pollute the test results"
548 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
549 return self.run_in_host(create_vserver) == 0
552 def plc_install(self):
553 "yum install myplc, noderepo, and the plain bootstrapfs"
555 # workaround for getting pgsql8.2 on centos5
556 if self.options.fcdistro == "centos5":
557 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
560 if self.options.personality == "linux32":
562 elif self.options.personality == "linux64":
565 raise Exception, "Unsupported personality %r"%self.options.personality
566 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
569 pkgs_list.append ("slicerepo-%s"%nodefamily)
570 pkgs_list.append ("myplc")
571 pkgs_list.append ("noderepo-%s"%nodefamily)
572 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
573 pkgs_string=" ".join(pkgs_list)
574 return self.yum_install (pkgs_list)
577 def plc_configure(self):
579 tmpname='%s.plc-config-tty'%(self.name())
580 fileconf=open(tmpname,'w')
581 for var in [ 'PLC_NAME',
586 'PLC_MAIL_SUPPORT_ADDRESS',
589 # Above line was added for integrating SFA Testing
595 'PLC_RESERVATION_GRANULARITY',
597 'PLC_OMF_XMPP_SERVER',
599 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
600 fileconf.write('w\n')
601 fileconf.write('q\n')
603 utils.system('cat %s'%tmpname)
604 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
605 utils.system('rm %s'%tmpname)
610 self.run_in_guest('service plc start')
615 self.run_in_guest('service plc stop')
619 "start the PLC vserver"
624 "stop the PLC vserver"
628 # stores the keys from the config for further use
629 def keys_store(self):
630 "stores test users ssh keys in keys/"
631 for key_spec in self.plc_spec['keys']:
632 TestKey(self,key_spec).store_key()
635 def keys_clean(self):
636 "removes keys cached in keys/"
637 utils.system("rm -rf ./keys")
640 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
641 # for later direct access to the nodes
642 def keys_fetch(self):
643 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
645 if not os.path.isdir(dir):
647 vservername=self.vservername
648 vm_root=self.vm_root_in_host()
650 prefix = 'debug_ssh_key'
651 for ext in [ 'pub', 'rsa' ] :
652 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
653 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
654 if self.test_ssh.fetch(src,dst) != 0: overall=False
658 "create sites with PLCAPI"
659 return self.do_sites()
661 def delete_sites (self):
662 "delete sites with PLCAPI"
663 return self.do_sites(action="delete")
665 def do_sites (self,action="add"):
666 for site_spec in self.plc_spec['sites']:
667 test_site = TestSite (self,site_spec)
668 if (action != "add"):
669 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
670 test_site.delete_site()
671 # deleted with the site
672 #test_site.delete_users()
675 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
676 test_site.create_site()
677 test_site.create_users()
680 def delete_all_sites (self):
681 "Delete all sites in PLC, and related objects"
682 print 'auth_root',self.auth_root()
683 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
684 for site_id in site_ids:
685 print 'Deleting site_id',site_id
686 self.apiserver.DeleteSite(self.auth_root(),site_id)
690 "create nodes with PLCAPI"
691 return self.do_nodes()
692 def delete_nodes (self):
693 "delete nodes with PLCAPI"
694 return self.do_nodes(action="delete")
696 def do_nodes (self,action="add"):
697 for site_spec in self.plc_spec['sites']:
698 test_site = TestSite (self,site_spec)
700 utils.header("Deleting nodes in site %s"%test_site.name())
701 for node_spec in site_spec['nodes']:
702 test_node=TestNode(self,test_site,node_spec)
703 utils.header("Deleting %s"%test_node.name())
704 test_node.delete_node()
706 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
707 for node_spec in site_spec['nodes']:
708 utils.pprint('Creating node %s'%node_spec,node_spec)
709 test_node = TestNode (self,test_site,node_spec)
710 test_node.create_node ()
713 def nodegroups (self):
714 "create nodegroups with PLCAPI"
715 return self.do_nodegroups("add")
716 def delete_nodegroups (self):
717 "delete nodegroups with PLCAPI"
718 return self.do_nodegroups("delete")
722 def translate_timestamp (start,grain,timestamp):
723 if timestamp < TestPlc.YEAR: return start+timestamp*grain
724 else: return timestamp
727 def timestamp_printable (timestamp):
728 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
731 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
733 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
734 print 'API answered grain=',grain
735 start=(now/grain)*grain
737 # find out all nodes that are reservable
738 nodes=self.all_reservable_nodenames()
740 utils.header ("No reservable node found - proceeding without leases")
743 # attach them to the leases as specified in plc_specs
744 # this is where the 'leases' field gets interpreted as relative of absolute
745 for lease_spec in self.plc_spec['leases']:
746 # skip the ones that come with a null slice id
747 if not lease_spec['slice']: continue
748 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
749 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
750 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
751 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
752 if lease_addition['errors']:
753 utils.header("Cannot create leases, %s"%lease_addition['errors'])
756 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
757 (nodes,lease_spec['slice'],
758 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
759 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
763 def delete_leases (self):
764 "remove all leases in the myplc side"
765 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
766 utils.header("Cleaning leases %r"%lease_ids)
767 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
770 def list_leases (self):
771 "list all leases known to the myplc"
772 leases = self.apiserver.GetLeases(self.auth_root())
775 current=l['t_until']>=now
776 if self.options.verbose or current:
777 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
778 TestPlc.timestamp_printable(l['t_from']),
779 TestPlc.timestamp_printable(l['t_until'])))
782 # create nodegroups if needed, and populate
783 def do_nodegroups (self, action="add"):
784 # 1st pass to scan contents
786 for site_spec in self.plc_spec['sites']:
787 test_site = TestSite (self,site_spec)
788 for node_spec in site_spec['nodes']:
789 test_node=TestNode (self,test_site,node_spec)
790 if node_spec.has_key('nodegroups'):
791 nodegroupnames=node_spec['nodegroups']
792 if isinstance(nodegroupnames,StringTypes):
793 nodegroupnames = [ nodegroupnames ]
794 for nodegroupname in nodegroupnames:
795 if not groups_dict.has_key(nodegroupname):
796 groups_dict[nodegroupname]=[]
797 groups_dict[nodegroupname].append(test_node.name())
798 auth=self.auth_root()
800 for (nodegroupname,group_nodes) in groups_dict.iteritems():
802 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
803 # first, check if the nodetagtype is here
804 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
806 tag_type_id = tag_types[0]['tag_type_id']
808 tag_type_id = self.apiserver.AddTagType(auth,
809 {'tagname':nodegroupname,
810 'description': 'for nodegroup %s'%nodegroupname,
812 print 'located tag (type)',nodegroupname,'as',tag_type_id
814 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
816 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
817 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
818 # set node tag on all nodes, value='yes'
819 for nodename in group_nodes:
821 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
823 traceback.print_exc()
824 print 'node',nodename,'seems to already have tag',nodegroupname
827 expect_yes = self.apiserver.GetNodeTags(auth,
828 {'hostname':nodename,
829 'tagname':nodegroupname},
830 ['value'])[0]['value']
831 if expect_yes != "yes":
832 print 'Mismatch node tag on node',nodename,'got',expect_yes
835 if not self.options.dry_run:
836 print 'Cannot find tag',nodegroupname,'on node',nodename
840 print 'cleaning nodegroup',nodegroupname
841 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
843 traceback.print_exc()
847 # a list of TestNode objs
848 def all_nodes (self):
850 for site_spec in self.plc_spec['sites']:
851 test_site = TestSite (self,site_spec)
852 for node_spec in site_spec['nodes']:
853 nodes.append(TestNode (self,test_site,node_spec))
856 # return a list of tuples (nodename,qemuname)
857 def all_node_infos (self) :
859 for site_spec in self.plc_spec['sites']:
860 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
861 for node_spec in site_spec['nodes'] ]
864 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
865 def all_reservable_nodenames (self):
867 for site_spec in self.plc_spec['sites']:
868 for node_spec in site_spec['nodes']:
869 node_fields=node_spec['node_fields']
870 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
871 res.append(node_fields['hostname'])
874 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
875 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
876 if self.options.dry_run:
880 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
881 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
882 # the nodes that haven't checked yet - start with a full list and shrink over time
883 tocheck = self.all_hostnames()
884 utils.header("checking nodes %r"%tocheck)
885 # create a dict hostname -> status
886 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
889 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
891 for array in tocheck_status:
892 hostname=array['hostname']
893 boot_state=array['boot_state']
894 if boot_state == target_boot_state:
895 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
897 # if it's a real node, never mind
898 (site_spec,node_spec)=self.locate_hostname(hostname)
899 if TestNode.is_real_model(node_spec['node_fields']['model']):
900 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
902 boot_state = target_boot_state
903 elif datetime.datetime.now() > graceout:
904 utils.header ("%s still in '%s' state"%(hostname,boot_state))
905 graceout=datetime.datetime.now()+datetime.timedelta(1)
906 status[hostname] = boot_state
908 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
911 if datetime.datetime.now() > timeout:
912 for hostname in tocheck:
913 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
915 # otherwise, sleep for a while
917 # only useful in empty plcs
920 def nodes_booted(self):
921 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
923 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
925 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
926 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
927 vservername=self.vservername
930 local_key = "keys/%(vservername)s-debug.rsa"%locals()
933 local_key = "keys/key1.rsa"
934 node_infos = self.all_node_infos()
935 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
936 for (nodename,qemuname) in node_infos:
937 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
938 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
939 (timeout_minutes,silent_minutes,period))
941 for node_info in node_infos:
942 (hostname,qemuname) = node_info
943 # try to run 'hostname' in the node
944 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
945 # don't spam logs - show the command only after the grace period
946 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
948 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
950 node_infos.remove(node_info)
952 # we will have tried real nodes once, in case they're up - but if not, just skip
953 (site_spec,node_spec)=self.locate_hostname(hostname)
954 if TestNode.is_real_model(node_spec['node_fields']['model']):
955 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
956 node_infos.remove(node_info)
959 if datetime.datetime.now() > timeout:
960 for (hostname,qemuname) in node_infos:
961 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
963 # otherwise, sleep for a while
965 # only useful in empty plcs
968 def ssh_node_debug(self):
969 "Tries to ssh into nodes in debug mode with the debug ssh key"
970 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
972 def ssh_node_boot(self):
973 "Tries to ssh into nodes in production mode with the root ssh key"
974 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
977 def qemu_local_init (self):
978 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
982 "all nodes: invoke GetBootMedium and store result locally"
985 def qemu_local_config (self):
986 "all nodes: compute qemu config qemu.conf and store it locally"
989 def nodestate_reinstall (self):
990 "all nodes: mark PLCAPI boot_state as reinstall"
993 def nodestate_safeboot (self):
994 "all nodes: mark PLCAPI boot_state as safeboot"
997 def nodestate_boot (self):
998 "all nodes: mark PLCAPI boot_state as boot"
1001 def nodestate_show (self):
1002 "all nodes: show PLCAPI boot_state"
1005 def qemu_export (self):
1006 "all nodes: push local node-dep directory on the qemu box"
1009 ### check hooks : invoke scripts from hooks/{node,slice}
1010 def check_hooks_node (self):
1011 return self.locate_first_node().check_hooks()
1012 def check_hooks_sliver (self) :
1013 return self.locate_first_sliver().check_hooks()
1015 def check_hooks (self):
1016 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1017 return self.check_hooks_node() and self.check_hooks_sliver()
1020 def do_check_initscripts(self):
1022 for slice_spec in self.plc_spec['slices']:
1023 if not slice_spec.has_key('initscriptstamp'):
1025 stamp=slice_spec['initscriptstamp']
1026 for nodename in slice_spec['nodenames']:
1027 (site,node) = self.locate_node (nodename)
1028 # xxx - passing the wrong site - probably harmless
1029 test_site = TestSite (self,site)
1030 test_slice = TestSlice (self,test_site,slice_spec)
1031 test_node = TestNode (self,test_site,node)
1032 test_sliver = TestSliver (self, test_node, test_slice)
1033 if not test_sliver.check_initscript_stamp(stamp):
1037 def check_initscripts(self):
1038 "check that the initscripts have triggered"
1039 return self.do_check_initscripts()
1041 def initscripts (self):
1042 "create initscripts with PLCAPI"
1043 for initscript in self.plc_spec['initscripts']:
1044 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1045 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1048 def delete_initscripts (self):
1049 "delete initscripts with PLCAPI"
1050 for initscript in self.plc_spec['initscripts']:
1051 initscript_name = initscript['initscript_fields']['name']
1052 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1054 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1055 print initscript_name,'deleted'
1057 print 'deletion went wrong - probably did not exist'
1062 "create slices with PLCAPI"
1063 return self.do_slices()
1065 def delete_slices (self):
1066 "delete slices with PLCAPI"
1067 return self.do_slices("delete")
1069 def do_slices (self, action="add"):
1070 for slice in self.plc_spec['slices']:
1071 site_spec = self.locate_site (slice['sitename'])
1072 test_site = TestSite(self,site_spec)
1073 test_slice=TestSlice(self,test_site,slice)
1075 utils.header("Deleting slices in site %s"%test_site.name())
1076 test_slice.delete_slice()
1078 utils.pprint("Creating slice",slice)
1079 test_slice.create_slice()
1080 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1084 def ssh_slice(self):
1085 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1089 def keys_clear_known_hosts (self):
1090 "remove test nodes entries from the local known_hosts file"
1094 def qemu_start (self) :
1095 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1099 def timestamp_qemu (self) :
1100 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1103 def check_tcp (self):
1104 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1105 specs = self.plc_spec['tcp_test']
1110 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1111 if not s_test_sliver.run_tcp_server(port,timeout=10):
1115 # idem for the client side
1116 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1117 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1121 # painfully enough, we need to allow for some time as netflow might show up last
1122 def check_netflow (self):
1123 "all nodes: check that the netflow slice is alive"
1124 return self.check_systemslice ('netflow')
1126 # we have the slices up already here, so it should not take too long
1127 def check_systemslice (self, slicename, timeout_minutes=5, period=15):
1128 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
1129 test_nodes=self.all_nodes()
1131 for test_node in test_nodes:
1132 if test_node.check_systemslice (slicename):
1134 test_nodes.remove(test_node)
1139 if datetime.datetime.now () > timeout:
1140 for test_node in test_nodes:
1141 utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
1146 def plcsh_stress_test (self):
1147 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1148 # install the stress-test in the plc image
1149 location = "/usr/share/plc_api/plcsh_stress_test.py"
1150 remote="%s/%s"%(self.vm_root_in_host(),location)
1151 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1153 command += " -- --check"
1154 if self.options.size == 1:
1155 command += " --tiny"
1156 return ( self.run_in_guest(command) == 0)
1158 # populate runs the same utility without slightly different options
1159 # in particular runs with --preserve (dont cleanup) and without --check
1160 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1162 def sfa_install_all (self):
1163 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1164 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1166 def sfa_install_core(self):
1168 return self.yum_install ("sfa")
1170 def sfa_install_plc(self):
1171 "yum install sfa-plc"
1172 return self.yum_install("sfa-plc")
1174 def sfa_install_client(self):
1175 "yum install sfa-client"
1176 return self.yum_install("sfa-client")
1178 def sfa_install_sfatables(self):
1179 "yum install sfa-sfatables"
1180 return self.yum_install ("sfa-sfatables")
1182 def sfa_dbclean(self):
1183 "thoroughly wipes off the SFA database"
1184 self.run_in_guest("sfa-nuke.py")==0 or \
1185 self.run_in_guest("sfa-nuke-plc.py") or \
1186 self.run_in_guest("sfaadmin.py registry nuke")
1189 def sfa_plcclean(self):
1190 "cleans the PLC entries that were created as a side effect of running the script"
1192 sfa_spec=self.plc_spec['sfa']
1194 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1195 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1196 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1197 except: print "Slice %s already absent from PLC db"%slicename
1199 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1200 try: self.apiserver.DeletePerson(self.auth_root(),username)
1201 except: print "User %s already absent from PLC db"%username
1203 print "REMEMBER TO RUN sfa_import AGAIN"
1206 def sfa_uninstall(self):
1207 "uses rpm to uninstall sfa - ignore result"
1208 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1209 self.run_in_guest("rm -rf /var/lib/sfa")
1210 self.run_in_guest("rm -rf /etc/sfa")
1211 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1213 self.run_in_guest("rpm -e --noscripts sfa-plc")
1216 ### run unit tests for SFA
1217 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1218 # Running Transaction
1219 # Transaction couldn't start:
1220 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1221 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1222 # no matter how many Gbs are available on the testplc
1223 # could not figure out what's wrong, so...
1224 # if the yum install phase fails, consider the test is successful
1225 # other combinations will eventually run it hopefully
1226 def sfa_utest(self):
1227 "yum install sfa-tests and run SFA unittests"
1228 self.run_in_guest("yum -y install sfa-tests")
1229 # failed to install - forget it
1230 if self.run_in_guest("rpm -q sfa-tests")!=0:
1231 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1233 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1237 dirname="conf.%s"%self.plc_spec['name']
1238 if not os.path.isdir(dirname):
1239 utils.system("mkdir -p %s"%dirname)
1240 if not os.path.isdir(dirname):
1241 raise "Cannot create config dir for plc %s"%self.name()
1244 def conffile(self,filename):
1245 return "%s/%s"%(self.confdir(),filename)
1246 def confsubdir(self,dirname,clean,dry_run=False):
1247 subdirname="%s/%s"%(self.confdir(),dirname)
1249 utils.system("rm -rf %s"%subdirname)
1250 if not os.path.isdir(subdirname):
1251 utils.system("mkdir -p %s"%subdirname)
1252 if not dry_run and not os.path.isdir(subdirname):
1253 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1256 def conffile_clean (self,filename):
1257 filename=self.conffile(filename)
1258 return utils.system("rm -rf %s"%filename)==0
1261 def sfa_configure(self):
1262 "run sfa-config-tty"
1263 tmpname=self.conffile("sfa-config-tty")
1264 fileconf=open(tmpname,'w')
1265 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1266 'SFA_INTERFACE_HRN',
1267 'SFA_REGISTRY_LEVEL1_AUTH',
1268 'SFA_REGISTRY_HOST',
1269 'SFA_AGGREGATE_HOST',
1280 if self.plc_spec['sfa'].has_key(var):
1281 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1282 # the way plc_config handles booleans just sucks..
1285 if self.plc_spec['sfa'][var]: val='true'
1286 fileconf.write ('e %s\n%s\n'%(var,val))
1287 fileconf.write('w\n')
1288 fileconf.write('R\n')
1289 fileconf.write('q\n')
1291 utils.system('cat %s'%tmpname)
1292 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1295 def aggregate_xml_line(self):
1296 port=self.plc_spec['sfa']['neighbours-port']
1297 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1298 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1300 def registry_xml_line(self):
1301 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1302 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1305 # a cross step that takes all other plcs in argument
1306 def cross_sfa_configure(self, other_plcs):
1307 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1308 # of course with a single plc, other_plcs is an empty list
1311 agg_fname=self.conffile("agg.xml")
1312 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1313 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1314 utils.header ("(Over)wrote %s"%agg_fname)
1315 reg_fname=self.conffile("reg.xml")
1316 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1317 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1318 utils.header ("(Over)wrote %s"%reg_fname)
1319 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1320 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1322 def sfa_import(self):
1324 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1325 return self.run_in_guest('sfa-import.py')==0 or \
1326 self.run_in_guest('sfa-import-plc.py')==0 or \
1327 self.run_in_guest('sfaadmin.py registry import_registry')==0
1328 # not needed anymore
1329 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1331 def sfa_start(self):
1333 return self.run_in_guest('service sfa start')==0
1335 def sfi_configure(self):
1336 "Create /root/sfi on the plc side for sfi client configuration"
1337 if self.options.dry_run:
1338 utils.header("DRY RUN - skipping step")
1340 sfa_spec=self.plc_spec['sfa']
1341 # cannot use sfa_slice_mapper to pass dir_name
1342 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1343 site_spec = self.locate_site (slice_spec['sitename'])
1344 test_site = TestSite(self,site_spec)
1345 test_slice=TestSliceSfa(self,test_site,slice_spec)
1346 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1347 test_slice.sfi_config(dir_name)
1348 # push into the remote /root/sfi area
1349 location = test_slice.sfi_path()
1350 remote="%s/%s"%(self.vm_root_in_host(),location)
1351 self.test_ssh.mkdir(remote,abs=True)
1352 # need to strip last level or remote otherwise we get an extra dir level
1353 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1357 def sfi_clean (self):
1358 "clean up /root/sfi on the plc side"
1359 self.run_in_guest("rm -rf /root/sfi")
1363 def sfa_add_user(self):
1368 def sfa_update_user(self):
1372 def sfa_add_slice(self):
1373 "run sfi.py add (on Registry) from slice.xml"
1377 def sfa_discover(self):
1378 "discover resources into resouces_in.rspec"
1382 def sfa_create_slice(self):
1383 "run sfi.py create (on SM) - 1st time"
1387 def sfa_check_slice_plc(self):
1388 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1392 def sfa_update_slice(self):
1393 "run sfi.py create (on SM) on existing object"
1398 "various registry-related calls"
1402 def ssh_slice_sfa(self):
1403 "tries to ssh-enter the SFA slice"
1407 def sfa_delete_user(self):
1412 def sfa_delete_slice(self):
1413 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
1418 self.run_in_guest('service sfa stop')==0
1421 def populate (self):
1422 "creates random entries in the PLCAPI"
1423 # install the stress-test in the plc image
1424 location = "/usr/share/plc_api/plcsh_stress_test.py"
1425 remote="%s/%s"%(self.vm_root_in_host(),location)
1426 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1428 command += " -- --preserve --short-names"
1429 local = (self.run_in_guest(command) == 0);
1430 # second run with --foreign
1431 command += ' --foreign'
1432 remote = (self.run_in_guest(command) == 0);
1433 return ( local and remote)
1435 def gather_logs (self):
1436 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1437 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1438 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1439 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1440 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1441 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1443 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1444 self.gather_var_logs ()
1446 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1447 self.gather_pgsql_logs ()
1449 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1450 for site_spec in self.plc_spec['sites']:
1451 test_site = TestSite (self,site_spec)
1452 for node_spec in site_spec['nodes']:
1453 test_node=TestNode(self,test_site,node_spec)
1454 test_node.gather_qemu_logs()
1456 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1457 self.gather_nodes_var_logs()
1459 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1460 self.gather_slivers_var_logs()
1463 def gather_slivers_var_logs(self):
1464 for test_sliver in self.all_sliver_objs():
1465 remote = test_sliver.tar_var_logs()
1466 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1467 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1468 utils.system(command)
1471 def gather_var_logs (self):
1472 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1473 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1474 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1475 utils.system(command)
1476 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1477 utils.system(command)
1479 def gather_pgsql_logs (self):
1480 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1481 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1482 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1483 utils.system(command)
1485 def gather_nodes_var_logs (self):
1486 for site_spec in self.plc_spec['sites']:
1487 test_site = TestSite (self,site_spec)
1488 for node_spec in site_spec['nodes']:
1489 test_node=TestNode(self,test_site,node_spec)
1490 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1491 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1492 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1493 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1494 utils.system(command)
1497 # returns the filename to use for sql dump/restore, using options.dbname if set
1498 def dbfile (self, database):
1499 # uses options.dbname if it is found
1501 name=self.options.dbname
1502 if not isinstance(name,StringTypes):
1505 t=datetime.datetime.now()
1508 return "/root/%s-%s.sql"%(database,name)
1510 def plc_db_dump(self):
1511 'dump the planetlab5 DB in /root in the PLC - filename has time'
1512 dump=self.dbfile("planetab5")
1513 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1514 utils.header('Dumped planetlab5 database in %s'%dump)
1517 def plc_db_restore(self):
1518 'restore the planetlab5 DB - looks broken, but run -n might help'
1519 dump=self.dbfile("planetab5")
1520 ##stop httpd service
1521 self.run_in_guest('service httpd stop')
1522 # xxx - need another wrapper
1523 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1524 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1525 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1526 ##starting httpd service
1527 self.run_in_guest('service httpd start')
1529 utils.header('Database restored from ' + dump)
1531 def standby_1_through_20(self):
1532 """convenience function to wait for a specified number of minutes"""
1535 def standby_1(): pass
1537 def standby_2(): pass
1539 def standby_3(): pass
1541 def standby_4(): pass
1543 def standby_5(): pass
1545 def standby_6(): pass
1547 def standby_7(): pass
1549 def standby_8(): pass
1551 def standby_9(): pass
1553 def standby_10(): pass
1555 def standby_11(): pass
1557 def standby_12(): pass
1559 def standby_13(): pass
1561 def standby_14(): pass
1563 def standby_15(): pass
1565 def standby_16(): pass
1567 def standby_17(): pass
1569 def standby_18(): pass
1571 def standby_19(): pass
1573 def standby_20(): pass