# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
import os, sys
import time
import datetime
import socket
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
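# Illustration: standby_generic derives the duration from the decorated
# function's name, so a step declared as
#   @standby_generic
#   def standby_5(): pass
# waits 5 minutes when run as a step (or returns immediately under --dry-run);
# see the standby_1 .. standby_20 definitions at the end of this module.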
def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            test_slice=TestSliceSfa(self,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
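# Illustration (hypothetical step name): a method declared on TestPlc with one
# of these mappers dispatches to the same-named method on each TestNode or
# TestSlice object; the declared body is never executed, only the name and the
# docstring matter:
#
#   @node_mapper
#   def some_node_step (self):
#       "doc shown in the step listing"
#       pass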
SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'show', SEP,
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_sys_slice', SEP,
        'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'populate', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
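    # Illustration (hypothetical input): SEP markers become line breaks, so
    #   TestPlc.printable_steps(['plc_install','plc_configure',SEP,'plc_start'])
    # renders roughly as
    #   plc_install plc_configure \
    #   plc_start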
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # defining the API methods on this object through xmlrpc
    # would help, but is not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
        else:
            return "/vservers/%s"%(self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
        else:
            return "/vservers/%s.timestamp"%(self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s"%(self.vservername)
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s"%(self.vservername)
        else:
            return "vserver %s stop"%(self.vservername)

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
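    # Illustration (hypothetical vservername/vserverip): with plcs_use_lxc set,
    #   host_to_guest("service plc start")
    # builds "ssh -o StrictHostKeyChecking=no 138.96.0.1 service plc start",
    # and in vserver mode "vserver vplc01 exec service plc start"; run_in_guest
    # then wraps that once more in the ssh command that reaches the host box.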
    # does a yum install in the vs, ignores the yum return code, checks with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.run_in_guest("rpm -q %s"%rpms)==0
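    # Illustration (hypothetical package names): both forms are accepted, and
    # success is judged by the final rpm -q rather than by yum itself:
    #   self.yum_install("sfa-client")
    #   self.yum_install(["myplc","noderepo-f14-i386"])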
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
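    # Illustration: this auth struct is the first argument to every PLCAPI
    # call made through self.apiserver, e.g.
    #   self.apiserver.GetNodes(self.auth_root(), {}, ['hostname'])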
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception("Cannot locate site %s"%sitename)

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception("Cannot locate node %s"%nodename)

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception("Cannot locate hostname %s"%hostname)

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception("Cannot locate key %s"%keyname)

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception("Cannot locate slice %s"%slicename)
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
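    # Illustration (hypothetical box and node names): the result maps each
    # qemu host box to the test nodes it hosts, e.g.
    #   { 'qemubox1': [ <TestNode node1>, <TestNode node2> ],
    #     'qemubox2': [ <TestNode node3> ] }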
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - TestNode is assumed to expose list_qemu()
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - TestNode is assumed to expose kill_qemu()
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass (1)
        self.show_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        else:
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]

    def show_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key=='sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',
                utils.pprint(k,v)
    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,v
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that would cause the vserver .. build to cough
    def timestamp_vs (self):
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-stop --name %s"%self.vservername)
            self.run_in_host("lxc-destroy --name %s"%self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
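    # Illustration (hypothetical option values): the assembled command line
    # looks like
    #   build/vtest-init-lxc.sh -p linux64 -d planetlab -f f14 vplc01 \
    #       http://build.example.com/2012.01 -- --netdev eth0 --interface 138.96.0.1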
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception("Unsupported personality %r"%self.options.personality)
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_ROOT_USER',
                     'PLC_ROOT_PASSWORD',
                     'PLC_SLICE_PREFIX',
                     'PLC_MAIL_ENABLED',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_DB_HOST',
                     'PLC_DB_PASSWORD',
                     # Above line was added for integrating SFA Testing
                     'PLC_API_HOST',
                     'PLC_WWW_HOST',
                     'PLC_BOOT_HOST',
                     'PLC_NET_DNS1',
                     'PLC_NET_DNS2',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_ENABLED',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
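    # Illustration (hypothetical values): the temp file fed to plc-config-tty
    # is a sequence of editor-style commands, one 'e <var>' / '<value>' pair
    # per setting, then write and quit:
    #   e PLC_NAME
    #   TestLab
    #   w
    #   q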
    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
    def sites (self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users are deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
        for site in sites:
            # keep the automatic site - otherwise we shoot ourselves in the foot, root_auth is not valid anymore
            if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id=site['site_id']
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600

    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
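    # Illustration (hypothetical values): timestamps smaller than one year are
    # taken as grain counts relative to start, larger ones pass through as
    # absolute epoch times:
    #   translate_timestamp(1000000, 1800, 2)          -> 1003600
    #   translate_timestamp(1000000, 1800, 1300000000) -> 1300000000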
    def leases (self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now=int(time.time())
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        if not nodes:
            utils.header ("No reservable node found - proceeding without leases")
            return True
        ok=True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                ok=False
            else:
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             'category':'test'})
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create the nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check it was indeed set
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_nodenames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's cheat
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to check
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # refresh node_infos
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass
    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass
    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass
    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass
    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass
    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass
    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        pass
    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
    def slices (self):
        "create slices with PLCAPI"
        return self.do_slices(action="add")

    def delete_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices(action="delete")

    def fill_slices (self):
        "add nodes in slices with PLCAPI"
        return self.do_slices(action="fill")

    def empty_slices (self):
        "remove nodes from slices with PLCAPI"
        return self.do_slices(action="empty")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action=="fill":
                test_slice.add_nodes()
            elif action=="empty":
                test_slice.delete_nodes()
            else:
                test_slice.create_slice()
        return True
    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @slice_mapper
    def ssh_slice_off (self):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        pass

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
        pass
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file to the PLCAPI
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
        return True
    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    @node_mapper
    def timestamp_qemu (self) :
        "all nodes: write a timestamp on the qemu box"
        pass
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_sys_slice (self):
        "all nodes: check that a system slice is alive"
        # would probably make more sense to check for netflow,
        # but that one is currently not working in the lxc distro
        # return self.check_systemslice ('netflow')
        return self.check_systemslice ('drl')

    # we have the slices up already here, so it should not take too long
    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes=self.all_nodes()
        while test_nodes:
            for test_node in test_nodes:
                if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
                    utils.header ("ok")
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now () > timeout:
                for test_node in test_nodes:
                    utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
                return False
            time.sleep(period)
        return True
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_client(self):
        "yum install sfa-client"
        return self.yum_install("sfa-client")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
               self.run_in_guest("sfa-nuke.py")==0 or \
               self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']

        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            login_base=sfa_slice_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base

            for key in ['piuser','regularuser']:
                username="%s@%s"%(sfa_slice_spec[key],sfa_slice_spec['domain'])
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # just in case the above did not do it all
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails with
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception("Cannot create config dir for plc %s"%self.name())
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_SM_HOST',
                     'SFA_PLC_HOST',
                     'SFA_PLC_USER',
                     'SFA_PLC_PASSWORD',
                     'SFA_DB_HOST',
                     'SFA_DB_USER',
                     'SFA_DB_PASSWORD',
                     'SFA_DB_NAME',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
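    # Illustration (hypothetical address, hrn and port): aggregate_xml_line()
    # produces something like
    #   <aggregate addr="138.96.0.1" hrn="plc.inria" port="12346"/>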
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import(self):
        "import from the myplc db into the SFA registry"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
               self.run_in_guest('sfa-import-plc.py')==0 or \
               self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            test_slice=TestSliceSfa(self,slice_spec)
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level of remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @slice_sfa_mapper
    def sfa_add_site (self):
        "bootstrap a site using sfaadmin"
        pass

    @slice_sfa_mapper
    def sfa_add_pi (self):
        "bootstrap a PI user for that site"
        pass

    @slice_sfa_mapper
    def sfa_add_user(self):
        "run sfi.py add (on Registry) from user.xml"
        pass

    @slice_sfa_mapper
    def sfa_update_user(self):
        "run sfi.py update (on Registry) on the user"
        pass

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        pass

    @slice_sfa_mapper
    def sfa_view(self):
        "various registry-related calls"
        pass

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    @slice_sfa_mapper
    def sfa_delete_user(self):
        "run sfi.py delete (on Registry) to clean the user"
        pass

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass
    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return (local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True
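    # Illustration (hypothetical date): without --dbname, dumps land in a
    # date-stamped file such as /root/planetlab5-2012-01-15.sql, which is the
    # same name plc_db_restore looks for on the same day.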
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ## stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ## restart httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass