import os, sys
import datetime
import time
import xmlrpclib
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes):
    utils.header('Entering StandBy for %d minutes'%minutes)
    time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes)
    return actual
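
# Usage sketch: decorating a no-op whose name encodes a duration in minutes
# turns it into a wait step - the decorated steps at the end of this file
# all follow this pattern:
#
#   @standby_generic
#   def standby_3(): pass
#
# so that plc.standby_3() sleeps for 3 minutes and returns True.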

class TestPlc:

    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
            self.vserver=True
        except:
            self.vserver=False
            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
        # utils.header('Using API url %s'%self.url)
        self.server=xmlrpclib.Server(self.url,allow_none=True)
    def name(self):
        name=self.plc_spec['name']
        if self.vserver:
            return name+"[%s]"%self.vservername
        else:
            return name+"[chroot]"

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()
    # defining the API methods on this object through xmlrpc
    # would help, but is not strictly necessary

    def run_in_guest (self,command):
        return self.test_ssh.run(self.host_to_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the chroot/vserver
    def host_to_guest(self,command):
        if self.vserver:
            return "vserver %s exec %s"%(self.vservername,command)
        else:
            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)
    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
    def copy_in_guest (self, localfile, remotefile, in_data=False):
        if in_data:
            chroot_dest="/plc/data"
        else:
            chroot_dest="/plc/root"
        if self.is_local():
            if not self.vserver:
                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
            else:
                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
        else:
            if not self.vserver:
                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
            else:
                utils.system("scp %s %s:/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))
    # run a local command and pipe its output into a command run in the guest
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote)))
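
    # e.g. run_in_guest_piped('cat foo.conf','plc-config-tty') runs 'cat' locally
    # and pipes its output into plc-config-tty inside the guest - this is how
    # the configuration step below feeds its answers file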
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role':self.plc_spec['role'],
                }
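
    # this auth struct is the first argument to every PLCAPI call made through
    # self.server, e.g.
    #   self.server.GetNodes(self.auth_root(),tocheck,['hostname','boot_state'])
    # as used in do_nodes_booted below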
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    # all the different host boxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, returns [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_nodes .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[node]
            else:
                result[box].append(node)
        return result
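
    # the resulting dict maps each host box to its (non-real) test nodes, e.g.
    # (hypothetical hostname) { 'box1.example.org': [ testnode1, testnode2 ] }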
    # a step for checking this stuff
    def show_boxes (self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
    # make this a valid step
    def kill_all_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute-force version: kill all qemus on that host box
            TestBox(box,self.options.buildname).kill_all_qemus()
        return True
    # make this a valid step
    def list_all_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the brute-force version: list all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
        return True
    # list only the right qemus
    def list_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - act on each node individually
            # (assumes TestNode exposes a per-node list_qemu)
            for node in nodes:
                node.list_qemu()
        return True
    # kill only the right qemus
    def kill_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - act on each node individually
            # (assumes TestNode exposes a per-node kill_qemu)
            for node in nodes:
                node.kill_qemu()
        return True
    #################### step methods
    ### uninstall
    def uninstall_chroot(self):
        self.run_in_host('service plc safestop')
        ##### detect the last installed myplc version and remove it
        self.run_in_host('rpm -e myplc')
        ##### clean up the /plc directory
        self.run_in_host('rm -rf /plc/data')
        ##### stop any running vservers
        self.run_in_host('for vserver in $(ls -d /vservers/* | sed -e s,/vservers/,,) ; do case $vserver in vtest*) echo Shutting down vserver $vserver ; vserver $vserver stop ;; esac ; done')
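        # for readability, the one-liner above expands to:
        #   for vserver in $(ls -d /vservers/* | sed -e s,/vservers/,,) ; do
        #     case $vserver in
        #       vtest*) echo Shutting down vserver $vserver ;
        #               vserver $vserver stop ;;
        #     esac
        #   done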
        return True

    def uninstall_vserver(self):
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True
    def uninstall(self):
        # if there's a chroot-based myplc running, and a native-based myplc is
        # then being deployed, it sounds safer to have the former uninstalled too
        # note: the vserver method cannot be invoked for chroot instances, as
        # vservername is required
        if self.vserver:
            self.uninstall_vserver()
            self.uninstall_chroot()
        else:
            self.uninstall_chroot()
        return True
    ### install
    def install_chroot(self):
        # nothing to do - the chroot myplc gets installed with the rpm step below
        return True
    def install_vserver(self):
        # we need the build dir for vtest-init-vserver
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])+"/build"
        else:
            # use a standard name - will be relative to HOME
            build_dir=self.options.buildname
        # run checkout in any case - would do an update if it already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
            raise Exception,"Cannot checkout build dir"
        # the repo url is taken from myplc-url
        # with the last two steps (i386/myplc...) removed
        repo_url = self.options.myplc_url
        for level in [ 'rpmname','arch' ]:
            repo_url = os.path.dirname(repo_url)
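        # for illustration, with a hypothetical myplc_url
        #   http://build.example.org/build/i386/myplc-4.2-1.i386.rpm
        # the two dirname() calls leave http://build.example.org/build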
        create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
            (build_dir,self.vservername,repo_url,self.vserverip)
        if self.run_in_host(create_vserver) != 0:
            raise Exception,"Could not create vserver for %s"%self.vservername
        return True
    def install(self):
        if self.vserver:
            return self.install_vserver()
        else:
            return self.install_chroot()
    ### fetch the rpm, or reuse a locally cached copy
    def cache_rpm(self,url,rpm):
        cache_fetch="pwd;if [ -f %(rpm)s ] ; then echo Using cached rpm %(rpm)s ; else echo Fetching %(url)s ; curl -O %(url)s; fi"%locals()
        id = self.run_in_host(cache_fetch)
        if (id != 0):
            raise Exception,"Could not get rpm from %s"%url
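
    # note: curl -O saves the file under its basename, which is exactly what
    # the [ -f %(rpm)s ] test looks for - callers pass rpm=os.path.basename(url)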
    def install_rpm_chroot(self):
        url = self.options.myplc_url
        rpm = os.path.basename(url)
        self.cache_rpm(url,rpm)
        utils.header('Installing rpm %s'%rpm)
        self.run_in_host('rpm -Uvh '+rpm)
        self.run_in_host('service plc mount')
        return True
    def install_rpm_vserver(self):
        self.run_in_guest("yum -y install myplc-native")
        return True
    def install_rpm(self):
        if self.vserver:
            return self.install_rpm_vserver()
        else:
            return self.install_rpm_chroot()
    ### run plc-config-tty in the guest to apply the configuration
    def configure(self):
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_ROOT_USER',
                     'PLC_ROOT_PASSWORD',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # the remaining PLC_* variables from plc_spec go here
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
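
    # the generated file is a plc-config-tty script; with hypothetical values
    # it would read:
    #   e PLC_NAME
    #   TestLab
    #   e PLC_ROOT_USER
    #   root@test.example.org
    #   w
    #   q
    # i.e. edit each variable in turn, then write and quit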
    # the chroot install is slightly different in this respect
    def start(self):
        if self.vserver:
            self.run_in_guest('service plc start')
        else:
            self.run_in_host('service plc start')
        return True

    def stop(self):
        if self.vserver:
            self.run_in_guest('service plc stop')
        else:
            self.run_in_host('service plc stop')
        return True
    # could use a TestKey class
    def store_keys(self):
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True
    def clean_keys(self):
        utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
        return True
    ### sites
    def sites (self):
        return self.do_sites()

    def clean_sites (self):
        return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users are deleted along with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True
    ### nodes
    def nodes (self):
        return self.do_nodes()

    def clean_nodes (self):
        return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True
    # create nodegroups if needed, and populate them
    # no need for a clean_nodegroups if we are careful enough
    def nodegroups (self):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        # 2nd pass to create the groups and attach the nodes
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            try:
                self.server.GetNodeGroups(auth,{'name':nodegroupname})[0]
            except:
                self.server.AddNodeGroup(auth,{'name':nodegroupname})
            for node in group_nodes:
                self.server.AddNodeToNodeGroup(auth,node,nodegroupname)
        return True
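
    # e.g. with two nodes tagged with a hypothetical nodegroup 'mygroup',
    # groups_dict is { 'mygroup': ['node1.example.org','node2.example.org'] };
    # the group is created with AddNodeGroup only when GetNodeGroups finds
    # no existing match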
    def all_hostnames (self) :
        hostnames = []
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
        return hostnames
    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime=2):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't reached 'boot' yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get the status of the nodes still under watch
            tocheck_status=self.server.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == 'boot':
                    utils.header ("%s has reached the 'boot' state"%hostname)
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend it has booted so we stop waiting for it
                        boot_state = 'boot'
                    if datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(15)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.do_nodes_booted(minutes=0)
    # # scan and store the nodes' public keys, to avoid ssh confirmation prompts
    # def scan_publicKeys(self,hostnames):
    #     try:
    #         temp_knownhosts="/root/known_hosts"
    #         remote_knownhosts="/root/.ssh/known_hosts"
    #         self.run_in_host("touch %s"%temp_knownhosts )
    #         for hostname in hostnames:
    #             utils.header("Scan public %s key and store it in the known_hosts file (under the root image)"%hostname)
    #             scan=self.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
    #         # store the public keys in the right root image
    #         self.copy_in_guest(temp_knownhosts,remote_knownhosts,True)
    #         # clean the temp keys file used
    #         self.run_in_host('rm -f %s '%temp_knownhosts )
    #     except Exception, err:
    #         print err
    def do_check_nodesSsh(self,minutes):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        tocheck = self.all_hostnames()
        # self.scan_publicKeys(tocheck)
        utils.header("checking ssh connectivity on nodes %r"%tocheck)
        while tocheck:
            # iterate on a copy, as tocheck gets modified in the loop
            for hostname in tocheck[:]:
                # try to ssh into the node
                node_test_ssh = TestSsh (hostname)
                access=self.run_in_guest(node_test_ssh.actual_command("date"))
                if not access:
                    utils.header('The node %s is sshable'%hostname)
                    tocheck.remove(hostname)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        tocheck.remove(hostname)
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
                return False
            # otherwise, sleep for a while
            time.sleep(15)
        # only useful in empty plcs
        return True

    def nodes_ssh(self):
        return self.do_check_nodesSsh(minutes=2)
    ### prepare the boot CD and the qemu environment for all nodes
    def bootcd (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                test_node.prepare_area()
                test_node.create_boot_cd()
                test_node.configure_qemu()
        return True
    ### initscripts
    def do_check_initscripts(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # check each node in turn - TestNode expects a single node_spec
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                for slice_spec in self.plc_spec['slices']:
                    test_slice=TestSlice (self,test_site,slice_spec)
                    test_sliver=TestSliver(self,test_node,test_slice)
                    init_status=test_sliver.get_initscript(slice_spec)
                    if (not init_status):
                        return False
        return True
534 def initscripts (self):
535 for initscript in self.plc_spec['initscripts']:
536 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
537 self.server.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    ### slices
    def slices (self):
        return self.do_slices()

    def clean_slices (self):
        return self.do_slices("delete")
    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True
    def check_slices(self):
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            status=test_slice.do_check_slice(self.options)
            if (not status):
                return False
        return True
    def start_nodes (self):
        utils.header("Starting nodes")
        for site_spec in self.plc_spec['sites']:
            TestSite(self,site_spec).start_nodes (self.options)
        return True
    def gather_all_logs (self):
        # (1) get the plc's /var/log and store it locally in logs/<plcname>-var-log/*
        # (2) get all the nodes' qemu logs and store them as logs/<node>-qemu.log
        # (3) get the nodes' /var/log and store it as logs/<node>-var-log/*
        # (4) as far as possible, get the slices' /var/log as logs/<slice>-<node>-var-log/*
        # (1)
        self.gather_logs ()
        # (2) and (3)
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                TestNode(self,test_site,node_spec).gather_qemu_logs()
                TestNode(self,test_site,node_spec).gather_var_logs()
        return True
    def gather_logs (self):
        utils.header("WARNING - Incomplete logs gathering TestPlc.gather_logs")
        # xxx to be implemented
        pass
    ### check TCP connectivity between slivers, as described in plc_spec['tcp_test']
    def check_tcp (self):
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            utils.header ("WARNING : xxx check_tcp is underway, spec=%r"%spec)
            port=spec['port']
            # locate the server-side sliver
            (s_site,s_node) = self.locate_node(spec['server_node'])
            s_slice = self.locate_slice (spec['server_slice'])
            s_test_site = TestSite (self, s_site)
            s_test_node = TestNode (self, s_test_site,s_node)
            # xxx the slice site is assumed to be the node site - mhh
            s_test_slice = TestSlice (self, s_test_site, s_slice)
            s_test_sliver = TestSliver (self, s_test_node, s_test_slice)
            if not s_test_sliver.run_tcp_server(port):
                overall=False
                continue
            # idem for the client side
            (c_site,c_node) = self.locate_node(spec['client_node'])
            c_slice = self.locate_slice (spec['client_slice'])
            c_test_site = TestSite (self, c_site)
            c_test_node = TestNode (self, c_test_site,c_node)
            # xxx the slice site is assumed to be the node site - mhh
            c_test_slice = TestSlice (self, c_test_site, c_slice)
            c_test_sliver = TestSliver (self, c_test_node, c_test_slice)
            if not c_test_sliver.run_tcp_client(s_test_node.name(),port):
                overall=False
        return overall
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is set and a string
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            # fall back on a date-stamped name
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
    def db_dump(self):
        dump=self.dbfile("planetlab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
        return True
    def db_restore(self):
        dump=self.dbfile("planetlab4")
        ##stop the httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##restart the httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    ### standby steps - each just waits for the number of minutes in its name
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass