# $Id$
import os, os.path
+import datetime
+import time
import sys
import xmlrpclib
import datetime
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
+from TestSlice import TestSlice
+
+# inserts a backslash before each occurrence of the following chars
+# \ " ' < > & | ; ( ) $ * ~ @
+def backslash_shell_specials (command):
+ result=''
+ for char in command:
+ if char in "\\\"'<>&|;()$*~@":
+ result +='\\'+char
+ else:
+ result +=char
+ return result
# step methods must take (self, options) and return a boolean
def __init__ (self,plc_spec):
self.plc_spec=plc_spec
- self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
- self.server=xmlrpclib.Server(self.url,allow_none=True)
self.path=os.path.dirname(sys.argv[0])
try:
self.vserverip=plc_spec['vserverip']
self.vservername=plc_spec['vservername']
+ self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
self.vserver=True
except:
self.vserver=False
+ self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
+ utils.header('Using API url %s'%self.url)
+ self.server=xmlrpclib.Server(self.url,allow_none=True)
def name(self):
name=self.plc_spec['name']
else:
return name+"[chroot]"
+ def is_local (self):
+ return self.plc_spec['hostname'] == 'localhost'
+
# define the API methods on this object through xmlrpc
# would help, but not strictly necessary
def connect (self):
pass
- # build the full command so command gets run in the chroot/vserver
- def run_command(self,command):
+ # command gets run in the chroot/vserver
+ def host_to_guest(self,command):
if self.vserver:
return "vserver %s exec %s"%(self.vservername,command)
else:
- return "chroot /plc/root %s"%command
+ return "chroot /plc/root %s"%backslash_shell_specials(command)
- def ssh_command(self,command):
- if self.plc_spec['hostname'] == "localhost":
+ # command gets run on the right box
+ def to_host(self,command):
+ if self.is_local():
return command
else:
- return "ssh " + self.plc_spec['hostname'] + " " + command
+ return "ssh %s %s"%(self.plc_spec['hostname'],backslash_shell_specials(command))
def full_command(self,command):
- return self.ssh_command(self.run_command(command))
+ return self.to_host(self.host_to_guest(command))
def run_in_guest (self,command):
return utils.system(self.full_command(command))
def run_in_host (self,command):
- return utils.system(self.ssh_command(command))
+ return utils.system(self.to_host(command))
# xxx quick n dirty
def run_in_guest_piped (self,local,remote):
- return utils.system(local+" | "+self.full_command(command))
+ return utils.system(local+" | "+self.full_command(remote))
+
+ # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
+ def copy_in_guest (self, localfile, remotefile, in_data=False):
+ if in_data:
+ chroot_dest="/plc/data"
+ else:
+ chroot_dest="/plc/root"
+ if self.is_local():
+ if not self.vserver:
+ utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
+ else:
+ utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
+ else:
+ if not self.vserver:
+ utils.system("scp %s %s:%s/%s"%(localfile,self.plc_spec['hostname'],chroot_dest,remotefile))
+ else:
+ utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.plc_spec['hostname'],self.vservername,remotefile))
def auth_root (self):
return {'Username':self.plc_spec['PLC_ROOT_USER'],
return site
raise Exception,"Cannot locate site %s"%sitename
+ def locate_node (self,nodename):
+ for site in self.plc_spec['sites']:
+ for node in site['nodes']:
+ if node['node_fields']['hostname'] == nodename:
+ return (site,node)
+ raise Exception,"Cannot locate node %s"%nodename
+
def locate_key (self,keyname):
for key in self.plc_spec['keys']:
if key['name'] == keyname:
return key
raise Exception,"Cannot locate key %s"%keyname
-
- def kill_all_vmwares(self):
- utils.header('Killing any running vmware or vmplayer instance')
- utils.system('pgrep vmware | xargs -r kill')
- utils.system('pgrep vmplayer | xargs -r kill ')
- utils.system('pgrep vmware | xargs -r kill -9')
- utils.system('pgrep vmplayer | xargs -r kill -9')
-
+
+    # collect all the distinct host boxes used by the nodes of this plc
+ def locate_hostBoxes(self,site_spec):
+        # start from the first node's host box so that, when a single box
+        # hosts all the nodes, we do not return a long list of the same entry
+ HostBoxes=[site_spec['nodes'][0]['host_box']]
+ for node_spec in site_spec['nodes']:
+ if node_spec['host_box']!= HostBoxes[0]:
+ HostBoxes.append( node_spec['host_box'])
+
+ return HostBoxes
+
+ def kill_all_qemus(self):
+ for site_spec in self.plc_spec['sites']:
+ test_site = TestSite (self,site_spec)
+ hostboxes_list=self.locate_hostBoxes(site_spec)
+ if (hostboxes_list):
+ for node_spec in site_spec['nodes']:
+ TestNode(self,test_site,node_spec).stop_qemu(node_spec)
+ else:
+ utils.header("No emulated node running on this PLC config ignore the kill() step")
+
+ def clear_ssh_config (self,options):
+ # install local ssh_config file as root's .ssh/config - ssh should be quiet
+ # dir might need creation first
+ self.run_in_guest("mkdir /root/.ssh")
+ self.run_in_guest("chmod 700 /root/.ssh")
+        # this does not work: the > redirection somehow ends up being passed as an argument to cat
+ #self.run_in_guest_piped("cat ssh_config","cat > /root/.ssh/config")
+ self.copy_in_guest("ssh_config","/root/.ssh/config",True)
+ return True
+
#################### step methods
### uninstall
self.run_in_host('rpm -e myplc')
##### Clean up the /plc directory
self.run_in_host('rm -rf /plc/data')
+ ##### stop any running vservers
+ self.run_in_host('for vserver in $(ls /vservers/* | sed -e s,/vservers/,,) ; do vserver $vserver stop ; done')
return True
def uninstall_vserver(self,options):
return True
def uninstall(self,options):
+ # if there's a chroot-based myplc running, and then a native-based myplc is being deployed
+ # it sounds safer to have the former uninstalled too
+ # now the vserver method cannot be invoked for chroot instances as vservername is required
if self.vserver:
- return self.uninstall_vserver(options)
+ self.uninstall_vserver(options)
+ self.uninstall_chroot(options)
else:
- return self.uninstall_chroot(options)
+ self.uninstall_chroot(options)
+ return True
### install
def install_chroot(self,options):
- utils.header('Installing from %s'%options.myplc_url)
- url=options.myplc_url
- utils.system('rpm -Uvh '+url)
- utils.system('service plc mount')
+ # nothing to do
return True
# xxx this would not work with hostname != localhost as mylc-init-vserver was extracted locally
- def install_vserver_create(self,options):
+ def install_vserver(self,options):
# we need build dir for vtest-init-vserver
- build_dir=self.path+"/build"
- if not os.path.isdir(build_dir):
- if utils.system("svn checkout %s %s"%(options.build_url,build_dir)) != 0:
- raise Exception,"Cannot checkout build dir"
+ if self.is_local():
+ # a full path for the local calls
+ build_dir=self.path+"/build"
+ else:
+ # use a standard name - will be relative to HOME
+ build_dir="tests-system-build"
+ build_checkout = "svn checkout %s %s"%(options.build_url,build_dir)
+ if self.run_in_host(build_checkout) != 0:
+ raise Exception,"Cannot checkout build dir"
# the repo url is taken from myplc-url
# with the last two steps (i386/myplc...) removed
repo_url = options.myplc_url
repo_url = os.path.dirname(repo_url)
repo_url = os.path.dirname(repo_url)
- command="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
+ create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
(build_dir,self.vservername,repo_url,self.vserverip)
- if utils.system(command) != 0:
+ if self.run_in_host(create_vserver) != 0:
raise Exception,"Could not create vserver for %s"%self.vservername
return True
- def install_vserver_native(self,options):
+ def install(self,options):
+ if self.vserver:
+ return self.install_vserver(options)
+ else:
+ return self.install_chroot(options)
+
+ ### install_rpm
+ def install_rpm_chroot(self,options):
+ utils.header('Installing from %s'%options.myplc_url)
+ url=options.myplc_url
+ self.run_in_host('rpm -Uvh '+url)
+ self.run_in_host('service plc mount')
+ return True
+
+ def install_rpm_vserver(self,options):
self.run_in_guest("yum -y install myplc-native")
return True
- def install(self,options):
+ def install_rpm(self,options):
if self.vserver:
- return self.install_vserver_create(options)
- return self.install_vserver_yum(options)
+ return self.install_rpm_vserver(options)
else:
- return self.install_chroot(options)
+ return self.install_rpm_chroot(options)
###
def configure(self,options):
fileconf.write('q\n')
fileconf.close()
utils.system('cat %s'%tmpname)
- self.run_in_guest('plc-config-tty < %s'%tmpname)
+ self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
utils.system('rm %s'%tmpname)
return True
self.server.AddNodeToNodeGroup(auth,node,nodegroupname)
return True
+ def all_hostnames (self) :
+ hostnames = []
+ for site_spec in self.plc_spec['sites']:
+ hostnames += [ node_spec['node_fields']['hostname'] \
+ for node_spec in site_spec['nodes'] ]
+ return hostnames
+
+ # gracetime : during the first <gracetime> minutes nothing gets printed
+ def do_check_nodesStatus (self, minutes, gracetime=2):
+ # compute timeout
+ timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
+ graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
+ # the nodes that haven't checked yet - start with a full list and shrink over time
+ tocheck = self.all_hostnames()
+ utils.header("checking nodes %r"%tocheck)
+ # create a dict hostname -> status
+ status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
+ while tocheck:
+ # get their status
+ tocheck_status=self.server.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
+ # update status
+ for array in tocheck_status:
+ hostname=array['hostname']
+ boot_state=array['boot_state']
+ if boot_state == 'boot':
+ utils.header ("%s has reached the 'boot' state"%hostname)
+ else:
+ # if it's a real node, never mind
+ (site_spec,node_spec)=self.locate_node(hostname)
+ if TestNode.is_real_model(node_spec['node_fields']['model']):
+ utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
+ # let's cheat
+ boot_state = 'boot'
+ if datetime.datetime.now() > graceout:
+ utils.header ("%s still in '%s' state"%(hostname,boot_state))
+ graceout=datetime.datetime.now()+datetime.timedelta(1)
+ status[hostname] = boot_state
+ # refresh tocheck
+ tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
+ if not tocheck:
+ return True
+ if datetime.datetime.now() > timeout:
+ for hostname in tocheck:
+ utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
+ return False
+ # otherwise, sleep for a while
+ time.sleep(15)
+ # only useful in empty plcs
+ return True
+
+ def check_nodesStatus(self,options):
+ return self.do_check_nodesStatus(minutes=5)
+
+    # scan the nodes' public SSH keys and store them, so later ssh connections do not ask for host-key confirmation
+ def scan_publicKeys(self,hostnames):
+ try:
+ temp_knownhosts="/root/known_hosts"
+ remote_knownhosts="/root/.ssh/known_hosts"
+ self.run_in_host("touch %s"%temp_knownhosts )
+ for hostname in hostnames:
+ utils.header("Scan Public %s key and store it in the known_host file(under the root image) "%hostname)
+ scan=self.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
+ #Store the public keys in the right root image
+ self.copy_in_guest(temp_knownhosts,remote_knownhosts,True)
+ #clean the temp keys file used
+ self.run_in_host('rm -f %s '%temp_knownhosts )
+ except Exception, err:
+ print err
+
+ def do_check_nodesSsh(self,minutes):
+ # compute timeout
+ timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
+ #graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
+ tocheck = self.all_hostnames()
+ self.scan_publicKeys(tocheck)
+ utils.header("checking Connectivity on nodes %r"%tocheck)
+ while tocheck:
+ for hostname in tocheck:
+ # try to ssh in nodes
+ access=self.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s date'%hostname )
+ if (not access):
+ utils.header('The node %s is sshable -->'%hostname)
+ # refresh tocheck
+ tocheck.remove(hostname)
+ else:
+ (site_spec,node_spec)=self.locate_node(hostname)
+ if TestNode.is_real_model(node_spec['node_fields']['model']):
+ utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
+ tocheck.remove(hostname)
+ if not tocheck:
+ return True
+ if datetime.datetime.now() > timeout:
+ for hostname in tocheck:
+ utils.header("FAILURE to ssh into %s"%hostname)
+ return False
+ # otherwise, sleep for a while
+ time.sleep(15)
+ # only useful in empty plcs
+ return True
+
+ def check_nodesConnectivity(self, options):
+ return self.do_check_nodesSsh(minutes=2)
+
+ def standby(self,options):
+        # wait a while once the nodes are booted and sshable, giving NM time to come up
+ utils.header('Entering in StanbdBy mode for 10min at %s'%datetime.datetime.now())
+ time.sleep(600)
+ utils.header('Exist StandBy mode at %s'%datetime.datetime.now())
+ return True
+
def bootcd (self, options):
for site_spec in self.plc_spec['sites']:
test_site = TestSite (self,site_spec)
test_node=TestNode (self,test_site,node_spec)
test_node.create_boot_cd(options.path)
return True
-
+
def initscripts (self, options):
for initscript in self.plc_spec['initscripts']:
utils.show_spec('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
def clean_slices (self, options):
return self.do_slices("delete")
- ### would need a TestSlice class
- def do_slices (self, add_or_delete="add"):
+ def do_slices (self, action="add"):
for slice in self.plc_spec['slices']:
site_spec = self.locate_site (slice['sitename'])
test_site = TestSite(self,site_spec)
- owner_spec = test_site.locate_user(slice['owner'])
- auth = TestUser(self,test_site,owner_spec).auth()
- slice_fields = slice['slice_fields']
- slice_name = slice_fields['name']
- if (add_or_delete == "delete"):
- self.server.DeleteSlice(auth,slice_fields['name'])
- utils.header("Deleted slice %s"%slice_fields['name'])
- continue
- utils.show_spec("Creating slice",slice_fields)
- self.server.AddSlice(auth,slice_fields)
- utils.header('Created Slice %s'%slice_fields['name'])
- for username in slice['usernames']:
- user_spec=test_site.locate_user(username)
- test_user=TestUser(self,test_site,user_spec)
- self.server.AddPersonToSlice(auth, test_user.name(), slice_name)
-
- hostnames=[]
- for nodename in slice['nodenames']:
- node_spec=test_site.locate_node(nodename)
- test_node=TestNode(self,test_site,node_spec)
- hostnames += [test_node.name()]
- utils.header("Adding %r in %s"%(hostnames,slice_name))
- self.server.AddSliceToNodes(auth, slice_name, hostnames)
- if slice.has_key('initscriptname'):
- isname=slice['initscriptname']
- utils.header("Adding initscript %s in %s"%(isname,slice_name))
- self.server.AddSliceAttribute(self.auth_root(), slice_name,
- 'initscript',isname)
+ test_slice=TestSlice(self,test_site,slice)
+ if action != "add":
+ utils.header("Deleting slices in site %s"%test_site.name())
+ test_slice.delete_slice()
+ else:
+ utils.show_spec("Creating slice",slice)
+ test_slice.create_slice()
+ utils.header('Created Slice %s'%slice['slice_fields']['name'])
return True
+ def check_slices(self, options):
+ for slice_spec in self.plc_spec['slices']:
+ site_spec = self.locate_site (slice_spec['sitename'])
+ test_site = TestSite(self,site_spec)
+ test_slice=TestSlice(self,test_site,slice_spec)
+ status=test_slice.do_check_slice(options)
+ return status
+
def start_nodes (self, options):
- self.kill_all_vmwares()
- utils.header("Starting vmware nodes")
+ self.kill_all_qemus()
+ utils.header("Starting nodes")
for site_spec in self.plc_spec['sites']:
TestSite(self,site_spec).start_nodes (options)
return True
def stop_nodes (self, options):
- self.kill_all_vmwares ()
+ self.kill_all_qemus()
return True
# returns the filename to use for sql dump/restore, using options.dbname if set