return actual
def node_mapper (method):
- def actual(self):
+ def actual(self,*args, **kwds):
overall=True
node_method = TestNode.__dict__[method.__name__]
- for site_spec in self.plc_spec['sites']:
- test_site = TestSite (self,site_spec)
- for node_spec in site_spec['nodes']:
- test_node = TestNode (self,test_site,node_spec)
- if not node_method(test_node): overall=False
+ for test_node in self.all_nodes():
+ if not node_method(test_node, *args, **kwds): overall=False
return overall
# restore the doc text
actual.__doc__=method.__doc__
'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
- 'check_tcp', SEP,
+ 'check_tcp', 'check_sys_slice', SEP,
'force_gather_logs', SEP,
]
other_steps = [
self.plc_spec=plc_spec
self.options=options
self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
- try:
- self.vserverip=plc_spec['vserverip']
- self.vservername=plc_spec['vservername']
- self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
- self.vserver=True
- except:
- raise Exception,'chroot-based myplc testing is deprecated'
+ self.vserverip=plc_spec['vserverip']
+ self.vservername=plc_spec['vservername']
+ self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
self.apiserver=TestApiserver(self.url,options.dry_run)
+ # True when the myplc under test exposes the newer address API
+ # (probed by checking the API server for an AddIpAddress method)
+ def has_addresses_api (self):
+ return hasattr(self.apiserver,'AddIpAddress')
+
+ # display name, built as "<plc spec name>.<vservername>"
def name(self):
name=self.plc_spec['name']
return "%s.%s"%(name,self.vservername)
+ # run a shell command on the host box, through the test ssh connection
def run_in_host (self,command):
return self.test_ssh.run_in_buildname(command)
- #command gets run in the vserver
+ #command gets run in the plc's vm
def host_to_guest(self,command):
+ # lxc guests are entered over ssh on their IP; vserver guests via 'vserver exec'
+ if self.options.plcs_use_lxc:
+ return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
+ else:
+ return "vserver %s exec %s"%(self.vservername,command)
+ # path, on the host box, to the guest's root filesystem
+ def vm_root_in_host(self):
+ if self.options.plcs_use_lxc:
+ return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
+ else:
+ return "/vservers/%s"%(self.vservername)
+
+ # path, on the host box, of the file that records the guest's timestamp
+ def vm_timestamp_path (self):
+ if self.options.plcs_use_lxc:
+ return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
+ else:
+ return "/vservers/%s.timestamp"%(self.vservername)
+
#start/stop the vserver
+ # command (to be run on the host box) that boots the guest, per virtualization flavour
def start_guest_in_host(self):
- return "vserver %s start"%(self.vservername)
+ if self.options.plcs_use_lxc:
+ return "lxc-start --daemon --name=%s"%(self.vservername)
+ else:
+ return "vserver %s start"%(self.vservername)
+ # command (to be run on the host box) that shuts the guest down
def stop_guest_in_host(self):
- return "vserver %s stop"%(self.vservername)
+ if self.options.plcs_use_lxc:
+ return "lxc-stop --name=%s"%(self.vservername)
+ else:
+ return "vserver %s stop"%(self.vservername)
# xxx quick n dirty
def run_in_guest_piped (self,local,remote):
#################### display config
def show (self):
"show test configuration after localization"
- self.display_pass (1)
- self.display_pass (2)
+ self.show_pass (1)
+ self.show_pass (2)
return True
def export (self):
"print cut'n paste-able stuff to export env variables to your shell"
- # these work but the shell prompt does not get displayed..
- command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
- command2="ssh root@%s %s"%(socket.gethostname(),command1)
# guess local domain from hostname
domain=socket.gethostname().split('.',1)[1]
fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
print "export BUILD=%s"%self.options.buildname
- print "export PLCHOST=%s"%fqdn
- print "export GUEST=%s"%self.plc_spec['vservername']
+ if self.options.plcs_use_lxc:
+ print "export PLCHOSTLXC=%s"%fqdn
+ else:
+ print "export PLCHOSTVS=%s"%fqdn
+ print "export GUESTNAME=%s"%self.plc_spec['vservername']
+ vplcname=self.plc_spec['vservername'].split('-')[-1]
+ print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
# find hostname of first node
(hostname,qemubox) = self.all_node_infos()[0]
print "export KVMHOST=%s.%s"%(qemubox,domain)
# entry point
always_display_keys=['PLC_WWW_HOST','nodes','sites',]
- def display_pass (self,passno):
+ def show_pass (self,passno):
for (key,val) in self.plc_spec.iteritems():
if not self.options.verbose and key not in TestPlc.always_display_keys: continue
if passno == 2:
@staticmethod
def display_mapping_plc (plc_spec):
print '+ MyPLC',plc_spec['name']
+ # WARNING this would not be right for lxc-based PLC's - should be harmless though
print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
for site_spec in plc_spec['sites']:
# cannot be inside the vserver, that causes vserver .. build to cough
+ # records the current epoch in the vm timestamp file on the host box
def timestamp_vs (self):
now=int(time.time())
- return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
+ # TODO-lxc check this one
+ # a first approx. is to store the timestamp close to the VM root like vs does
+ stamp_path=self.vm_timestamp_path ()
+ stamp_dir = os.path.dirname (stamp_path)
+ utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
+ return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
-# def local_pre (self):
-# "run site-dependant pre-test script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_pre(self)
-#
-# def local_post (self):
-# "run site-dependant post-test script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_post(self)
-#
-# def local_list (self):
-# "run site-dependant list script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_list(self)
-#
-# def local_rel (self):
-# "run site-dependant release script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_release(self)
-#
-# def local_rel_plc (self):
-# "run site-dependant release script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_release_plc(self)
-#
-# def local_rel_qemu (self):
-# "run site-dependant release script as defined in LocalTestResources"
-# from LocalTestResources import local_resources
-# return local_resources.step_release_qemu(self)
-#
+ # this is called unconditionally at the beginning of the test sequence
+ # just in case this is a rerun, so if the vm is not running it's fine
def vs_delete(self):
"vserver delete the test myplc"
- self.run_in_host("vserver --silent %s delete"%self.vservername)
- self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
- return True
+ # drop the timestamp first, then tear down the guest per flavour
+ stamp_path=self.vm_timestamp_path()
+ self.run_in_host("rm -f %s"%stamp_path)
+ if self.options.plcs_use_lxc:
+ # stop the container first, then destroy it
+ self.run_in_host("lxc-stop --name %s"%self.vservername)
+ self.run_in_host("lxc-destroy --name %s"%self.vservername)
+ return True
+ else:
+ self.run_in_host("vserver --silent %s delete"%self.vservername)
+ return True
### install
# historically the build was being fetched by the tests
test_env_options += " -p %s"%self.options.personality
test_env_options += " -d %s"%self.options.pldistro
test_env_options += " -f %s"%self.options.fcdistro
- script="vtest-init-vserver.sh"
+ if self.options.plcs_use_lxc:
+ script="vtest-init-lxc.sh"
+ else:
+ script="vtest-init-vserver.sh"
vserver_name = self.vservername
vserver_options="--netdev eth0 --interface %s"%self.vserverip
try:
pkgs_list.append ("slicerepo-%s"%nodefamily)
pkgs_list.append ("myplc")
pkgs_list.append ("noderepo-%s"%nodefamily)
- pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
+ pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
pkgs_string=" ".join(pkgs_list)
return self.yum_install (pkgs_list)
if not os.path.isdir(dir):
os.mkdir(dir)
vservername=self.vservername
+ vm_root=self.vm_root_in_host()
overall=True
prefix = 'debug_ssh_key'
for ext in [ 'pub', 'rsa' ] :
- src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+ src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
dst="keys/%(vservername)s-debug.%(ext)s"%locals()
if self.test_ssh.fetch(src,dst) != 0: overall=False
return overall
overall=False
return overall
+ # a list of TestNode objs
+ # builds one TestNode per node spec, across every site in the plc spec
+ def all_nodes (self):
+ nodes=[]
+ for site_spec in self.plc_spec['sites']:
+ test_site = TestSite (self,site_spec)
+ for node_spec in site_spec['nodes']:
+ nodes.append(TestNode (self,test_site,node_spec))
+ return nodes
+
+
# return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
node_infos = []
return True
def nodes_booted(self):
- return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
+ return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
# compute timeout
def ssh_node_debug(self):
"Tries to ssh into nodes in debug mode with the debug ssh key"
- return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
+ return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
def ssh_node_boot(self):
"Tries to ssh into nodes in production mode with the root ssh key"
- return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
+ return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
@node_mapper
def qemu_local_init (self):
overall=False
return overall
+ # painfully enough, we need to allow for some time as netflow might show up last
+ # delegates to check_systemslice, which polls with a timeout
+ def check_sys_slice (self):
+ "all nodes: check that a system slice is alive"
+# would probably make more sense to check for netflow,
+# but that one is currently not working in the lxc distro
+# return self.check_systemslice ('netflow')
+ return self.check_systemslice ('drl')
+
+ # we have the slices up already here, so it should not take too long
+ # polls every <period> seconds until the slice is seen on every node, or timeout expires
+ def check_systemslice (self, slicename, timeout_minutes=5, period=15):
+ timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
+ test_nodes=self.all_nodes()
+ while test_nodes:
+ for test_node in test_nodes:
+ if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
+ utils.header ("ok")
+ # NOTE(review): removing from test_nodes while iterating it can make
+ # this for loop skip the following node in the same sweep; the outer
+ # while re-scans, so this only costs an extra period - iterating a
+ # copy (test_nodes[:]) would avoid it
+ test_nodes.remove(test_node)
+ else:
+ print '.',
+ if not test_nodes:
+ return True
+ if datetime.datetime.now () > timeout:
+ for test_node in test_nodes:
+ utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
+ return False
+ time.sleep(period)
+ return True
+
def plcsh_stress_test (self):
"runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote="/vservers/%s/%s"%(self.vservername,location)
+ remote="%s/%s"%(self.vm_root_in_host(),location)
self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
command = location
command += " -- --check"
file(reg_fname,"w").write("<registries>%s</registries>\n" % \
" ".join([ plc.registry_xml_line() for plc in other_plcs ]))
utils.header ("(Over)wrote %s"%reg_fname)
- return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
- and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
+ return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
+ and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
"sfa-import-plc"
auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
return self.run_in_guest('sfa-import.py')==0 or \
self.run_in_guest('sfa-import-plc.py')==0 or \
- self.run_in_guest('sfaadmin.py registry import_registry')
+ self.run_in_guest('sfaadmin.py registry import_registry')==0
# not needed anymore
# self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
test_slice.sfi_config(dir_name)
# push into the remote /root/sfi area
location = test_slice.sfi_path()
- remote="/vservers/%s/%s"%(self.vservername,location)
+ remote="%s/%s"%(self.vm_root_in_host(),location)
self.test_ssh.mkdir(remote,abs=True)
# need to strip last level or remote otherwise we get an extra dir level
self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
"creates random entries in the PLCAPI"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
- remote="/vservers/%s/%s"%(self.vservername,location)
+ remote="%s/%s"%(self.vm_root_in_host(),location)
self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
command = location
command += " -- --preserve --short-names"
"gets all possible logs from plc's/qemu node's/slice's for future reference"
# (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
# (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
+ # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
# (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
# (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
# (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
# (1.b)
print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
self.gather_pgsql_logs ()
+ # (1.c)
+ print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+ self.gather_root_sfi ()
# (2)
print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
for site_spec in self.plc_spec['sites']:
command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
utils.system(command)
+ # tar /root/sfi inside the guest and untar it locally under logs/sfi.<name>/
+ def gather_root_sfi (self):
+ utils.system("mkdir -p logs/sfi.%s"%self.name())
+ to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
+ command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
+ utils.system(command)
+
def gather_nodes_var_logs (self):
for site_spec in self.plc_spec['sites']:
test_site = TestSite (self,site_spec)