#
import utils
import os, os.path
-import datetime
+from datetime import datetime, timedelta
import time
from TestKey import TestKey
from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
+from Completer import Completer, CompleterTask
+
+class CompleterTaskSliceSsh (CompleterTask):
+
+ def __init__ (self, test_plc, hostname, slicename, private_key,command, expected, dry_run):
+ self.test_plc=test_plc
+ self.hostname=hostname
+ self.slicename=slicename
+ self.private_key=private_key
+ self.command=command
+ self.dry_run=dry_run
+ self.expected=expected
+ def run (self, silent):
+ (site_spec,node_spec) = self.test_plc.locate_hostname(self.hostname)
+ test_ssh = TestSsh (self.hostname,key=self.private_key,username=self.slicename)
+ full_command = test_ssh.actual_command(self.command)
+ retcod = utils.system (full_command, silent=silent)
+ if self.dry_run: return True
+ if self.expected: return retcod==0
+ else: return retcod!=0
+ def failure_epilogue (self):
+ if self.expected:
+ print "Could not ssh into sliver %s@%s"%(self.slicename,self.hostname)
+ else:
+            print "Could still ssh into sliver %s@%s (that was expected to be down)"%(self.slicename,self.hostname)
class TestSlice:
isname=self.slice_spec['initscriptname']
utils.header("Adding initscript name %s in %s"%(isname,slice_name))
self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'initscript',isname)
- if self.slice_spec.has_key ('vref'):
- vref_value=self.slice_spec['vref']
- self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
-
+ if 'omf-friendly' in self.slice_spec:
+ utils.header("Making slice %s OMF-friendly"%slice_name)
+ self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref','omf')
+ self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'omf_control','yes')
+# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
+# however this should rather use other tags by now, so we drop this for now
+# if self.slice_spec.has_key ('vref'):
+# vref_value=self.slice_spec['vref']
+# self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
+ # epilogue
self.add_nodes()
def check_vsys_defaults (self, options, *args, **kwds):
hostnames=[]
for nodename in self.slice_spec['nodenames']:
node_spec=self.test_site.locate_node(nodename)
- test_node=TestNode(self,self.test_site,node_spec)
+ test_node=TestNode(self.test_plc,self.test_site,node_spec)
hostnames += [test_node.name()]
utils.header("Adding %r in %s"%(hostnames,slice_name))
self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
key_names += user_spec['key_names']
return self.test_plc.locate_private_key_from_key_names (key_names)
- # trying to reach the slice through ssh - expected to answer
- def ssh_slice (self, options, *args, **kwds):
- "tries to ssh-enter the slice with the user key, to ensure slice creation"
- return self.do_ssh_slice(options, expected=True, *args, **kwds)
+ # for TestPlc.slice_mapper__tasks
+ # i.e. returns a list of CompleterTasks that are merged into the same Completer run
+ # to avoid waiting for as many slices as the Plc has
+    # also, each method's docstring is reused as the __doc__ of the corresponding TestPlc method, e.g. plain 'ssh_slice'
+ def ssh_slice__tasks (self, options, *args, **kwds):
+ "tries to ssh-enter the slice with the user key, to check for slice creation"
+ return self.ssh_tasks(options, expected=True, *args, **kwds)
# when we expect the slice is not reachable
- def ssh_slice_off (self, options, *args, **kwds):
+ def ssh_slice_off__tasks (self, options, *args, **kwds):
"tries to ssh-enter the slice with the user key, expecting it to be unreachable"
- return self.do_ssh_slice(options, expected=False, *args, **kwds)
+ return self.ssh_tasks(options, expected=False, *args, **kwds)
- def do_ssh_slice(self,options,expected=True,timeout_minutes=20,silent_minutes=10,period=15,command=None):
- timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
- graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+ def ssh_tasks(self,options, expected=True, command=None):
+# timeout_minutes=20,silent_minutes=10,period_seconds=15):
+# timeout = timedelta(minutes=timeout_minutes)
+# graceout = timedelta(minutes=silent_minutes)
+# period = timedelta(seconds=period_seconds)
if not command:
command="echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
# locate a key
return False
# convert nodenames to real hostnames
- slice_spec = self.slice_spec
- restarted=[]
- tocheck=[]
- for nodename in slice_spec['nodenames']:
- (site_spec,node_spec) = self.test_plc.locate_node(nodename)
- tocheck.append(node_spec['node_fields']['hostname'])
-
if expected: msg="ssh slice access enabled"
else: msg="ssh slice access disabled"
-
- utils.header("checking for %s -- slice %s on nodes %r"%(msg,self.name(),tocheck))
- utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
- (timeout_minutes,silent_minutes,period))
- while tocheck:
- for hostname in tocheck:
- (site_spec,node_spec) = self.test_plc.locate_hostname(hostname)
- test_ssh = TestSsh (hostname,key=private_key,username=self.name())
- full_command = test_ssh.actual_command(command)
- retcod = utils.system (full_command, silent=datetime.datetime.now() < graceout)
- if getattr(options,'dry_run',None): return True
- if expected: success = retcod==0
- else: success = retcod!=0
-
- if success:
- utils.header("OK %s - slice=%s@%s"%(msg,self.name(),hostname))
- tocheck.remove(hostname)
- else:
- # real nodes will have been checked once in case they're up - skip if not
- if TestNode.is_real_model(node_spec['node_fields']['model']):
- utils.header("WARNING : Checking slice %s on real node %s skipped"%(self.name(),hostname))
- tocheck.remove(hostname)
- # nm restart after first failure, if requested
- if options.forcenm and hostname not in restarted:
- utils.header ("forcenm option : restarting nm on %s"%hostname)
- restart_test_ssh=TestSsh(hostname,key="keys/key_admin.rsa")
- access=restart_test_ssh.actual_command('service nm restart')
- if (access==0):
- utils.header('nm restarted on %s'%hostname)
- else:
- utils.header('Failed to restart nm on %s'%(hostname))
- restarted.append(hostname)
- if not tocheck:
- # we're done
- return True
- if datetime.datetime.now() > timeout:
- for hostname in tocheck:
- utils.header("FAILED %s slice=%s@%s"%(msg,self.name(),hostname))
- return False
- # wait for the period
- time.sleep (period)
- # for an empty slice
- return True
+ utils.header("checking for %s -- slice %s"%(msg,self.name()))
+
+ tasks=[]
+ slicename=self.name()
+ dry_run = getattr(options,'dry_run',False)
+ for nodename in self.slice_spec['nodenames']:
+ (site_spec,node_spec) = self.test_plc.locate_node(nodename)
+ tasks.append( CompleterTaskSliceSsh(self.test_plc,node_spec['node_fields']['hostname'],
+ slicename,private_key,command,expected,dry_run))
+ return tasks
+# return Completer (tasks).run (timeout, graceout, period)
def ssh_slice_basics (self, options, *args, **kwds):
"the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
if not self.do_ssh_slice_once(options,expected=False, command='false'): overall=False
if not self.do_ssh_slice_once(options,expected=False, command='someimprobablecommandname'): overall=False
if not self.do_ssh_slice_once(options,expected=True, command='ps'): overall=False
+ if not self.do_ssh_slice_once(options,expected=False, command='ls /vservers'): overall=False
return overall
# pick just one nodename and runs the ssh command once
else: success = retcod!=0
if not success: utils.header ("WRONG RESULT for %s"%msg)
return success
+
+ # for TestPlc.slice_mapper__tasks
+ # check that /vservers/<> is present/deleted
+ def slice_fs_present__tasks (self, options):
+ "checks that /vservers/<slicename> exists on the filesystem"
+ return self.check_rootfs_tasks(options,expected=True)
+ def slice_fs_deleted__tasks (self, options):
+ "checks that /vservers/<slicename> has been properly wiped off"
+ return self.check_rootfs_tasks (options,expected=False)
+
+ def check_rootfs_tasks (self, options, expected):
+ # use constant admin key
+ local_key = "keys/key_admin.rsa"
+ node_infos = self.test_plc.all_node_infos()
+ rootfs="/vservers/%s"%self.name()
+ class CompleterTaskRootfs (CompleterTaskNodeSsh):
+ def __init__ (self, nodename, qemuname):
+ CompleterTaskNodeSsh.__init__(self,nodename, qemuname, local_key, expected=expected,
+ command="ls -d %s"%rootfs)
+ def failure_epilogue (self):
+ if expected:
+ print "Could not stat %s - was expected to be present"%rootfs
+ else:
+ print "Sliver rootfs %s still present - this is unexpected"%rootfs
+ utils.system(self.test_ssh.actual_command("ls -l %s; du -hs %s"%(rootfs,rootfs),dry_run=self.dry_run))
+ return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]