from TestKey import TestKey
from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import Completer, CompleterTask
-class CompleterTaskSshSlice (CompleterTask):
+class CompleterTaskSliceSsh (CompleterTask):
def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
self.test_plc=test_plc
if self.dry_run: return True
if self.expected: return retcod==0
else: return retcod!=0
- def failure_message (self):
+ def failure_epilogue (self):
if self.expected:
- return "Could not ssh into sliver %s@%s"%(self.slicename,self.hostname)
+ print "Could not ssh into sliver %s@%s"%(self.slicename,self.hostname)
else:
- return "Could still ssh into sliver%s@%s (that was expected to be down)"%(self.slicename,self.hostname)
+ print "Could still ssh into sliver %s@%s (that was expected to be down)"%(self.slicename,self.hostname)
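+ # note: failure_epilogue() replaces failure_message(): a task now prints its own
+ # diagnostics instead of returning a message string; rough sketch of the assumed
+ # calling convention on the Completer side (illustrative only, not the actual Completer code):
+ #   if not task.run(silent=True): task.failure_epilogue()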
class TestSlice:
hostnames=[]
for nodename in self.slice_spec['nodenames']:
node_spec=self.test_site.locate_node(nodename)
- test_node=TestNode(self,self.test_site,node_spec)
+ test_node=TestNode(self.test_plc,self.test_site,node_spec)
hostnames += [test_node.name()]
utils.header("Adding %r in %s"%(hostnames,slice_name))
self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
key_names += user_spec['key_names']
return self.test_plc.locate_private_key_from_key_names (key_names)
- # to be used through TestPlc.slice_mapper_tasks
+ # for TestPlc.slice_mapper__tasks
# i.e. returns a list of CompleterTasks that are merged into the same Completer run,
# so that we do not wait once per slice that the Plc has
# also the __doc__ lines are used for the matching TestPlc methods, e.g. just 'ssh_slice'
# (see the commented sketch right after this method)
dry_run = getattr(options,'dry_run',False)
for nodename in self.slice_spec['nodenames']:
(site_spec,node_spec) = self.test_plc.locate_node(nodename)
- tasks.append( CompleterTaskSshSlice(self.test_plc,node_spec['node_fields']['hostname'],
+ tasks.append( CompleterTaskSliceSsh(self.test_plc,node_spec['node_fields']['hostname'],
slicename,private_key,command,expected,dry_run))
return tasks
# return Completer (tasks).run (timeout, graceout, period)
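+ # illustrative sketch of how TestPlc.slice_mapper__tasks presumably drives this
+ # (assumed shape, not the actual TestPlc code): collect each slice's tasks and
+ # hand them all to a single Completer, so every sliver is polled in one run:
+ #   tasks = []
+ #   for slice_spec in self.plc_spec['slices']:
+ #       tasks += TestSlice(self, test_site, slice_spec).ssh_slice__tasks(options)
+ #   return Completer(tasks).run(timeout, graceout, period)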
else: success = retcod!=0
if not success: utils.header ("WRONG RESULT for %s"%msg)
return success
+
+ # for TestPlc.slice_mapper__tasks
+ # check that /vservers/<> is present/deleted
+ def slice_fs_present__tasks (self, options):
+ "checks that /vservers/<slicename> exists on the filesystem"
+ return self.check_rootfs_tasks(options,expected=True)
+ def slice_fs_deleted__tasks (self, options):
+ "checks that /vservers/<slicename> has been properly wiped off"
+ return self.check_rootfs_tasks (options,expected=False)
+
+ def check_rootfs_tasks (self, options, expected):
+ # use constant admin key
+ local_key = "keys/key_admin.rsa"
+ node_infos = self.test_plc.all_node_infos()
+ rootfs="/vservers/%s"%self.name()
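+ # the nested class below captures local_key, rootfs and expected from this scope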
+ class CompleterTaskRootfs (CompleterTaskNodeSsh):
+ def __init__ (self, nodename, qemuname):
+ CompleterTaskNodeSsh.__init__(self,nodename, qemuname, local_key, expected=expected,
+ command="ls -d %s"%rootfs)
+ def failure_epilogue (self):
+ if expected:
+ print "Could not stat %s - was expected to be present"%rootfs
+ else:
+ print "Sliver rootfs %s still present - this is unexpected"%rootfs
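+ # the sliver was expected to be gone - show what is left behind to help debugging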
+ utils.system(self.test_ssh.actual_command("ls -l %s; du -hs %s"%(rootfs,rootfs),dry_run=self.dry_run))
+ return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]
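+ # with the slice_mapper__tasks naming convention these presumably surface in TestPlc
+ # as steps named 'slice_fs_present' and 'slice_fs_deleted' (assumed, by analogy with 'ssh_slice' above)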