from TestKey import TestKey
from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import Completer, CompleterTask
-class CompleterTaskSshSlice (CompleterTask):
+class CompleterTaskSliceSsh (CompleterTask):
def __init__ (self, test_plc, hostname, slicename, private_key,command, expected, dry_run):
self.test_plc=test_plc
hostnames=[]
for nodename in self.slice_spec['nodenames']:
node_spec=self.test_site.locate_node(nodename)
- test_node=TestNode(self,self.test_site,node_spec)
+ test_node=TestNode(self.test_plc,self.test_site,node_spec)
hostnames += [test_node.name()]
utils.header("Adding %r in %s"%(hostnames,slice_name))
self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
key_names += user_spec['key_names']
return self.test_plc.locate_private_key_from_key_names (key_names)
- # to be used through TestPlc.slice_mapper_tasks
+ # for TestPlc.slice_mapper__tasks
# i.e. returns a list of CompleterTasks that are merged into the same Completer run
# to avoid waiting for as many slices as the Plc has
# also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
dry_run = getattr(options,'dry_run',False)
for nodename in self.slice_spec['nodenames']:
(site_spec,node_spec) = self.test_plc.locate_node(nodename)
- tasks.append( CompleterTaskSshSlice(self.test_plc,node_spec['node_fields']['hostname'],
+ tasks.append( CompleterTaskSliceSsh(self.test_plc,node_spec['node_fields']['hostname'],
slicename,private_key,command,expected,dry_run))
return tasks
# return Completer (tasks).run (timeout, graceout, period)
else: success = retcod!=0
if not success: utils.header ("WRONG RESULT for %s"%msg)
return success
+
+ # for TestPlc.slice_mapper__tasks
+ # check that /vservers/<> is present/deleted
+    # Entry point consumed by TestPlc.slice_mapper__tasks: returns the list of
+    # CompleterTasks built by check_rootfs_tasks, with expected=True meaning
+    # the slice's /vservers/<slicename> directory must be present on each node.
+    def slice_fs_present__tasks (self, options):
+        "checks that /vservers/<slicename> exists on the filesystem"
+        return self.check_rootfs_tasks(options,expected=True)
+    # Mirror of slice_fs_present__tasks: expected=False means success is the
+    # ABSENCE of /vservers/<slicename> on every node (i.e. the slice was wiped).
+    def slice_fs_deleted__tasks (self, options):
+        "checks that /vservers/<slicename> has been properly wiped off"
+        return self.check_rootfs_tasks (options,expected=False)
+
+    # Shared helper for slice_fs_present__tasks / slice_fs_deleted__tasks.
+    # Builds one CompleterTaskNodeSsh per (nodename, qemuname) pair returned by
+    # all_node_infos(); each task ssh's into the node (using the fixed admin
+    # key) and runs 'ls -d /vservers/<slicename>'.  'expected' tells the task
+    # whether a zero exit status (directory present) counts as success.
+    # NOTE(review): 'options' is accepted but unused here — presumably kept for
+    # signature uniformity with the other *__tasks methods; confirm.
+    def check_rootfs_tasks (self, options, expected):
+        # use constant admin key
+        local_key = "keys/key_admin.rsa"
+        node_infos = self.test_plc.all_node_infos()
+        return [ CompleterTaskNodeSsh (nodename, qemuname, local_key, expected=expected,
+                                       command="ls -d /vservers/%s"%self.name()) \
+                 for (nodename,qemuname) in node_infos ]
+
+ overall=True
+ for nodename in self.slice_spec['nodenames']:
+ node_spec=self.test_site.locate_node(nodename)
+ test_node=TestNode(self.test_plc,self.test_site,node_spec)
+ test_node_ssh=test_node.create_test_ssh()
+ command="ls /vservers/%s"%self.name()
+ full_command = test_node_ssh.actual_command(command)
+ retcod=utils.system(full_command,silent=True)
+ # we expect the fs to be present, retcod should be 0
+ if expected: fine=(retcod==0)
+ else: fine=(retcod!=0)
+ if not fine: overall=False
+ return overall
+