X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=system%2FTestSlice.py;h=bfe0b58ce9f07f935b7253fd39d51750d7bb02c7;hb=457af0d724a9b2e0e2da9486abffa87ba936d536;hp=3b37d629997c2c5104e7a498ce32c1085f06236c;hpb=649f388f61085663d060f100fa011fe81305053d;p=tests.git

diff --git a/system/TestSlice.py b/system/TestSlice.py
index 3b37d62..bfe0b58 100644
--- a/system/TestSlice.py
+++ b/system/TestSlice.py
@@ -1,35 +1,64 @@
+# -*- python3 -*-
 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
-# Copyright (C) 2010 INRIA
+# Copyright (C) 2015 INRIA
 #
 import utils
 import os, os.path
-import datetime
+from datetime import datetime, timedelta
 import time
 
 from TestKey import TestKey
 from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
 from TestSsh import TestSsh
+from Completer import CompleterTask
+
+class CompleterTaskSliceSsh (CompleterTask):
+
+    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
+        self.test_plc = test_plc
+        self.hostname = hostname
+        self.slicename = slicename
+        self.private_key = private_key
+        self.command = command
+        self.dry_run = dry_run
+        self.expected = expected
+
+    def run (self, silent):
+        site_spec, node_spec = self.test_plc.locate_hostname(self.hostname)
+        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
+        full_command = test_ssh.actual_command(self.command)
+        retcod = utils.system (full_command, silent=silent, timeout=10)
+        if self.dry_run: return True
+        if self.expected: return retcod == 0
+        else: return retcod != 0
+
+    def failure_epilogue (self):
+        if self.expected:
+            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
+        else:
+            print("Could still ssh into sliver {}@{} (that was expected to be down)"\
+                  .format(self.slicename, self.hostname))
 
 class TestSlice:
 
-    def __init__ (self,test_plc,test_site,slice_spec):
-        self.test_plc=test_plc
-        self.test_site=test_site
-        self.slice_spec=slice_spec
-        self.test_ssh=TestSsh(self.test_plc.test_ssh)
+    def __init__ (self, test_plc, test_site, slice_spec):
+        self.test_plc = test_plc
+        self.test_site = test_site
+        self.slice_spec = slice_spec
+        self.test_ssh = TestSsh(self.test_plc.test_ssh)
 
     def name(self):
         return self.slice_spec['slice_fields']['name']
 
-    def get_slice(self,slice_name):
+    def get_slice(self, slice_name):
         for slice_spec in self.test_plc.plc_spec['slices']:
-            if(slice_spec['slice_fields']['name']== slice_name):
+            if slice_spec['slice_fields']['name'] == slice_name:
                 return slice_spec
 
     def owner_auth(self):
         owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
-        return TestUser(self,self.test_site,owner_spec).auth()
+        return TestUser(self, self.test_site, owner_spec).auth()
 
     def slice_name (self):
         return self.slice_spec['slice_fields']['name']
@@ -39,27 +68,52 @@ class TestSlice:
         auth = self.owner_auth()
         slice_fields = self.slice_spec['slice_fields']
         slice_name = slice_fields['name']
-        utils.header("Creating slice %s"%slice_name)
-        self.test_plc.apiserver.AddSlice(auth,slice_fields)
+        utils.header("Creating slice {}".format(slice_name))
+        self.test_plc.apiserver.AddSlice(auth, slice_fields)
         for username in self.slice_spec['usernames']:
-            user_spec=self.test_site.locate_user(username)
-            test_user=TestUser(self,self.test_site,user_spec)
+            user_spec = self.test_site.locate_user(username)
+            test_user = TestUser(self, self.test_site, user_spec)
             self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
         # add initscript code or name as appropriate
-        if self.slice_spec.has_key('initscriptcode'):
-            iscode=self.slice_spec['initscriptcode']
-            utils.header("Adding initscript code %s in %s"%(iscode,slice_name))
-            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'initscript_code',iscode)
-        elif self.slice_spec.has_key('initscriptname'):
-            isname=self.slice_spec['initscriptname']
-            utils.header("Adding initscript name %s in %s"%(isname,slice_name))
-            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'initscript',isname)
-        if self.slice_spec.has_key ('vref'):
-            vref_value=self.slice_spec['vref']
-            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
-
+        if 'initscriptcode' in self.slice_spec:
+            iscode = self.slice_spec['initscriptcode']
+            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
+                                                'initscript_code', iscode)
+        elif 'initscriptname' in self.slice_spec:
+            isname = self.slice_spec['initscriptname']
+            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
+                                                'initscript', isname)
+# omf-friendly slices is a deprecated feature
+#        if 'omf-friendly' in self.slice_spec:
+#            utils.header("Making slice {} OMF-friendly".format(slice_name))
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
+#
+# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
+# however this should rather use other tags by now, so we drop this for now
+#        if self.slice_spec.has_key ('vref'):
+#            vref_value = self.slice_spec['vref']
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
+        # epilogue
         self.add_nodes()
 
+    def check_vsys_defaults (self, options, *args, **kwds):
+        "check vsys tags match PLC_VSYS_DEFAULTS"
+        auth = self.owner_auth()
+        slice_fields = self.slice_spec['slice_fields']
+        slice_name = slice_fields['name']
+        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
+        values = [st['value'] for st in vsys_tags]
+        expected = self.test_plc.plc_spec['expected_vsys_tags']
+        result = set(values) == set(expected)
+        if not result:
+            print('Check vsys defaults with slice {}'.format(slice_name))
+            print('Expected {}'.format(expected))
+            print('Got {}'.format(values))
+        return result
+
     # just add the nodes and handle tags
     def add_nodes (self):
         auth = self.owner_auth()
@@ -67,113 +121,143 @@ class TestSlice:
         hostnames=[]
         for nodename in self.slice_spec['nodenames']:
             node_spec=self.test_site.locate_node(nodename)
-            test_node=TestNode(self,self.test_site,node_spec)
+            test_node=TestNode(self.test_plc, self.test_site, node_spec)
             hostnames += [test_node.name()]
-        utils.header("Adding %r in %s"%(hostnames,slice_name))
+        utils.header("Adding {} in {}".format(hostnames, slice_name))
         self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
 
     # trash the slice altogether
     def delete_slice(self):
         auth = self.owner_auth()
         slice_name = self.slice_name()
-        utils.header("Deleting slice %s"%slice_name)
-        self.test_plc.apiserver.DeleteSlice(auth,slice_name)
+        utils.header("Deleting slice {}".format(slice_name))
+        self.test_plc.apiserver.DeleteSlice(auth, slice_name)
 
     # keep the slice alive and just delete nodes
     def delete_nodes (self):
         auth = self.owner_auth()
         slice_name = self.slice_name()
-        print 'retrieving slice %s'%slice_name
-        slice=self.test_plc.apiserver.GetSlices(auth,slice_name)[0]
+        print('retrieving slice {}'.format(slice_name))
+        slice=self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
         node_ids=slice['node_ids']
-        utils.header ("Deleting %d nodes from slice %s"%\
-                          (len(node_ids),slice_name))
-        self.test_plc.apiserver.DeleteSliceFromNodes (auth,slice_name, node_ids)
+        utils.header ("Deleting {} nodes from slice {}"\
+                      .format(len(node_ids), slice_name))
+        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)
 
-    def locate_key(self):
-        # locate the first avail. key
-        found=False
+    def locate_private_key(self):
+        key_names=[]
         for username in self.slice_spec['usernames']:
             user_spec=self.test_site.locate_user(username)
-            for key_name in user_spec['key_names']:
-                key_spec=self.test_plc.locate_key(key_name)
-                test_key=TestKey(self.test_plc,key_spec)
-                publickey=test_key.publicpath()
-                privatekey=test_key.privatepath()
-                if os.path.isfile(publickey) and os.path.isfile(privatekey):
-                    found=True
-                    return (found,privatekey)
+            key_names += user_spec['key_names']
+        return self.test_plc.locate_private_key_from_key_names (key_names)
 
-
-    # trying to reach the slice through ssh - expected to answer
-    def ssh_slice (self, options, *args, **kwds):
-        "tries to ssh-enter the slice with the user key, to ensure slice creation"
-        return self.do_ssh_slice(options, expected=True, *args, **kwds)
+    # for TestPlc.slice_mapper__tasks
+    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
+    # to avoid waiting for as many slices as the Plc has
+    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
+    def ssh_slice__tasks (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, to check for slice creation"
+        return self.ssh_tasks(options, expected=True, *args, **kwds)
 
     # when we expect the slice is not reachable
-    def ssh_slice_off (self, options, *args, **kwds):
+    def ssh_slice_off__tasks (self, options, *args, **kwds):
         "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
-        return self.do_ssh_slice(options, expected=False, *args, **kwds)
+        return self.ssh_tasks(options, expected=False, *args, **kwds)
 
-    def do_ssh_slice(self,options,expected=True,timeout_minutes=20,silent_minutes=10,period=15):
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
-        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+    def ssh_tasks(self,options, expected=True, command=None):
+#                  timeout_minutes=20, silent_minutes=10, period_seconds=15):
+#        timeout = timedelta(minutes=timeout_minutes)
+#        graceout = timedelta(minutes=silent_minutes)
+#        period = timedelta(seconds=period_seconds)
+        if not command:
+            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
         # locate a key
-        (found,remote_privatekey)=self.locate_key()
-        if not found :
-            utils.header("WARNING: Cannot find a valid key for slice %s"%self.name())
+        private_key = self.locate_private_key()
+        if not private_key :
+            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
             return False
 
         # convert nodenames to real hostnames
-        slice_spec = self.slice_spec
-        restarted=[]
-        tocheck=[]
-        for nodename in slice_spec['nodenames']:
-            (site_spec,node_spec) = self.test_plc.locate_node(nodename)
-            tocheck.append(node_spec['node_fields']['hostname'])
-
         if expected:    msg="ssh slice access enabled"
         else:           msg="ssh slice access disabled"
+        utils.header("checking for {} -- slice {}".format(msg, self.name()))
+
+        tasks=[]
+        slicename=self.name()
+        dry_run = getattr(options, 'dry_run', False)
+        for nodename in self.slice_spec['nodenames']:
+            site_spec, node_spec = self.test_plc.locate_node(nodename)
+            tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
+                                                slicename, private_key, command, expected, dry_run))
+        return tasks
+
+    def ssh_slice_basics (self, options, *args, **kwds):
+        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
+        overall = True
+        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall=False
+        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall=False
+        return overall
+
+    # picks just one nodename and runs the ssh command once
+    def do_ssh_slice_once(self, options, command, expected):
+        # locate a key
+        private_key=self.locate_private_key()
+        if not private_key :
+            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
+            return False
+
+        # convert nodenames to real hostnames
+        slice_spec = self.slice_spec
+        nodename=slice_spec['nodenames'][0]
+        site_spec, node_spec = self.test_plc.locate_node(nodename)
+        hostname=node_spec['node_fields']['hostname']
+
+        if expected:
+            msg="{} to return TRUE from ssh".format(command)
+        else:
+            msg="{} to return FALSE from ssh".format(command)
-        utils.header("checking for %s -- slice %s on nodes %r"%(msg,self.name(),tocheck))
-        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
-            (timeout_minutes,silent_minutes,period))
-        while tocheck:
-            for hostname in tocheck:
-                (site_spec,node_spec) = self.test_plc.locate_hostname(hostname)
-                date_test_ssh = TestSsh (hostname,key=remote_privatekey,username=self.name())
-                command = date_test_ssh.actual_command("echo hostname ; hostname; echo id; id; echo uname -a ; uname -a")
-                date = utils.system (command, silent=datetime.datetime.now() < graceout)
-                if getattr(options,'dry_run',None): return True
-                if expected:    success = date==0
-                else:           success = date!=0
-
-                if success:
-                    utils.header("OK %s - slice=%s@%s"%(msg,self.name(),hostname))
-                    tocheck.remove(hostname)
+        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
+        site_spec, node_spec = self.test_plc.locate_hostname(hostname)
+        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
+        full_command = test_ssh.actual_command(command)
+        retcod = utils.system (full_command, silent=True, timeout=10)
+        if getattr(options, 'dry_run', None):
+            return True
+        if expected:
+            success = retcod==0
+        else:
+            success = retcod!=0
+        if not success:
+            utils.header ("WRONG RESULT for {}".format(msg))
+        return success
+
+    # for TestPlc.slice_mapper__tasks
+    # check that /vservers/<> is present/deleted
+    def slice_fs_present__tasks (self, options):
+        "checks that /vservers/ exists on the filesystem"
+        return self.check_rootfs_tasks(options, expected=True)
+    def slice_fs_deleted__tasks (self, options):
+        "checks that /vservers/ has been properly wiped off"
+        return self.check_rootfs_tasks (options, expected=False)
+
+    def check_rootfs_tasks (self, options, expected):
+        # use constant admin key
+        local_key = "keys/key_admin.rsa"
+        node_infos = self.test_plc.all_node_infos()
+        rootfs="/vservers/{}".format(self.name())
+        class CompleterTaskRootfs (CompleterTaskNodeSsh):
+            def __init__ (self, nodename, qemuname):
+                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
+                                              command="ls -d {}".format(rootfs))
+            def failure_epilogue (self):
+                if expected:
+                    print("Could not stat {} - was expected to be present".format(rootfs))
                 else:
-                    # real nodes will have been checked once in case they're up - skip if not
-                    if TestNode.is_real_model(node_spec['node_fields']['model']):
-                        utils.header("WARNING : Checking slice %s on real node %s skipped"%(self.name(),hostname))
-                        tocheck.remove(hostname)
-                    # nm restart after first failure, if requested
-                    if options.forcenm and hostname not in restarted:
-                        utils.header ("forcenm option : restarting nm on %s"%hostname)
-                        restart_test_ssh=TestSsh(hostname,key="keys/key_admin.rsa")
-                        access=restart_test_ssh.actual_command('service nm restart')
-                        if (access==0):
-                            utils.header('nm restarted on %s'%hostname)
-                        else:
-                            utils.header('Failed to restart nm on %s'%(hostname))
-                        restarted.append(hostname)
-            if not tocheck:
-                # we're done
-                return True
-            if datetime.datetime.now() > timeout:
-                for hostname in tocheck:
-                    utils.header("FAILED %s slice=%s@%s"%(msg,self.name(),hostname))
-                return False
-            # wait for the period
-            time.sleep (period)
-        # for an empty slice
-        return True
+                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
+                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
+                                                              dry_run=self.dry_run, timeout=20))
+        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]
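
The gist of this commit: the retry loop that do_ssh_slice used to carry inline (an overall timeout, a grace period during which output is silenced, a sleep between rounds) moves into the shared Completer machinery, and the *__tasks methods now only build CompleterTask objects exposing run(silent) and failure_epilogue(). As the comments above say, this lets TestPlc merge the tasks of every slice into one Completer run instead of waiting on each slice in turn. Below is a minimal sketch of the driver side of that contract, reconstructed from the semantics of the removed loop; this simplified Completer is an assumption for illustration, not the actual class from Completer.py.

# Hypothetical, simplified Completer; the real one lives in Completer.py
# in tests.git.  It mirrors what the removed do_ssh_slice loop did: rerun
# every pending task each round, stay silent until the grace period
# expires, and print each remaining task's failure_epilogue on timeout.
from datetime import datetime, timedelta
import time

class Completer:
    def __init__(self, tasks):
        # each task offers run(silent) -> bool and failure_epilogue()
        self.tasks = tasks

    def run(self, timeout, graceout, period):
        deadline = datetime.now() + timeout
        grace = datetime.now() + graceout
        pending = list(self.tasks)
        while pending:
            # rerun every pending task; keep only the ones that still fail
            pending = [task for task in pending
                       if not task.run(silent=datetime.now() < grace)]
            if not pending:
                return True
            if datetime.now() > deadline:
                for task in pending:
                    task.failure_epilogue()
                return False
            time.sleep(period.total_seconds())
        return True    # an empty task list succeeds trivially

Under this contract a caller would collect the lists returned by ssh_slice__tasks for all slices and run something like Completer(tasks).run(timedelta(minutes=20), timedelta(minutes=10), timedelta(seconds=15)), the same 20/10/15 constants that the old per-slice do_ssh_slice hard-coded in its signature.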