X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=system%2FTestSlice.py;h=f5049d3b471a8b90ed65975cd95a49a2ff2f13e9;hb=55177a1292ca841809a3194015d32f76f1f0420c;hp=a3a45680cb0fbd2f752adb6b0906905ab8b8bf18;hpb=88064a626f60bc944bb48558afa4ea584a1c4099;p=tests.git

diff --git a/system/TestSlice.py b/system/TestSlice.py
index a3a4568..f5049d3 100644
--- a/system/TestSlice.py
+++ b/system/TestSlice.py
@@ -1,11 +1,40 @@
+# Thierry Parmentelat
+# Copyright (C) 2010 INRIA
+#
 import utils
 import os, os.path
-import datetime
+from datetime import datetime, timedelta
 import time
 from TestKey import TestKey
 from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
+from TestSsh import TestSsh
+from Completer import Completer, CompleterTask
+
+class CompleterTaskSliceSsh (CompleterTask):
+
+    def __init__ (self, test_plc, hostname, slicename, private_key,command, expected, dry_run):
+        self.test_plc=test_plc
+        self.hostname=hostname
+        self.slicename=slicename
+        self.private_key=private_key
+        self.command=command
+        self.dry_run=dry_run
+        self.expected=expected
+    def run (self, silent):
+        (site_spec,node_spec) = self.test_plc.locate_hostname(self.hostname)
+        test_ssh = TestSsh (self.hostname,key=self.private_key,username=self.slicename)
+        full_command = test_ssh.actual_command(self.command)
+        retcod = utils.system (full_command, silent=silent)
+        if self.dry_run: return True
+        if self.expected: return retcod==0
+        else: return retcod!=0
+    def failure_message (self):
+        if self.expected:
+            return "Could not ssh into sliver %s@%s"%(self.slicename,self.hostname)
+        else:
+            return "Could still ssh into sliver%s@%s (that was expected to be down)"%(self.slicename,self.hostname)
 
 class TestSlice:
 
@@ -13,112 +42,208 @@ class TestSlice:
         self.test_plc=test_plc
         self.test_site=test_site
         self.slice_spec=slice_spec
-
+        self.test_ssh=TestSsh(self.test_plc.test_ssh)
+
     def name(self):
         return self.slice_spec['slice_fields']['name']
+
+    def get_slice(self,slice_name):
+        for slice_spec in self.test_plc.plc_spec['slices']:
+            if(slice_spec['slice_fields']['name']== slice_name):
+                return slice_spec
 
-    def delete_slice(self):
+    def owner_auth(self):
         owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
-        auth = TestUser(self,self.test_site,owner_spec).auth()
-        slice_fields = self.slice_spec['slice_fields']
-        slice_name = slice_fields['name']
-        self.test_plc.server.DeleteSlice(auth,slice_fields['name'])
-        utils.header("Deleted slice %s"%slice_fields['name'])
+        return TestUser(self,self.test_site,owner_spec).auth()
 
-
+    def slice_name (self):
+        return self.slice_spec['slice_fields']['name']
+
+    # init slice with people, and then add nodes
     def create_slice(self):
-        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
-        auth = TestUser(self,self.test_site,owner_spec).auth()
+        auth = self.owner_auth()
         slice_fields = self.slice_spec['slice_fields']
         slice_name = slice_fields['name']
-
-        self.test_plc.server.AddSlice(auth,slice_fields)
+        utils.header("Creating slice %s"%slice_name)
+        self.test_plc.apiserver.AddSlice(auth,slice_fields)
         for username in self.slice_spec['usernames']:
                 user_spec=self.test_site.locate_user(username)
                 test_user=TestUser(self,self.test_site,user_spec)
-                self.test_plc.server.AddPersonToSlice(auth, test_user.name(), slice_name)
+                self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
+        # add initscript code or name as appropriate
+        if self.slice_spec.has_key('initscriptcode'):
+            iscode=self.slice_spec['initscriptcode']
+            utils.header("Adding initscript code %s in %s"%(iscode,slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'initscript_code',iscode)
+        elif self.slice_spec.has_key('initscriptname'):
+            isname=self.slice_spec['initscriptname']
+            utils.header("Adding initscript name %s in %s"%(isname,slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'initscript',isname)
+        if 'omf-friendly' in self.slice_spec:
+            utils.header("Making slice %s OMF-friendly"%slice_name)
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref','omf')
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'omf_control','yes')
+# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
+# however this should rather use other tags by now, so we drop this for now
+#        if self.slice_spec.has_key ('vref'):
+#            vref_value=self.slice_spec['vref']
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
+        # epilogue
+        self.add_nodes()
 
+    def check_vsys_defaults (self, options, *args, **kwds):
+        "check vsys tags match PLC_VSYS_DEFAULTS"
+        auth = self.owner_auth()
+        slice_fields = self.slice_spec['slice_fields']
+        slice_name = slice_fields['name']
+        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth,{'tagname':'vsys','name':slice_name})
+        values=[ st['value'] for st in vsys_tags ]
+        expected=self.test_plc.plc_spec['expected_vsys_tags']
+        result = set(values) == set(expected)
+        if not result:
+            print 'Check vsys defaults with slice %s'%slice_name
+            print 'Expected %s'%expected
+            print 'Got %s'%values
+        return result
+
+    # just add the nodes and handle tags
+    def add_nodes (self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
         hostnames=[]
         for nodename in self.slice_spec['nodenames']:
             node_spec=self.test_site.locate_node(nodename)
-            test_node=TestNode(self,self.test_site,node_spec)
+            test_node=TestNode(self.test_plc,self.test_site,node_spec)
             hostnames += [test_node.name()]
         utils.header("Adding %r in %s"%(hostnames,slice_name))
-        self.test_plc.server.AddSliceToNodes(auth, slice_name, hostnames)
-        if self.slice_spec.has_key('initscriptname'):
-            isname=self.slice_spec['initscriptname']
-            utils.header("Adding initscript %s in %s"%(isname,slice_name))
-            self.test_plc.server.AddSliceAttribute(self.test_plc.auth_root(), slice_name,'initscript',isname)
+        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
 
-    def clear_known_hosts (self):
-        utils.header("Messing with known_hosts for slice %s"%self.name())
-        # scan nodenames
-        for nodename in self.slice_spec['nodenames']:
-            self.test_plc.run_in_guest("sed -i -e /^%s/d /root/.ssh/known_hosts"%nodename)
-        #scan public key and update the known_host file in the root image
-        self.test_plc.scan_publicKeys(self.slice_spec['nodenames'])
-
-    def locate_key(self,slice_spec):
-        # locate the first avail. key
-        found=False
-        for username in slice_spec['usernames']:
+    # trash the slice altogether
+    def delete_slice(self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
+        utils.header("Deleting slice %s"%slice_name)
+        self.test_plc.apiserver.DeleteSlice(auth,slice_name)
+
+    # keep the slice alive and just delete nodes
+    def delete_nodes (self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
+        print 'retrieving slice %s'%slice_name
+        slice=self.test_plc.apiserver.GetSlices(auth,slice_name)[0]
+        node_ids=slice['node_ids']
+        utils.header ("Deleting %d nodes from slice %s"%\
+                          (len(node_ids),slice_name))
+        self.test_plc.apiserver.DeleteSliceFromNodes (auth,slice_name, node_ids)
+
+    def locate_private_key(self):
+        key_names=[]
+        for username in self.slice_spec['usernames']:
             user_spec=self.test_site.locate_user(username)
-            for keyname in user_spec['keynames']:
-                key_spec=self.test_plc.locate_key(keyname)
-                test_key=TestKey(self.test_plc,key_spec)
-                publickey=test_key.publicpath()
-                privatekey=test_key.privatepath()
-                keyname=test_key.name()
-                if os.path.isfile(publickey) and os.path.isfile(privatekey):
-                    found=True
-                    #create dir in plc root image
-                    remote_privatekey="/root/keys/%s.rsa"%keyname
-                    if not os.path.isdir("/plc/root/data/root/keys"):
-                        self.test_plc.run_in_guest("mkdir /root/keys" )
-                    self.test_plc.copy_in_guest(privatekey,remote_privatekey,True)
-
-        return (found,remote_privatekey)
-
-    def do_check_slices(self,options):
-        bool=True
-        self.clear_known_hosts()
-        start_time = datetime.datetime.now()
-        dead_time=start_time + datetime.timedelta(minutes=15)
-        for slice_spec in self.test_plc.plc_spec['slices']:
-            for hostname in slice_spec['nodenames']:
-                slicename=slice_spec['slice_fields']['name']
-                (found,remote_privatekey)=self.locate_key(slice_spec)
-                if( not found):
-                    raise Exception,"Cannot find a valid key for slice %s"%slicename
-                    break
-                while(bool):
-                    utils.header('trying to connect to %s@%s'%(slicename,hostname))
-                    Date=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
-                    if (Date==0):
-                        break
-                    elif ( start_time <= dead_time ) :
-                        start_time=datetime.datetime.now()+ datetime.timedelta(seconds=45)
-                        time.sleep(45)
-                    elif (options.forcenm):
-                        utils.header('%s@%s : restarting nm in case is in option on %s'%(slicename,hostname,hostname))
-                        access=self.test_plc.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%hostname)
-                        if (access==0):
-                            utils.header('nm restarted on %s'%hostname)
-                        else:
-                            utils.header('%s@%s : Failed to restart the NM on %s'%(slicename,hostname,hostname))
-                        utils.header('Try to reconnect to %s@%s after the tentative of restarting NM'%(slicename,hostname))
-                        connect=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
-                        if (not connect):
-                            utils.header('connected to %s@%s -->'%(slicename,hostname))
-                            break
-                        else:
-                            utils.header('giving up with to %s@%s -->'%(slicename,hostname))
-                            bool=False
-                            break
-                    else:
-                        bool=False
-                        break
-        return bool
-
-
+            key_names += user_spec['key_names']
+        return self.test_plc.locate_private_key_from_key_names (key_names)
+
+    # for TestPlc.slice_mapper__tasks
+    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
+    # to avoid waiting for as many slices as the Plc has
+    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
+    def ssh_slice__tasks (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, to check for slice creation"
+        return self.ssh_tasks(options, expected=True, *args, **kwds)
+
+    # when we expect the slice is not reachable
+    def ssh_slice_off__tasks (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
+        return self.ssh_tasks(options, expected=False, *args, **kwds)
+
+    def ssh_tasks(self,options, expected=True, command=None):
+#                  timeout_minutes=20,silent_minutes=10,period_seconds=15):
+#        timeout = timedelta(minutes=timeout_minutes)
+#        graceout = timedelta(minutes=silent_minutes)
+#        period = timedelta(seconds=period_seconds)
+        if not command:
+            command="echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
+        # locate a key
+        private_key=self.locate_private_key()
+        if not private_key :
+            utils.header("WARNING: Cannot find a valid key for slice %s"%self.name())
+            return False
+
+        # convert nodenames to real hostnames
+        if expected: msg="ssh slice access enabled"
+        else: msg="ssh slice access disabled"
+        utils.header("checking for %s -- slice %s"%(msg,self.name()))
+
+        tasks=[]
+        slicename=self.name()
+        dry_run = getattr(options,'dry_run',False)
+        for nodename in self.slice_spec['nodenames']:
+            (site_spec,node_spec) = self.test_plc.locate_node(nodename)
+            tasks.append( CompleterTaskSliceSsh(self.test_plc,node_spec['node_fields']['hostname'],
+                                                slicename,private_key,command,expected,dry_run))
+        return tasks
+#        return Completer (tasks).run (timeout, graceout, period)
+
+    def ssh_slice_basics (self, options, *args, **kwds):
+        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
+        overall=True
+        if not self.do_ssh_slice_once(options,expected=True, command='true'): overall=False
+        if not self.do_ssh_slice_once(options,expected=False, command='false'): overall=False
+        if not self.do_ssh_slice_once(options,expected=False, command='someimprobablecommandname'): overall=False
+        if not self.do_ssh_slice_once(options,expected=True, command='ps'): overall=False
+        if not self.do_ssh_slice_once(options,expected=False, command='ls /vservers'): overall=False
+        return overall
+
+    # pick just one nodename and runs the ssh command once
+    def do_ssh_slice_once(self,options,command,expected):
+        # locate a key
+        private_key=self.locate_private_key()
+        if not private_key :
+            utils.header("WARNING: Cannot find a valid key for slice %s"%self.name())
+            return False
+
+        # convert nodenames to real hostnames
+        slice_spec = self.slice_spec
+        nodename=slice_spec['nodenames'][0]
+        (site_spec,node_spec) = self.test_plc.locate_node(nodename)
+        hostname=node_spec['node_fields']['hostname']
+
+        if expected: msg="%s to return TRUE from ssh"%command
+        else: msg="%s to return FALSE from ssh"%command
+        utils.header("checking %s -- slice %s on node %s"%(msg,self.name(),hostname))
+        (site_spec,node_spec) = self.test_plc.locate_hostname(hostname)
+        test_ssh = TestSsh (hostname,key=private_key,username=self.name())
+        full_command = test_ssh.actual_command(command)
+        retcod = utils.system (full_command,silent=True)
+        if getattr(options,'dry_run',None): return True
+        if expected: success = retcod==0
+        else: success = retcod!=0
+        if not success: utils.header ("WRONG RESULT for %s"%msg)
+        return success
+
+    # for TestPlc.slice_mapper__tasks
+    # check that /vservers/<> is present/deleted
+    def slice_fs_present__tasks (self, options):
+        "checks that /vservers/ exists on the filesystem"
+        return self.check_rootfs_tasks(options,expected=True)
+    def slice_fs_deleted__tasks (self, options):
+        "checks that /vservers/ has been properly wiped off"
+        return self.check_rootfs_tasks (options,expected=False)
+    def check_rootfs_tasks (self, options, expected):
+        # use constant admin key
+        local_key = "keys/key_admin.rsa"
+        node_infos = self.test_plc.all_node_infos()
+        rootfs="/vservers/%s"%self.name()
+        if expected:
+            failure_message = "Could not stat %s"%rootfs
+        else:
+            failure_message = "Sliver rootfs still present in %s"%rootfs
+        class CompleterTaskRootfs (CompleterTaskNodeSsh):
+            def __init__ (self, nodename, qemuname):
+                CompleterTaskNodeSsh.__init__(self,nodename, qemuname, local_key, expected=expected,
+                                              message=failure_message, command="ls -d %s"%rootfs)
+            def failure_epilogue (self):
+                utils.system(self.test_ssh.actual_command("ls -l %s; du -hs %s"%(rootfs,rootfs),dry_run=self.dry_run))
+        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]