# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2015 INRIA
#
import utils

from datetime import datetime, timedelta

from TestKey import TestKey
from TestUser import TestUser
from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import CompleterTask

class CompleterTaskSliceSsh (CompleterTask):
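    # one Completer task that attempts to ssh into a single sliver (slicename@hostname)
    # and compares the outcome with what is expected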

    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
        self.test_plc = test_plc
        self.hostname = hostname
        self.slicename = slicename
        self.private_key = private_key
        self.command = command
        self.dry_run = dry_run
        self.expected = expected
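
    # a run is successful when the ssh exit code matches the expectation:
    # retcod == 0 when the sliver is expected to be reachable, retcod != 0 otherwise
    # (in dry_run mode the result is ignored and the task reports success)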
    def run (self, silent):
        site_spec, node_spec = self.test_plc.locate_hostname(self.hostname)
        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
        full_command = test_ssh.actual_command(self.command)
        retcod = utils.system (full_command, silent=silent, timeout=10)
        if self.dry_run:
            return True
        if self.expected:
            return retcod == 0
        else:
            return retcod != 0

    def failure_epilogue (self):
        if self.expected:
            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
        else:
            print("Could still ssh into sliver {}@{} (that was expected to be down)"
                  .format(self.slicename, self.hostname))


class TestSlice:

    def __init__ (self, test_plc, test_site, slice_spec):
        self.test_plc = test_plc
        self.test_site = test_site
        self.slice_spec = slice_spec
        self.test_ssh = TestSsh(self.test_plc.test_ssh)

    def name(self):
        return self.slice_spec['slice_fields']['name']

    def get_slice(self, slice_name):
        for slice_spec in self.test_plc.plc_spec['slices']:
            if slice_spec['slice_fields']['name'] == slice_name:
                return slice_spec

    def owner_auth(self):
        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
        return TestUser(self, self.test_site, owner_spec).auth()

    def slice_name (self):
        return self.slice_spec['slice_fields']['name']
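
    # for reference, the slice_spec dicts handled here look roughly like this
    # (a sketch inferred from the accesses below, not an exhaustive schema):
    #   { 'slice_fields' : { 'name' : ..., },
    #     'owner'        : <username>,
    #     'usernames'    : [ ... ],
    #     'nodenames'    : [ ... ],
    #     # plus optional keys: 'initscriptcode' or 'initscriptname', 'omf-friendly' }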

    # init slice with people, and then add nodes
    def create_slice(self):
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        utils.header("Creating slice {}".format(slice_name))
        self.test_plc.apiserver.AddSlice(auth, slice_fields)
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            test_user = TestUser(self, self.test_site, user_spec)
            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
        # add initscript code or name as appropriate
        if 'initscriptcode' in self.slice_spec:
            iscode = self.slice_spec['initscriptcode']
            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript_code', iscode)
        elif 'initscriptname' in self.slice_spec:
            isname = self.slice_spec['initscriptname']
            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript', isname)
        if 'omf-friendly' in self.slice_spec:
            utils.header("Making slice {} OMF-friendly".format(slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
        # setting vref directly like this was useful for multi-arch tests long ago - see wifilab
        # however this should rather use other tags by now, so we drop this for now
        # if 'vref' in self.slice_spec:
        #     vref_value = self.slice_spec['vref']
        #     self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', vref_value)

        self.add_nodes()

    def check_vsys_defaults (self, options, *args, **kwds):
        "check vsys tags match PLC_VSYS_DEFAULTS"
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
        values = [st['value'] for st in vsys_tags]
        expected = self.test_plc.plc_spec['expected_vsys_tags']
        result = set(values) == set(expected)
        if not result:
            print('Check vsys defaults with slice {}'.format(slice_name))
            print('Expected {}'.format(expected))
            print('Got {}'.format(values))
        return result

    # just add the nodes and handle tags
    def add_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        hostnames = []
        for nodename in self.slice_spec['nodenames']:
            node_spec = self.test_site.locate_node(nodename)
            test_node = TestNode(self.test_plc, self.test_site, node_spec)
            hostnames += [test_node.name()]
        utils.header("Adding {} in {}".format(hostnames, slice_name))
        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)

    # trash the slice altogether
    def delete_slice(self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        utils.header("Deleting slice {}".format(slice_name))
        self.test_plc.apiserver.DeleteSlice(auth, slice_name)

    # keep the slice alive and just delete nodes
    def delete_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        print('retrieving slice {}'.format(slice_name))
        slice = self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
        node_ids = slice['node_ids']
        utils.header("Deleting {} nodes from slice {}"
                     .format(len(node_ids), slice_name))
        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)
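
    # pick a private key among the ones attached to the slice users,
    # as known to the local test environment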
    def locate_private_key(self):
        key_names = []
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            key_names += user_spec['key_names']
        return self.test_plc.locate_private_key_from_key_names (key_names)

    # for TestPlc.slice_mapper__tasks
    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
    # to avoid waiting for as many slices as the Plc has
    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
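    #
    # a rough sketch of the intended consumption - assuming the Completer API is
    # Completer(tasks, message=...).run(timeout, graceout, period) as used elsewhere in this framework:
    #   tasks = test_slice.ssh_slice__tasks(options)
    #   Completer(tasks, message='ssh_slice').run(timedelta(minutes=20),
    #                                             timedelta(minutes=10),
    #                                             timedelta(seconds=15))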
    def ssh_slice__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, to check for slice creation"
        return self.ssh_tasks(options, expected=True, *args, **kwds)

    # when we expect the slice is not reachable
    def ssh_slice_off__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        return self.ssh_tasks(options, expected=False, *args, **kwds)

    def ssh_tasks(self, options, expected=True, command=None):
#                  timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"

        # locate a private key for one of the slice users
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert nodenames to real hostnames
        if expected:
            msg = "ssh slice access enabled"
        else:
            msg = "ssh slice access disabled"
        utils.header("checking for {} -- slice {}".format(msg, self.name()))

        tasks = []
        slicename = self.name()
        dry_run = getattr(options, 'dry_run', False)
        for nodename in self.slice_spec['nodenames']:
            site_spec, node_spec = self.test_plc.locate_node(nodename)
            tasks.append(CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                               slicename, private_key, command, expected, dry_run))
        return tasks
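
    # unlike the __tasks variants above, this one runs its checks synchronously,
    # and only on the first node of the slice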
    def ssh_slice_basics (self, options, *args, **kwds):
        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
        overall = True
        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall = False
        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall = False
        return overall

    # pick just one nodename and run the ssh command once
    def do_ssh_slice_once(self, options, command, expected):
        # locate a private key for one of the slice users
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert nodenames to real hostnames
        slice_spec = self.slice_spec
        nodename = slice_spec['nodenames'][0]
        site_spec, node_spec = self.test_plc.locate_node(nodename)
        hostname = node_spec['node_fields']['hostname']

        if expected:
            msg = "{} to return TRUE from ssh".format(command)
        else:
            msg = "{} to return FALSE from ssh".format(command)

        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
        site_spec, node_spec = self.test_plc.locate_hostname(hostname)
        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
        full_command = test_ssh.actual_command(command)
        retcod = utils.system (full_command, silent=True, timeout=10)

        if getattr(options, 'dry_run', None):
            return True
        if expected:
            success = retcod == 0
        else:
            success = retcod != 0
        if not success:
            utils.header("WRONG RESULT for {}".format(msg))
        return success

    # for TestPlc.slice_mapper__tasks
    # check that /vservers/<> is present/deleted
    def slice_fs_present__tasks (self, options):
        "checks that /vservers/<slicename> exists on the filesystem"
        return self.check_rootfs_tasks(options, expected=True)

    def slice_fs_deleted__tasks (self, options):
        "checks that /vservers/<slicename> has been properly wiped off"
        return self.check_rootfs_tasks(options, expected=False)

    def check_rootfs_tasks (self, options, expected):
        # use constant admin key
        local_key = "keys/key_admin.rsa"
        node_infos = self.test_plc.all_node_infos()
        rootfs = "/vservers/{}".format(self.name())
        class CompleterTaskRootfs (CompleterTaskNodeSsh):
            def __init__ (self, nodename, qemuname):
                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
                                              command="ls -d {}".format(rootfs))
            def failure_epilogue (self):
                if expected:
                    print("Could not stat {} - was expected to be present".format(rootfs))
                else:
                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
                                                              dry_run=self.dry_run, timeout=20))
        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename, qemuname) in node_infos ]