# -*- python3 -*-
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2015 INRIA
#
import utils
import os, os.path
from datetime import datetime, timedelta
import time

from TestKey import TestKey
from TestUser import TestUser
from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import CompleterTask

class CompleterTaskSliceSsh (CompleterTask):

    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
        self.test_plc = test_plc
        self.hostname = hostname
        self.slicename = slicename
        self.private_key = private_key
        self.command = command
        self.dry_run = dry_run
        self.expected = expected

    def run (self, silent):
        site_spec, node_spec = self.test_plc.locate_hostname(self.hostname)
        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
        full_command = test_ssh.actual_command(self.command)
        retcod = utils.system (full_command, silent=silent, timeout=10)
        if self.dry_run:        return True
        if self.expected:       return retcod == 0
        else:                   return retcod != 0

    def failure_epilogue (self):
        if self.expected:
            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
        else:
            print("Could still ssh into sliver {}@{} (that was expected to be down)"\
                .format(self.slicename, self.hostname))

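# a minimal usage sketch, not part of the test logic: how a list of these tasks
# would typically be handed to a Completer -- the Completer constructor and
# run() signature shown here are assumptions for illustration only
#
#     from Completer import Completer
#     tasks = [CompleterTaskSliceSsh(test_plc, hostname, 'inri_slice1',
#                                    'keys/key_user.rsa', 'id', True, False)
#              for hostname in hostnames]
#     Completer(tasks, message="ssh into slivers").run(timedelta(minutes=20),
#                                                      timedelta(minutes=10),
#                                                      timedelta(seconds=15))
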
class TestSlice:

    def __init__ (self, test_plc, test_site, slice_spec):
        self.test_plc = test_plc
        self.test_site = test_site
        self.slice_spec = slice_spec
        self.test_ssh = TestSsh(self.test_plc.test_ssh)

    def name(self):
        return self.slice_spec['slice_fields']['name']

    def get_slice(self, slice_name):
        for slice_spec in self.test_plc.plc_spec['slices']:
            if slice_spec['slice_fields']['name'] == slice_name:
                return slice_spec

    def owner_auth(self):
        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
        return TestUser(self, self.test_site, owner_spec).auth()

    def slice_name (self):
        return self.slice_spec['slice_fields']['name']

    # init slice with people, and then add nodes
    def create_slice(self):
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        utils.header("Creating slice {}".format(slice_name))
        self.test_plc.apiserver.AddSlice(auth, slice_fields)
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            test_user = TestUser(self, self.test_site, user_spec)
            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
        # add initscript code or name as appropriate
        if 'initscriptcode' in self.slice_spec:
            iscode = self.slice_spec['initscriptcode']
            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript_code', iscode)
        elif 'initscriptname' in self.slice_spec:
            isname = self.slice_spec['initscriptname']
            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript', isname)
        if 'omf-friendly' in self.slice_spec:
            utils.header("Making slice {} OMF-friendly".format(slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
# however this should rather use other tags by now, so we drop this for now
#        if 'vref' in self.slice_spec:
#            vref_value = self.slice_spec['vref']
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', vref_value)
        # epilogue
        self.add_nodes()

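    # for reference, a minimal slice_spec as consumed by create_slice() above
    # would look roughly like this -- values are hypothetical, and only keys
    # actually read in this class are shown
    #
    #     {'slice_fields'   : {'name' : 'inri_slice1'},
    #      'owner'          : 'pi_login',
    #      'usernames'      : ['user1', 'user2'],
    #      'nodenames'      : ['node1.example.org'],
    #      'initscriptname' : 'somescript',   # or 'initscriptcode' with inline code
    #     }
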
    def check_vsys_defaults (self, options, *args, **kwds):
        "check vsys tags match PLC_VSYS_DEFAULTS"
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
        values = [st['value'] for st in vsys_tags]
        expected = self.test_plc.plc_spec['expected_vsys_tags']
        result = set(values) == set(expected)
        if not result:
            print('Check vsys defaults with slice {}'.format(slice_name))
            print('Expected {}'.format(expected))
            print('Got {}'.format(values))
        return result

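    # note that both sides are compared as sets, so the check above is
    # insensitive to ordering and duplicate tag values; e.g. with a
    # hypothetical expected_vsys_tags of ['fd_tuntap', 'vif_up'], the slice
    # passes iff its 'vsys' tags carry exactly those two values
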
    # just add the nodes and handle tags
    def add_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        hostnames=[]
        for nodename in self.slice_spec['nodenames']:
            node_spec=self.test_site.locate_node(nodename)
            test_node=TestNode(self.test_plc, self.test_site, node_spec)
            hostnames += [test_node.name()]
        utils.header("Adding {} in {}".format(hostnames, slice_name))
        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)

    # trash the slice altogether
    def delete_slice(self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        utils.header("Deleting slice {}".format(slice_name))
        self.test_plc.apiserver.DeleteSlice(auth, slice_name)

    # keep the slice alive and just delete nodes
    def delete_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        print('retrieving slice {}'.format(slice_name))
        slice=self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
        node_ids=slice['node_ids']
        utils.header ("Deleting {} nodes from slice {}"\
                      .format(len(node_ids), slice_name))
        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)

    def locate_private_key(self):
        key_names=[]
        for username in self.slice_spec['usernames']:
            user_spec=self.test_site.locate_user(username)
            key_names += user_spec['key_names']
        return self.test_plc.locate_private_key_from_key_names (key_names)

    # for TestPlc.slice_mapper__tasks
    # i.e. these methods return a list of CompleterTasks that are merged into a
    # single Completer run, so we do not wait for each slice in turn
    # also, the __doc__ strings below are reused as the documentation of the
    # corresponding TestPlc steps, e.g. just 'ssh_slice'
    def ssh_slice__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, to check for slice creation"
        return self.ssh_tasks(options, expected=True, *args, **kwds)

    # when we expect the slice is not reachable
    def ssh_slice_off__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        return self.ssh_tasks(options, expected=False, *args, **kwds)

    def ssh_tasks(self, options, expected=True, command=None):
#                     timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout  = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period   = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
        # locate a key
        private_key = self.locate_private_key()
        if not private_key :
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert nodenames to real hostnames
        if expected:    msg="ssh slice access enabled"
        else:           msg="ssh slice access disabled"
        utils.header("checking for {} -- slice {}".format(msg, self.name()))

        tasks=[]
        slicename=self.name()
        dry_run = getattr(options, 'dry_run', False)
        for nodename in self.slice_spec['nodenames']:
            site_spec, node_spec = self.test_plc.locate_node(nodename)
            tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                                slicename, private_key, command, expected, dry_run))
        return tasks

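    # a rough usage sketch, assuming the task-merging behavior described above --
    # names other than ssh_slice__tasks are hypothetical:
    #
    #     all_tasks = []
    #     for test_slice in test_slices:
    #         all_tasks += test_slice.ssh_slice__tasks(options)
    #     # a single Completer run then polls every sliver in parallel,
    #     # instead of waiting for each slice one after the other
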
    def ssh_slice_basics (self, options, *args, **kwds):
        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
        overall = True
        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall=False
        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall=False
        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall=False
        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall=False
        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall=False
        return overall

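    # the rationale for the expected=False cases above: 'false' and a bogus
    # command name must fail, and 'ls /vservers' is presumably expected to fail
    # from inside a sliver because the node's /vservers area should not leak
    # into the sliver's own filesystem
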
    # picks just one nodename and runs the ssh command once
    def do_ssh_slice_once(self, options, command, expected):
        # locate a key
        private_key=self.locate_private_key()
        if not private_key :
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert nodenames to real hostnames
        slice_spec = self.slice_spec
        nodename=slice_spec['nodenames'][0]
        site_spec, node_spec = self.test_plc.locate_node(nodename)
        hostname=node_spec['node_fields']['hostname']

        if expected:
            msg="{} to return TRUE from ssh".format(command)
        else:
            msg="{} to return FALSE from ssh".format(command)

        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
        site_spec, node_spec = self.test_plc.locate_hostname(hostname)
        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
        full_command = test_ssh.actual_command(command)
        retcod = utils.system (full_command, silent=True, timeout=10)
        if getattr(options, 'dry_run', None):
            return True
        if expected:
            success = retcod==0
        else:
            success = retcod!=0
        if not success:
            utils.header ("WRONG RESULT for {}".format(msg))
        return success

    # for TestPlc.slice_mapper__tasks
    # check that /vservers/<> is present/deleted
    def slice_fs_present__tasks (self, options):
        "checks that /vservers/<slicename> exists on the filesystem"
        return self.check_rootfs_tasks(options, expected=True)
    def slice_fs_deleted__tasks (self, options):
        "checks that /vservers/<slicename> has been properly wiped off"
        return self.check_rootfs_tasks (options, expected=False)

    def check_rootfs_tasks (self, options, expected):
        # use constant admin key
        local_key = "keys/key_admin.rsa"
        node_infos = self.test_plc.all_node_infos()
        rootfs="/vservers/{}".format(self.name())
        class CompleterTaskRootfs (CompleterTaskNodeSsh):
            def __init__ (self, nodename, qemuname):
                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
                                              command="ls -d {}".format(rootfs))
            def failure_epilogue (self):
                if expected:
                    print("Could not stat {} - was expected to be present".format(rootfs))
                else:
                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
                                                              dry_run=self.dry_run, timeout=20))
        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]
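
# note on the nested class above: CompleterTaskRootfs deliberately closes over
# local_key, rootfs and expected from check_rootfs_tasks, so each task only
# needs the (nodename, qemuname) pair coming out of all_node_infos()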