turn off testing of the omf feature that has been removed
[tests.git] / system / TestSlice.py
# -*- python3 -*-
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2015 INRIA
#
import utils
import os, os.path
from datetime import datetime, timedelta
import time

from TestKey import TestKey
from TestUser import TestUser
from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import CompleterTask

class CompleterTaskSliceSsh (CompleterTask):
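    """
    A CompleterTask that probes ssh access into one sliver
    (slicename@hostname); depending on 'expected', success means
    the ssh command returned 0, or that it did not.
    """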

    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
        self.test_plc = test_plc
        self.hostname = hostname
        self.slicename = slicename
        self.private_key = private_key
        self.command = command
        self.dry_run = dry_run
        self.expected = expected

    def run (self, silent):
        site_spec, node_spec = self.test_plc.locate_hostname(self.hostname)
        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
        full_command = test_ssh.actual_command(self.command)
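        # utils.system returns the command's exit status - 0 means the probe succeeded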
        retcod = utils.system (full_command, silent=silent, timeout=10)
        if self.dry_run:        return True
        if self.expected:       return retcod == 0
        else:                   return retcod != 0

    def failure_epilogue (self):
        if self.expected:
            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
        else:
            print("Could still ssh into sliver {}@{} (that was expected to be down)"
                  .format(self.slicename, self.hostname))

class TestSlice:
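    """
    Wraps one slice_spec entry from the plc_spec; knows how to create and
    delete the slice through the API, and how to check ssh access and the
    sliver filesystem on the slice's nodes.

    A rough sketch of the slice_spec shape, as inferred from how this class
    reads it - field values are illustrative only:

    slice_spec = {
        'slice_fields' : {'name' : 'site1_slice1', ...},
        'owner'        : 'username',           # a user in the test site
        'usernames'    : ['username', ...],    # users to add to the slice
        'nodenames'    : ['nodename', ...],    # nodes to attach
        # optional: 'initscriptcode' or 'initscriptname'
    }
    """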

    def __init__ (self, test_plc, test_site, slice_spec):
        self.test_plc = test_plc
        self.test_site = test_site
        self.slice_spec = slice_spec
        self.test_ssh = TestSsh(self.test_plc.test_ssh)

    def name(self):
        return self.slice_spec['slice_fields']['name']

    def get_slice(self, slice_name):
        for slice_spec in self.test_plc.plc_spec['slices']:
            if slice_spec['slice_fields']['name'] == slice_name:
                return slice_spec

    def owner_auth(self):
        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
        return TestUser(self, self.test_site, owner_spec).auth()

    def slice_name (self):
        return self.slice_spec['slice_fields']['name']

    # init slice with people, and then add nodes
    def create_slice(self):
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        utils.header("Creating slice {}".format(slice_name))
        self.test_plc.apiserver.AddSlice(auth, slice_fields)
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            test_user = TestUser(self, self.test_site, user_spec)
            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
        # add initscript code or name as appropriate
        if 'initscriptcode' in self.slice_spec:
            iscode = self.slice_spec['initscriptcode']
            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript_code', iscode)
        elif 'initscriptname' in self.slice_spec:
            isname = self.slice_spec['initscriptname']
            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript', isname)
# omf-friendly slices are a deprecated feature
#        if 'omf-friendly' in self.slice_spec:
#            utils.header("Making slice {} OMF-friendly".format(slice_name))
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
#
# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
# however this should rather use other tags by now, so we drop this for now
#        if self.slice_spec.has_key ('vref'):
#            vref_value = self.slice_spec['vref']
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
        # epilogue
        self.add_nodes()

    def check_vsys_defaults (self, options, *args, **kwds):
        "check vsys tags match PLC_VSYS_DEFAULTS"
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
        values = [st['value'] for st in vsys_tags]
        expected = self.test_plc.plc_spec['expected_vsys_tags']
        result = set(values) == set(expected)
        if not result:
            print('Check vsys defaults with slice {}'.format(slice_name))
            print('Expected {}'.format(expected))
            print('Got {}'.format(values))
        return result

    # just add the nodes
    def add_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        hostnames = []
        for nodename in self.slice_spec['nodenames']:
            node_spec = self.test_site.locate_node(nodename)
            test_node = TestNode(self.test_plc, self.test_site, node_spec)
            hostnames += [test_node.name()]
        utils.header("Adding {} in {}".format(hostnames, slice_name))
        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)

    # trash the slice altogether
    def delete_slice(self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        utils.header("Deleting slice {}".format(slice_name))
        self.test_plc.apiserver.DeleteSlice(auth, slice_name)

    # keep the slice alive and just delete nodes
    def delete_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        print('retrieving slice {}'.format(slice_name))
        # GetSlices returns a list; take the first match
        slice_obj = self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
        node_ids = slice_obj['node_ids']
        utils.header("Deleting {} nodes from slice {}"
                     .format(len(node_ids), slice_name))
        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)

    def locate_private_key(self):
        key_names = []
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            key_names += user_spec['key_names']
        return self.test_plc.locate_private_key_from_key_names (key_names)

    # for TestPlc.slice_mapper__tasks
    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
    # to avoid waiting for as many slices as the Plc has
    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
    def ssh_slice__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, to check for slice creation"
        return self.ssh_tasks(options, *args, expected=True, **kwds)

    # when we expect the slice to be unreachable
    def ssh_slice_off__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        return self.ssh_tasks(options, *args, expected=False, **kwds)

    def ssh_tasks(self, options, expected=True, command=None):
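        """
        Builds one CompleterTaskSliceSsh per node in the slice, so that a
        single Completer run can probe all the slivers in parallel;
        'expected' says whether ssh access should succeed or fail.
        """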
#                     timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout  = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period   = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        if expected:    msg = "ssh slice access enabled"
        else:           msg = "ssh slice access disabled"
        utils.header("checking for {} -- slice {}".format(msg, self.name()))

        tasks = []
        slicename = self.name()
        dry_run = getattr(options, 'dry_run', False)
        # convert nodenames to real hostnames
        for nodename in self.slice_spec['nodenames']:
            site_spec, node_spec = self.test_plc.locate_node(nodename)
            tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                                slicename, private_key, command, expected, dry_run))
        return tasks

    def ssh_slice_basics (self, options, *args, **kwds):
        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
        overall = True
        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall = False
        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall = False
        return overall

    # picks just one nodename and runs the ssh command once
    def do_ssh_slice_once(self, options, command, expected):
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert nodenames to real hostnames
        slice_spec = self.slice_spec
        nodename = slice_spec['nodenames'][0]
        site_spec, node_spec = self.test_plc.locate_node(nodename)
        hostname = node_spec['node_fields']['hostname']

        if expected:
            msg = "{} to return TRUE from ssh".format(command)
        else:
            msg = "{} to return FALSE from ssh".format(command)

        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
        site_spec, node_spec = self.test_plc.locate_hostname(hostname)
        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
        full_command = test_ssh.actual_command(command)
        retcod = utils.system (full_command, silent=True, timeout=10)
        if getattr(options, 'dry_run', None):
            return True
        if expected:
            success = retcod == 0
        else:
            success = retcod != 0
        if not success:
            utils.header ("WRONG RESULT for {}".format(msg))
        return success

    # for TestPlc.slice_mapper__tasks
    # check that /vservers/<> is present/deleted
    def slice_fs_present__tasks (self, options):
        "checks that /vservers/<slicename> exists on the filesystem"
        return self.check_rootfs_tasks(options, expected=True)
    def slice_fs_deleted__tasks (self, options):
        "checks that /vservers/<slicename> has been properly wiped off"
        return self.check_rootfs_tasks (options, expected=False)

    def check_rootfs_tasks (self, options, expected):
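        # note: the nested class closes over local_key, rootfs and expected,
        # so every task probes the same path with the same admin key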
        # use constant admin key
        local_key = "keys/key_admin.rsa"
        node_infos = self.test_plc.all_node_infos()
        rootfs = "/vservers/{}".format(self.name())
        class CompleterTaskRootfs (CompleterTaskNodeSsh):
            def __init__ (self, nodename, qemuname):
                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
                                              command="ls -d {}".format(rootfs))
            def failure_epilogue (self):
                if expected:
                    print("Could not stat {} - was expected to be present".format(rootfs))
                else:
                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
                                                              dry_run=self.dry_run, timeout=20))
        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]