# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import utils
import os, os.path
from datetime import datetime, timedelta
import time

from TestKey import TestKey
from TestUser import TestUser
from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import CompleterTask

class CompleterTaskSliceSsh (CompleterTask):

    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
        self.test_plc = test_plc
        self.hostname = hostname
        self.slicename = slicename
        self.private_key = private_key
        self.command = command
        self.dry_run = dry_run
        self.expected = expected

    def run (self, silent):
        # sanity check - this raises if the hostname is unknown to the plc
        (site_spec, node_spec) = self.test_plc.locate_hostname(self.hostname)
        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
        full_command = test_ssh.actual_command(self.command)
        retcod = utils.system (full_command, silent=silent)
        if self.dry_run:
            return True
        if self.expected:
            return retcod == 0
        else:
            return retcod != 0

    def failure_epilogue (self):
        if self.expected:
            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
        else:
            print("Could still ssh into sliver {}@{} (that was expected to be down)"
                  .format(self.slicename, self.hostname))

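# Usage sketch - not executed, for illustration only. Tasks of this kind are
# meant to be merged into a single Completer run; the Completer(...).run()
# signature shown here is an assumption, see Completer.py for the actual API:
#
#     tasks = [CompleterTaskSliceSsh(test_plc, hostname, 'myslice', key_path,
#                                    'id', expected=True, dry_run=False)
#              for hostname in hostnames]
#     Completer(tasks).run(timedelta(minutes=20), timedelta(minutes=10),
#                          timedelta(seconds=15))
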
class TestSlice:

    def __init__ (self, test_plc, test_site, slice_spec):
        self.test_plc = test_plc
        self.test_site = test_site
        self.slice_spec = slice_spec
        self.test_ssh = TestSsh(self.test_plc.test_ssh)

    def name(self):
        return self.slice_spec['slice_fields']['name']

    def get_slice(self, slice_name):
        for slice_spec in self.test_plc.plc_spec['slices']:
            if slice_spec['slice_fields']['name'] == slice_name:
                return slice_spec

    def owner_auth(self):
        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
        return TestUser(self, self.test_site, owner_spec).auth()

    def slice_name (self):
        return self.slice_spec['slice_fields']['name']

    # init the slice with its users, and then add nodes
    def create_slice(self):
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        utils.header("Creating slice {}".format(slice_name))
        self.test_plc.apiserver.AddSlice(auth, slice_fields)
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            test_user = TestUser(self, self.test_site, user_spec)
            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
        # add initscript code or name as appropriate
        if 'initscriptcode' in self.slice_spec:
            iscode = self.slice_spec['initscriptcode']
            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript_code', iscode)
        elif 'initscriptname' in self.slice_spec:
            isname = self.slice_spec['initscriptname']
            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
                                                'initscript', isname)
        if 'omf-friendly' in self.slice_spec:
            utils.header("Making slice {} OMF-friendly".format(slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
# however this should rather use other tags by now, so we drop this for now
#        if 'vref' in self.slice_spec:
#            vref_value = self.slice_spec['vref']
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', vref_value)
        # epilogue
        self.add_nodes()

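    # Example slice_spec (illustrative values; the keys are the ones this
    # class actually reads):
    #
    #     {'slice_fields'   : {'name' : 'mysite_myslice'},
    #      'owner'          : 'pi_login',
    #      'usernames'      : ['user1', 'user2'],
    #      'nodenames'      : ['node1.example.org'],
    #      # optional:
    #      'initscriptname' : 'myinitscript',   # or 'initscriptcode'
    #      'omf-friendly'   : True}
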
    def check_vsys_defaults (self, options, *args, **kwds):
        "check that the slice's vsys tags match PLC_VSYS_DEFAULTS"
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
        values = [st['value'] for st in vsys_tags]
        expected = self.test_plc.plc_spec['expected_vsys_tags']
        result = set(values) == set(expected)
        if not result:
            print('Check vsys defaults with slice {}'.format(slice_name))
            print('Expected {}'.format(expected))
            print('Got {}'.format(values))
        return result

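    # For instance, if PLC_VSYS_DEFAULTS were set to "vsys_a vsys_b"
    # (hypothetical names), expected_vsys_tags would be ['vsys_a', 'vsys_b']
    # and the check passes iff the slice carries exactly that set of 'vsys' tags.
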
    # just add the nodes - tags are dealt with in create_slice
    def add_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        hostnames = []
        for nodename in self.slice_spec['nodenames']:
            node_spec = self.test_site.locate_node(nodename)
            test_node = TestNode(self.test_plc, self.test_site, node_spec)
            hostnames += [test_node.name()]
        utils.header("Adding {} in {}".format(hostnames, slice_name))
        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)

    # trash the slice altogether
    def delete_slice(self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        utils.header("Deleting slice {}".format(slice_name))
        self.test_plc.apiserver.DeleteSlice(auth, slice_name)

    # keep the slice alive and just delete nodes
    def delete_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        print('retrieving slice {}'.format(slice_name))
        the_slice = self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
        node_ids = the_slice['node_ids']
        utils.header ("Deleting {} nodes from slice {}"
                      .format(len(node_ids), slice_name))
        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)

    def locate_private_key(self):
        key_names = []
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            key_names += user_spec['key_names']
        return self.test_plc.locate_private_key_from_key_names (key_names)

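    # e.g. if the slice has two users whose specs carry key_names ['key1'] and
    # ['key2'] (hypothetical names), this asks test_plc for a local private key
    # file matching any of those names
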
    # for TestPlc.slice_mapper__tasks
    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
    # so as to avoid waiting sequentially for as many slices as the PLC has
    # also the __doc__ lines are used to describe the TestPlc methods, e.g. just 'ssh_slice'
    def ssh_slice__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, to check for slice creation"
        return self.ssh_tasks(options, expected=True, *args, **kwds)

    # when we expect the slice not to be reachable
    def ssh_slice_off__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        return self.ssh_tasks(options, expected=False, *args, **kwds)

    def ssh_tasks(self, options, expected=True, command=None):
#                 timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout  = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period   = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        if expected:
            msg = "ssh slice access enabled"
        else:
            msg = "ssh slice access disabled"
        utils.header("checking for {} -- slice {}".format(msg, self.name()))

        tasks = []
        slicename = self.name()
        dry_run = getattr(options, 'dry_run', False)
        # convert nodenames into real hostnames, and create one task per node
        for nodename in self.slice_spec['nodenames']:
            (site_spec, node_spec) = self.test_plc.locate_node(nodename)
            tasks.append(CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                               slicename, private_key, command, expected, dry_run))
        return tasks

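    # Consumption sketch (hedged - the real plumbing lives in TestPlc.py):
    # slice_mapper__tasks presumably gathers the tasks from every slice into
    # one Completer run, along these lines:
    #
    #     all_tasks = []
    #     for slice_spec in self.plc_spec['slices']:
    #         all_tasks += TestSlice(self, test_site, slice_spec).ssh_slice__tasks(options)
    #     Completer(all_tasks).run(timeout, graceout, period)
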
    def ssh_slice_basics (self, options, *args, **kwds):
        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
        overall = True
        if not self.do_ssh_slice_once(options, expected=True,  command='true'):
            overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='false'):
            overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'):
            overall = False
        if not self.do_ssh_slice_once(options, expected=True,  command='ps'):
            overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'):
            overall = False
        return overall

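    # Note: 'ls /vservers' is expected to fail because /vservers exists on the
    # node itself, not inside a sliver - so a failure here also confirms that
    # the command really ran confined inside the sliver.
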
    # picks just one nodename and runs the ssh command once
    def do_ssh_slice_once(self, options, command, expected):
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
            return False

        # convert the first nodename into a real hostname
        slice_spec = self.slice_spec
        nodename = slice_spec['nodenames'][0]
        (site_spec, node_spec) = self.test_plc.locate_node(nodename)
        hostname = node_spec['node_fields']['hostname']

        if expected:
            msg = "{} to return TRUE from ssh".format(command)
        else:
            msg = "{} to return FALSE from ssh".format(command)

        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
        full_command = test_ssh.actual_command(command)
        retcod = utils.system (full_command, silent=True)
        if getattr(options, 'dry_run', None):
            return True
        if expected:
            success = retcod == 0
        else:
            success = retcod != 0
        if not success:
            utils.header ("WRONG RESULT for {}".format(msg))
        return success

    # for TestPlc.slice_mapper__tasks
    # check that /vservers/<slicename> is present/deleted as expected
    def slice_fs_present__tasks (self, options):
        "checks that /vservers/<slicename> exists on the filesystem"
        return self.check_rootfs_tasks(options, expected=True)
    def slice_fs_deleted__tasks (self, options):
        "checks that /vservers/<slicename> has been properly wiped off"
        return self.check_rootfs_tasks (options, expected=False)

    def check_rootfs_tasks (self, options, expected):
        # use the constant admin key
        local_key = "keys/key_admin.rsa"
        node_infos = self.test_plc.all_node_infos()
        rootfs = "/vservers/{}".format(self.name())
        class CompleterTaskRootfs (CompleterTaskNodeSsh):
            def __init__ (self, nodename, qemuname):
                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
                                              command="ls -d {}".format(rootfs))
            def failure_epilogue (self):
                if expected:
                    print("Could not stat {} - was expected to be present".format(rootfs))
                else:
                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(rootfs=rootfs),
                                                              dry_run=self.dry_run))
        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename, qemuname) in node_infos ]
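
    # Design note: CompleterTaskRootfs is deliberately defined inside
    # check_rootfs_tasks, so that local_key, rootfs and expected are captured
    # by closure instead of being threaded through yet another __init__.
    # From its use above, all_node_infos() is assumed to return pairs like
    #     [('node1.example.org', 'qemu-host1'), ...]   # (nodename, qemuname)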