# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import utils
import os, os.path
from datetime import datetime, timedelta
import time

from TestKey import TestKey
from TestUser import TestUser
from TestNode import TestNode, CompleterTaskNodeSsh
from TestSsh import TestSsh
from Completer import Completer, CompleterTask

class CompleterTaskSliceSsh (CompleterTask):

    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
        self.test_plc = test_plc
        self.hostname = hostname
        self.slicename = slicename
        self.private_key = private_key
        self.command = command
        self.dry_run = dry_run
        self.expected = expected

    def run (self, silent):
        (site_spec, node_spec) = self.test_plc.locate_hostname(self.hostname)
        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
        full_command = test_ssh.actual_command(self.command)
        retcod = utils.system (full_command, silent=silent)
        if self.dry_run: return True
        if self.expected:       return retcod == 0
        else:                   return retcod != 0

    def failure_epilogue (self):
        if self.expected:
            print "Could not ssh into sliver %s@%s" % (self.slicename, self.hostname)
        else:
            print "Could still ssh into sliver %s@%s (that was expected to be down)" % (self.slicename, self.hostname)

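# Illustrative sketch, not used in this file: tasks like the one above are meant
# to be handed over in bulk to a Completer, which polls them until they all
# succeed or a timeout expires - same calling convention as the commented-out
# code at the end of ssh_tasks() below:
#
#    tasks = [ CompleterTaskSliceSsh (test_plc, hostname, slicename,
#                                     private_key, 'true', True, False)
#              for hostname in hostnames ]
#    return Completer (tasks).run (timedelta(minutes=20),
#                                  timedelta(minutes=10),
#                                  timedelta(seconds=15))
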
class TestSlice:

    def __init__ (self, test_plc, test_site, slice_spec):
        self.test_plc = test_plc
        self.test_site = test_site
        self.slice_spec = slice_spec
        self.test_ssh = TestSsh(self.test_plc.test_ssh)

    def name(self):
        return self.slice_spec['slice_fields']['name']

    def get_slice(self, slice_name):
        for slice_spec in self.test_plc.plc_spec['slices']:
            if slice_spec['slice_fields']['name'] == slice_name:
                return slice_spec

    def owner_auth(self):
        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
        return TestUser(self, self.test_site, owner_spec).auth()

    def slice_name (self):
        return self.slice_spec['slice_fields']['name']

    # init the slice with people, and then add nodes
    def create_slice(self):
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        utils.header("Creating slice %s" % slice_name)
        self.test_plc.apiserver.AddSlice(auth, slice_fields)
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            test_user = TestUser(self, self.test_site, user_spec)
            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
        # add initscript code or name as appropriate
        if 'initscriptcode' in self.slice_spec:
            iscode = self.slice_spec['initscriptcode']
            utils.header("Adding initscript code %s in %s" % (iscode, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'initscript_code', iscode)
        elif 'initscriptname' in self.slice_spec:
            isname = self.slice_spec['initscriptname']
            utils.header("Adding initscript name %s in %s" % (isname, slice_name))
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'initscript', isname)
        if 'omf-friendly' in self.slice_spec:
            utils.header("Making slice %s OMF-friendly" % slice_name)
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
# however this should rather use other tags by now, so we drop this for now
#        if 'vref' in self.slice_spec:
#            vref_value = self.slice_spec['vref']
#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', vref_value)
        # epilogue
        self.add_nodes()

    def check_vsys_defaults (self, options, *args, **kwds):
        "check vsys tags match PLC_VSYS_DEFAULTS"
        auth = self.owner_auth()
        slice_fields = self.slice_spec['slice_fields']
        slice_name = slice_fields['name']
        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname': 'vsys', 'name': slice_name})
        values = [ st['value'] for st in vsys_tags ]
        expected = self.test_plc.plc_spec['expected_vsys_tags']
        result = set(values) == set(expected)
        if not result:
            print 'Check vsys defaults with slice %s' % slice_name
            print 'Expected %s' % expected
            print 'Got %s' % values
        return result

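    # note: 'expected_vsys_tags' is assumed here to be a plain list of vsys
    # script names in the plc_spec, compared above as a set; e.g. something
    # like (hypothetical values):
    #    'expected_vsys_tags' : [ 'fd_tuntap', 'vif_up', 'vif_down' ],
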
    # just add the nodes and handle tags
    def add_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        hostnames = []
        for nodename in self.slice_spec['nodenames']:
            node_spec = self.test_site.locate_node(nodename)
            test_node = TestNode(self.test_plc, self.test_site, node_spec)
            hostnames += [test_node.name()]
        utils.header("Adding %r in %s" % (hostnames, slice_name))
        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)

    # trash the slice altogether
    def delete_slice(self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        utils.header("Deleting slice %s" % slice_name)
        self.test_plc.apiserver.DeleteSlice(auth, slice_name)

    # keep the slice alive and just delete nodes
    def delete_nodes (self):
        auth = self.owner_auth()
        slice_name = self.slice_name()
        print 'retrieving slice %s' % slice_name
        slice = self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
        node_ids = slice['node_ids']
        utils.header ("Deleting %d nodes from slice %s" % (len(node_ids), slice_name))
        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)

    def locate_private_key(self):
        key_names = []
        for username in self.slice_spec['usernames']:
            user_spec = self.test_site.locate_user(username)
            key_names += user_spec['key_names']
        return self.test_plc.locate_private_key_from_key_names (key_names)

    # for TestPlc.slice_mapper__tasks
    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
    # to avoid waiting for as many slices as the Plc has
    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
    def ssh_slice__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, to check for slice creation"
        return self.ssh_tasks(options, expected=True, *args, **kwds)

    # when we expect the slice not to be reachable
    def ssh_slice_off__tasks (self, options, *args, **kwds):
        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
        return self.ssh_tasks(options, expected=False, *args, **kwds)

    def ssh_tasks(self, options, expected=True, command=None):
#                     timeout_minutes=20, silent_minutes=10, period_seconds=15):
#        timeout  = timedelta(minutes=timeout_minutes)
#        graceout = timedelta(minutes=silent_minutes)
#        period   = timedelta(seconds=period_seconds)
        if not command:
            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice %s" % self.name())
            return False

        if expected:    msg = "ssh slice access enabled"
        else:           msg = "ssh slice access disabled"
        utils.header("checking for %s -- slice %s" % (msg, self.name()))

        # convert nodenames to real hostnames and build one task per node
        tasks = []
        slicename = self.name()
        dry_run = getattr(options, 'dry_run', False)
        for nodename in self.slice_spec['nodenames']:
            (site_spec, node_spec) = self.test_plc.locate_node(nodename)
            tasks.append(CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
                                               slicename, private_key, command, expected, dry_run))
        return tasks
#        return Completer (tasks).run (timeout, graceout, period)

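    # Illustrative sketch, based on the comments above (hypothetical code, the
    # actual logic lives in TestPlc): slice_mapper__tasks is expected to merge
    # the per-slice task lists into one Completer run, roughly:
    #
    #    all_tasks = []
    #    for slice_spec in self.plc_spec['slices']:
    #        all_tasks += TestSlice(self, test_site, slice_spec).ssh_slice__tasks(options)
    #    return Completer (all_tasks).run (timeout, graceout, period)
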
    def ssh_slice_basics (self, options, *args, **kwds):
        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
        overall = True
        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall = False
        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall = False
        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall = False
        return overall

    # pick just one nodename and run the ssh command once
    def do_ssh_slice_once(self, options, command, expected):
        # locate a key
        private_key = self.locate_private_key()
        if not private_key:
            utils.header("WARNING: Cannot find a valid key for slice %s" % self.name())
            return False

        # convert the nodename to a real hostname
        slice_spec = self.slice_spec
        nodename = slice_spec['nodenames'][0]
        (site_spec, node_spec) = self.test_plc.locate_node(nodename)
        hostname = node_spec['node_fields']['hostname']

        if expected:    msg = "%s to return TRUE from ssh" % command
        else:           msg = "%s to return FALSE from ssh" % command

        utils.header("checking %s -- slice %s on node %s" % (msg, self.name(), hostname))
        (site_spec, node_spec) = self.test_plc.locate_hostname(hostname)
        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
        full_command = test_ssh.actual_command(command)
        retcod = utils.system (full_command, silent=True)
        if getattr(options, 'dry_run', None): return True
        if expected:    success = retcod == 0
        else:           success = retcod != 0
        if not success: utils.header ("WRONG RESULT for %s" % msg)
        return success

    # for TestPlc.slice_mapper__tasks
    # check that /vservers/<slicename> is present/deleted
    def slice_fs_present__tasks (self, options):
        "checks that /vservers/<slicename> exists on the filesystem"
        return self.check_rootfs_tasks(options, expected=True)

    def slice_fs_deleted__tasks (self, options):
        "checks that /vservers/<slicename> has been properly wiped off"
        return self.check_rootfs_tasks (options, expected=False)

    def check_rootfs_tasks (self, options, expected):
        # use the constant admin key
        local_key = "keys/key_admin.rsa"
        node_infos = self.test_plc.all_node_infos()
        rootfs = "/vservers/%s" % self.name()
        # the inner class closes over local_key, expected and rootfs
        class CompleterTaskRootfs (CompleterTaskNodeSsh):
            def __init__ (self, nodename, qemuname):
                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
                                              command="ls -d %s" % rootfs)
            def failure_epilogue (self):
                if expected:
                    print "Could not stat %s - was expected to be present" % rootfs
                else:
                    print "Sliver rootfs %s still present - this is unexpected" % rootfs
                    utils.system(self.test_ssh.actual_command("ls -l %s; du -hs %s" % (rootfs, rootfs),
                                                              dry_run=self.dry_run))
        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename, qemuname) in node_infos ]