fix bug in get_aggregate_nodes()
[sfa.git] sfa/openstack/nova_driver.py
import time
import datetime
#
from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
    RecordNotFound, SfaNotImplemented, SliverDoesNotExist
from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
from sfa.util.cache import Cache
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
from sfa.openstack.nova_shell import NovaShell
from sfa.openstack.osaggregate import OSAggregate
from sfa.plc.plslices import PlSlices
# hrn_to_pl_slicename is still needed by sliver_status, which has not been ported off the PLC api yet
from sfa.plc.plxrn import hrn_to_pl_slicename
from sfa.util.osxrn import OSXrn


def list_to_dict(recs, key):
    """
    convert a list of dictionaries into a dictionary keyed on the
    specified dictionary key
    """
    return dict ( [ (rec[key],rec) for rec in recs ] )

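# A minimal usage sketch (the record values are made up, for illustration only):
#   recs = [ {'hrn': 'topauth.alice', 'type': 'user'},
#            {'hrn': 'topauth.myslice', 'type': 'slice'} ]
#   list_to_dict(recs, 'hrn')
#   => { 'topauth.alice':   {'hrn': 'topauth.alice', 'type': 'user'},
#        'topauth.myslice': {'hrn': 'topauth.myslice', 'type': 'slice'} }
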
#
# NovaShell plays the same role here as PlShell does in the PLC driver:
# it exposes the testbed API so methods can be called as-is, and it takes
# care of authentication from the global config

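# A hedged usage sketch; 'alice' and 'myslice' are made-up names, but the
# calls shown are the ones this driver actually makes further down:
#   shell = NovaShell (config)
#   user = shell.auth_manager.get_user('alice')
#   project = shell.auth_manager.get_project('myslice')
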
class NovaDriver (Driver):

    # the cache instance is a class member so it survives across incoming requests
    cache = None

    def __init__ (self, config):
        Driver.__init__ (self, config)
        self.shell = NovaShell (config)
        self.cache=None
        if config.SFA_AGGREGATE_CACHING:
            if NovaDriver.cache is None:
                NovaDriver.cache = Cache()
            self.cache = NovaDriver.cache

    ########################################
    ########## registry oriented
    ########################################

    ########## disabled users
    def is_enabled (self, record):
        # all records are enabled
        return True

    def augment_records_with_testbed_info (self, sfa_records):
        return self.fill_record_info (sfa_records)

    ##########
    # xxx this is still mostly a stub: sfa_fields_to_pl_fields comes from the
    # PLC driver and is not defined on this class
    def register (self, sfa_record, hrn, pub_key):
        type = sfa_record['type']
        #pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)

        if type == 'slice':
            acceptable_fields=['url', 'instantiation', 'name', 'description']
            # add slice description, name, researchers, PI
            pass

        elif type == 'user':
            # add person roles, projects and keys
            pass
        # nothing is created in the testbed yet, so there is no pointer to return
        pointer = -1
        return pointer

    ##########
    # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        pointer = old_sfa_record['pointer']
        type = old_sfa_record['type']

        # new_key implemented for users only
        if new_key and type not in [ 'user' ]:
            raise UnknownSfaType(type)

        elif type == "slice":
            # can update description, researchers and PI
            pass
        elif type == "user":
            # can update slices, keys and roles
            pass
        return True

    ##########
    def remove (self, sfa_record):
        type=sfa_record['type']
        name = Xrn(sfa_record['hrn']).get_leaf()
        if type == 'user':
            if self.shell.user_get(name):
                self.shell.user_delete(name)
        elif type == 'slice':
            if self.shell.project_get(name):
                self.shell.project_delete(name)
        return True

    ####################
    def fill_record_info(self, records):
        """
        Given a (list of) SFA record, fill in the testbed-specific
        and SFA-specific fields in the record.
        """
        if not isinstance(records, list):
            records = [records]

        for record in records:
            name = Xrn(record['hrn']).get_leaf()
            os_record = None
            if record['type'] == 'user':
                os_record = self.shell.auth_manager.get_user(name)
                projects = self.shell.db.project_get_by_user(name)
                record['slices'] = [self.hrn + "." + proj.name for \
                                    proj in projects]
                record['roles'] = self.shell.db.user_get_roles(name)
                keys = self.shell.db.key_pair_get_all_by_user(name)
                record['keys'] = [key.public_key for key in keys]
            elif record['type'] == 'slice':
                os_record = self.shell.auth_manager.get_project(name)
                record['description'] = os_record.description
                record['PI'] = [self.hrn + "." + os_record.project_manager.name]
                record['geni_creator'] = record['PI']
                record['researcher'] = [self.hrn + "." + user for \
                                        user in os_record.member_ids]
            else:
                continue
            record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
            record['geni_certificate'] = record['gid']
            record['name'] = os_record.name
            #if os_record.created_at is not None:
            #    record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
            #if os_record.updated_at is not None:
            #    record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))

        return records
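    # Roughly, a 'user' record comes back filled in like this (values are made
    # up; the shape follows the assignments above):
    #   {'hrn': 'topauth.alice', 'type': 'user',
    #    'slices': ['topauth.myslice'], 'roles': [...], 'keys': ['ssh-rsa ...'],
    #    'geni_urn': '...', 'geni_certificate': <gid>, 'name': 'alice', ...}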


    ####################
    # the api works by changes: compute what needs to be added/deleted
    def update_relation (self, subject_type, target_type, subject_id, target_ids):
        # hard-wire the code for slice/user for now, could be smarter if needed
        if subject_type =='slice' and target_type == 'user':
            subject=self.shell.project_get(subject_id)[0]
            current_target_ids = [user.name for user in subject.members]
            add_target_ids = list ( set (target_ids).difference(current_target_ids))
            del_target_ids = list ( set (current_target_ids).difference(target_ids))
            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
            for target_id in add_target_ids:
                self.shell.project_add_member(target_id,subject_id)
                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
            for target_id in del_target_ids:
                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
                self.shell.project_remove_member(target_id, subject_id)
        else:
            logger.info('unexpected relation to maintain, %s -> %s'%(subject_type,target_type))


    ########################################
    ########## aggregate oriented
    ########################################

    def testbed_name (self): return "openstack"

    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version (self):
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        return {
            'testbed':self.testbed_name(),
            'geni_request_rspec_versions': request_rspec_versions,
            'geni_ad_rspec_versions': ad_rspec_versions,
            }

    def list_slices (self, creds, options):
        # look in cache first
        if self.cache:
            slices = self.cache.get('slices')
            if slices:
                logger.debug("OpenStackDriver.list_slices returns from cache")
                return slices

        # get data from db
        projs = self.shell.auth_manager.get_projects()
        slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs]

        # cache the result
        if self.cache:
            logger.debug ("OpenStackDriver.list_slices stores value in cache")
            self.cache.add('slices', slice_urns)

        return slice_urns

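    # A rough illustration (assumed shape; the exact urn is whatever OSXrn builds):
    # a project named 'myslice' would come back as something like
    # 'urn:publicid:IDN+<authority>+slice+myslice'
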
    # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_"+options.get('info', 'default')

        # look in cache first
        if cached_requested and self.cache and not slice_hrn:
            rspec = self.cache.get(version_string)
            if rspec:
                logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
                return rspec

        #panos: passing user-defined options
        #print "manager options = ",options
        aggregate = OSAggregate(self)
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
                                    options=options)

        # cache the result
        if self.cache and not slice_hrn:
            logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
            self.cache.add(version_string, rspec)

        return rspec

    # xxx this method still talks to the PLC API (GetSlices/GetNodes) and has
    # not been ported to nova yet; it will not work against NovaShell as-is
    def sliver_status (self, slice_urn, slice_hrn):
        # find out where this slice is currently running
        slicename = hrn_to_pl_slicename(slice_hrn)

        slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
        if len(slices) == 0:
            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
        slice = slices[0]

        # report about the local nodes only
        nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
                              ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])

        if len(nodes) == 0:
            raise SliverDoesNotExist("You have not allocated any slivers here")

        site_ids = [node['site_id'] for node in nodes]

        result = {}
        top_level_status = 'unknown'
        if nodes:
            top_level_status = 'ready'
        result['geni_urn'] = slice_urn
        result['pl_login'] = slice['name']
        result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))

        resources = []
        for node in nodes:
            res = {}
            res['pl_hostname'] = node['hostname']
            res['pl_boot_state'] = node['boot_state']
            res['pl_last_contact'] = node['last_contact']
            if node['last_contact'] is not None:
                res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'])
            res['geni_urn'] = sliver_id
            if node['boot_state'] == 'boot':
                res['geni_status'] = 'ready'
            else:
                res['geni_status'] = 'failed'
                top_level_status = 'failed'

            res['geni_error'] = ''

            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        return result

    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):

        aggregate = OSAggregate(self)
        slicename = get_leaf(slice_hrn)

        # parse rspec
        rspec = RSpec(rspec_string)
        requested_attributes = rspec.version.get_slice_attributes()

        # ensure slice record exists
        slice = aggregate.verify_slice(slicename, users, options=options)
        # ensure person records exist
        persons = aggregate.verify_slice_users(slicename, users, options=options)
        # add/remove instances as requested
        aggregate.verify_instances(slicename, rspec)

        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)

    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        name = OSXrn(xrn=slice_urn).name
        slice = self.shell.project_get(name)
        if not slice:
            return 1

        # xxx leftover from the PLC driver ('slicename' is not even defined here)
        #self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
        instances = self.shell.db.instance_get_all_by_project(name)
        for instance in instances:
            self.shell.db.instance_destroy(instance.instance_id)
        return 1

    def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
        return True

    def start_slice (self, slice_urn, slice_hrn, creds):
        return 1

    def stop_slice (self, slice_urn, slice_hrn, creds):
        name = OSXrn(xrn=slice_urn).name
        slice = self.shell.project_get(name)
        instances = self.shell.db.instance_get_all_by_project(name)
        for instance in instances:
            self.shell.db.instance_stop(instance.instance_id)
        return 1

    def reset_slice (self, slice_urn, slice_hrn, creds):
        raise SfaNotImplemented ("reset_slice not available at this interface")

    # xxx this code is quite old and has not run for ages
    # it is obviously totally broken and needs a rewrite
    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
        raise SfaNotImplemented("OpenStackDriver.get_ticket needs a rewrite")
# please keep this code for future reference
#        slices = PlSlices(self)
#        peer = slices.get_peer(slice_hrn)
#        sfa_peer = slices.get_sfa_peer(slice_hrn)
#
#        # get the slice record
#        credential = api.getCredential()
#        interface = api.registries[api.hrn]
#        registry = api.server_proxy(interface, credential)
#        records = registry.Resolve(xrn, credential)
#
#        # make sure we get a local slice record
#        record = None
#        for tmp_record in records:
#            if tmp_record['type'] == 'slice' and \
#               not tmp_record['peer_authority']:
#    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
#                slice_record = SliceRecord(dict=tmp_record)
#        if not record:
#            raise RecordNotFound(slice_hrn)
#
#        # similar to CreateSliver, we must verify that the required records exist
#        # at this aggregate before we can issue a ticket
#        # parse rspec
#        rspec = RSpec(rspec_string)
#        requested_attributes = rspec.version.get_slice_attributes()
#
#        # ensure site record exists
#        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure slice record exists
#        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure person records exist
#    # xxx users is undefined in this context
#        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
#        # ensure slice attributes exist
#        slices.verify_slice_attributes(slice, requested_attributes)
#
#        # get sliver info
#        slivers = slices.get_slivers(slice_hrn)
#
#        if not slivers:
#            raise SliverDoesNotExist(slice_hrn)
#
#        # get initscripts
#        initscripts = []
#        data = {
#            'timestamp': int(time.time()),
#            'initscripts': initscripts,
#            'slivers': slivers
#        }
#
#        # create the ticket
#        object_gid = record.get_gid_object()
#        new_ticket = SfaTicket(subject = object_gid.get_subject())
#        new_ticket.set_gid_caller(api.auth.client_gid)
#        new_ticket.set_gid_object(object_gid)
#        new_ticket.set_issuer(key=api.key, subject=self.hrn)
#        new_ticket.set_pubkey(object_gid.get_pubkey())
#        new_ticket.set_attributes(data)
#        new_ticket.set_rspec(rspec)
#        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
#        new_ticket.encode()
#        new_ticket.sign()
#
#        return new_ticket.save_to_string(save_parents=True)