rename NovaShell to Shell. cleanup
[sfa.git] / sfa / openstack / nova_driver.py
import time
import datetime

from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
    RecordNotFound, SfaNotImplemented, SliverDoesNotExist, \
    SfaInvalidArgument

from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
from sfa.planetlab.plxrn import PlXrn
from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.util.cache import Cache
from sfa.trust.credential import Credential
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket

from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec

# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
from sfa.openstack.shell import Shell
from sfa.openstack.euca_shell import EucaShell
from sfa.openstack.osaggregate import OSAggregate
from sfa.planetlab.plslices import PlSlices


def list_to_dict(recs, key):
    """
    convert a list of dictionaries into a dictionary keyed on the
    specified dictionary key
    """
    return dict ( [ (rec[key],rec) for rec in recs ] )
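# illustrative example of what list_to_dict does (made-up records):
#   list_to_dict([{'name': 'n1', 'id': 1}, {'name': 'n2', 'id': 2}], 'name')
#   => {'n1': {'name': 'n1', 'id': 1}, 'n2': {'name': 'n2', 'id': 2}}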

#
# self.shell wraps the nova API (auth_manager, db, ...); methods
# can be called on it as-is and it takes care of authentication
# from the global config
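#
# a couple of illustrative calls, mirroring how self.shell is used further
# down in this driver (project/user names are made up):
#   shell = Shell (config)
#   shell.auth_manager.get_project('some_project')
#   shell.db.key_pair_get_all_by_user('some_user')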

class NovaDriver (Driver):

    # the cache instance is a class member so it survives across incoming requests
    cache = None

    def __init__ (self, config):
        Driver.__init__ (self, config)
        self.shell = Shell (config)
        self.cache = None
        if config.SFA_AGGREGATE_CACHING:
            if NovaDriver.cache is None:
                NovaDriver.cache = Cache()
            self.cache = NovaDriver.cache

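    # note (illustrative): because the cache is a class attribute, a second
    # driver instance created for a later request reuses the same Cache object
    # when SFA_AGGREGATE_CACHING is enabled:
    #   d1 = NovaDriver(config); d2 = NovaDriver(config)
    #   d1.cache is d2.cache    # True
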
    ########################################
    ########## registry oriented
    ########################################

    ########## disabled users
    def is_enabled (self, record):
        # all records are enabled
        return True

    def augment_records_with_testbed_info (self, sfa_records):
        return self.fill_record_info (sfa_records)

    ##########
    def register (self, sfa_record, hrn, pub_key):
        type = sfa_record['type']

        #pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)

        if type == 'slice':
            # add slice description, name, researchers, PI
            name = hrn_to_os_slicename(hrn)
            researchers = sfa_record.get('researchers', [])
            pis = sfa_record.get('pis', [])
            project_manager = None
            description = sfa_record.get('description', None)
            if pis:
                project_manager = Xrn(pis[0], 'user').get_leaf()
            elif researchers:
                project_manager = Xrn(researchers[0], 'user').get_leaf()
            if not project_manager:
                err_string = "Cannot create a project without a project manager. " + \
                             "Please specify at least one PI or researcher for project: " + \
                             name
                raise SfaInvalidArgument(err_string)

            users = [Xrn(user, 'user').get_leaf() for user in \
                     pis + researchers]
            self.shell.auth_manager.create_project(name, project_manager, description, users)

        elif type == 'user':
            # add person roles, projects and keys
            name = Xrn(hrn).get_leaf()
            self.shell.auth_manager.create_user(name)
            projects = sfa_record.get('slices', [])
            for project in projects:
                project_name = Xrn(project).get_leaf()
                self.shell.auth_manager.add_to_project(name, project_name)
            keys = sfa_record.get('keys', [])
            for key in keys:
                key_dict = {
                    'user_id': name,
                    'name': name,
                    'public': key,
                }
                self.shell.db.key_pair_create(key_dict)

        return name

    ##########
    # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        type = new_sfa_record['type']

        # new_key implemented for users only
        if new_key and type not in [ 'user' ]:
            raise UnknownSfaType(type)

        elif type == "slice":
            # can update project manager and description
            name = hrn_to_os_slicename(hrn)
            researchers = new_sfa_record.get('researchers', [])
            pis = new_sfa_record.get('pis', [])
            project_manager = None
            description = new_sfa_record.get('description', None)
            if pis:
                project_manager = Xrn(pis[0], 'user').get_leaf()
            elif researchers:
                project_manager = Xrn(researchers[0], 'user').get_leaf()
            self.shell.auth_manager.modify_project(name, project_manager, description)

        elif type == "user":
            # can technically update access_key and secret_key,
            # but that is not in our scope, so we do nothing.
            pass
        return True


    ##########
    def remove (self, sfa_record):
        type=sfa_record['type']
        if type == 'user':
            name = Xrn(sfa_record['hrn']).get_leaf()
            if self.shell.auth_manager.get_user(name):
                self.shell.auth_manager.delete_user(name)
        elif type == 'slice':
            name = hrn_to_os_slicename(sfa_record['hrn'])
            if self.shell.auth_manager.get_project(name):
                self.shell.auth_manager.delete_project(name)
        return True


    ####################
    def fill_record_info(self, records):
        """
        Given a (list of) SFA record(s), fill in the OpenStack specific
        and SFA specific fields in the record.
        """
        if not isinstance(records, list):
            records = [records]

        for record in records:
            os_record = None
            if record['type'] == 'user':
                name = Xrn(record['hrn']).get_leaf()
                os_record = self.shell.auth_manager.get_user(name)
                projects = self.shell.db.project_get_by_user(name)
                record['slices'] = [self.hrn + "." + proj.name for \
                                    proj in projects]
                record['roles'] = self.shell.db.user_get_roles(name)
                keys = self.shell.db.key_pair_get_all_by_user(name)
                record['keys'] = [key.public_key for key in keys]
            elif record['type'] == 'slice':
                name = hrn_to_os_slicename(record['hrn'])
                os_record = self.shell.auth_manager.get_project(name)
                record['description'] = os_record.description
                record['PI'] = [self.hrn + "." + os_record.project_manager.name]
                record['geni_creator'] = record['PI']
                record['researcher'] = [self.hrn + "." + user for \
                                         user in os_record.member_ids]
            else:
                continue
            record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
            record['geni_certificate'] = record['gid']
            record['name'] = os_record.name
            #if os_record.created_at is not None:
            #    record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
            #if os_record.updated_at is not None:
            #    record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))

        return records


    ####################
    # plcapi works by changes, compute what needs to be added/deleted
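    # illustrative example of that computation (made-up names): if the project
    # currently has members ['alice', 'bob'] and target_ids is ['bob', 'carol'],
    # then add_target_ids == ['carol'] and del_target_ids == ['alice']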
    def update_relation (self, subject_type, target_type, subject_id, target_ids):
        # hard-wire the code for slice/user for now, could be smarter if needed
        if subject_type =='slice' and target_type == 'user':
            subject=self.shell.project_get(subject_id)[0]
            current_target_ids = [user.name for user in subject.members]
            add_target_ids = list ( set (target_ids).difference(current_target_ids))
            del_target_ids = list ( set (current_target_ids).difference(target_ids))
            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
            for target_id in add_target_ids:
                self.shell.project_add_member(target_id,subject_id)
                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
            for target_id in del_target_ids:
                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
                self.shell.project_remove_member(target_id, subject_id)
        else:
            logger.info('unexpected relation to maintain, %s -> %s'%(subject_type,target_type))


    ########################################
    ########## aggregate oriented
    ########################################

    def testbed_name (self): return "openstack"

    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version (self):
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        return {
            'testbed':self.testbed_name(),
            'geni_request_rspec_versions': request_rspec_versions,
            'geni_ad_rspec_versions': ad_rspec_versions,
            }

    def list_slices (self, creds, options):
        # look in cache first
        if self.cache:
            slices = self.cache.get('slices')
            if slices:
                logger.debug("OpenStackDriver.list_slices returns from cache")
                return slices

        # get data from db
        projs = self.shell.auth_manager.get_projects()
        slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs]

        # cache the result
        if self.cache:
            logger.debug ("OpenStackDriver.list_slices stores value in cache")
            self.cache.add('slices', slice_urns)

        return slice_urns

    # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + options.get('info', 'default')

        # look in cache first
        if cached_requested and self.cache and not slice_hrn:
            rspec = self.cache.get(version_string)
            if rspec:
                logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
                return rspec

        #panos: passing user-defined options
        #print "manager options = ",options
        aggregate = OSAggregate(self)
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
                                    options=options)

        # cache the result
        if self.cache and not slice_hrn:
            logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
            self.cache.add(version_string, rspec)

        return rspec

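    # for reference, sliver_status below returns a dict of roughly this shape
    # (illustrative values; the real ones come from the project's nova instances):
    #   {'geni_urn': <slice urn>, 'geni_status': 'ready',
    #    'plos_login': 'root', 'plos_expires': None,
    #    'geni_resources': [{'geni_urn': <sliver urn>, 'plos_hostname': ...,
    #                        'plos_created_at': ..., 'plos_boot_state': ...,
    #                        'plos_sliver_type': ..., 'boot_state': 'ready'}]}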
    def sliver_status (self, slice_urn, slice_hrn):
        # find out where this slice is currently running
        project_name = hrn_to_os_slicename(slice_hrn)
        project = self.shell.auth_manager.get_project(project_name)
        instances = self.shell.db.instance_get_all_by_project(project_name)
        if len(instances) == 0:
            raise SliverDoesNotExist("You have not allocated any slivers here")

        result = {}
        top_level_status = 'unknown'
        if instances:
            top_level_status = 'ready'
        result['geni_urn'] = slice_urn
        result['plos_login'] = 'root'
        result['plos_expires'] = None

        resources = []
        for instance in instances:
            res = {}
            # instances are accessed by ip, not hostname. We need to report the ip
            # somewhere so users know where to ssh to.
            res['plos_hostname'] = instance.hostname
            res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))
            res['plos_boot_state'] = instance.vm_state
            res['plos_sliver_type'] = instance.instance_type.name
            sliver_id = Xrn(slice_urn).get_sliver_id(instance.project_id, \
                                                     instance.hostname, instance.id)
            res['geni_urn'] = sliver_id

            if instance.vm_state == 'running':
                res['boot_state'] = 'ready'
            else:
                res['boot_state'] = 'unknown'
            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        return result

    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):

        aggregate = OSAggregate(self)
        rspec = RSpec(rspec_string)
        instance_name = hrn_to_os_slicename(slice_hrn)

        # assume the first user is the caller and use their context
        # for the ec2/euca api connection. Also, use the first user's
        # key as the project key.
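        # each entry of users is expected to carry at least a 'keys' list;
        # an illustrative (made-up) example of the argument:
        #   [{'urn': 'urn:publicid:IDN+example+user+alice',
        #     'keys': ['ssh-rsa AAAA... alice@example.org']}]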
        key_name = None
        if len(users) >= 1:
            key_name = aggregate.create_instance_key(slice_hrn, users[0])

        # collect public keys
        pubkeys = []
        for user in users:
            pubkeys.extend(user['keys'])

        aggregate.run_instances(instance_name, rspec_string, key_name, pubkeys)

        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)

    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        aggregate = OSAggregate(self)
        project_name = hrn_to_os_slicename(slice_hrn)
        return aggregate.delete_instances(project_name)

    def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
        name = hrn_to_os_slicename(slice_hrn)
        aggregate = OSAggregate(self)
        return aggregate.update_instances(name)

    def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
        return True

    def start_slice (self, slice_urn, slice_hrn, creds):
        return 1

    def stop_slice (self, slice_urn, slice_hrn, creds):
        name = OSXrn(xrn=slice_urn).name
        aggregate = OSAggregate(self)
        return aggregate.stop_instances(name)

    def reset_slice (self, slice_urn, slice_hrn, creds):
        raise SfaNotImplemented ("reset_slice not available at this interface")

    # xxx this code is quite old and has not run for ages
    # it is obviously totally broken and needs a rewrite
    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
        raise SfaNotImplemented("OpenStackDriver.get_ticket needs a rewrite")
# please keep this code for future reference
#        slices = PlSlices(self)
#        peer = slices.get_peer(slice_hrn)
#        sfa_peer = slices.get_sfa_peer(slice_hrn)
#
#        # get the slice record
#        credential = api.getCredential()
#        interface = api.registries[api.hrn]
#        registry = api.server_proxy(interface, credential)
#        records = registry.Resolve(xrn, credential)
#
#        # make sure we get a local slice record
#        record = None
#        for tmp_record in records:
#            if tmp_record['type'] == 'slice' and \
#               not tmp_record['peer_authority']:
#    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
#                slice_record = SliceRecord(dict=tmp_record)
#        if not record:
#            raise RecordNotFound(slice_hrn)
#
#        # similar to CreateSliver, we must verify that the required records exist
#        # at this aggregate before we can issue a ticket
#        # parse rspec
#        rspec = RSpec(rspec_string)
#        requested_attributes = rspec.version.get_slice_attributes()
#
#        # ensure site record exists
#        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure slice record exists
#        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure person records exists
#    # xxx users is undefined in this context
#        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
#        # ensure slice attributes exists
#        slices.verify_slice_attributes(slice, requested_attributes)
#
#        # get sliver info
#        slivers = slices.get_slivers(slice_hrn)
#
#        if not slivers:
#            raise SliverDoesNotExist(slice_hrn)
#
#        # get initscripts
#        initscripts = []
#        data = {
#            'timestamp': int(time.time()),
#            'initscripts': initscripts,
#            'slivers': slivers
#        }
#
#        # create the ticket
#        object_gid = record.get_gid_object()
#        new_ticket = SfaTicket(subject = object_gid.get_subject())
#        new_ticket.set_gid_caller(api.auth.client_gid)
#        new_ticket.set_gid_object(object_gid)
#        new_ticket.set_issuer(key=api.key, subject=self.hrn)
#        new_ticket.set_pubkey(object_gid.get_pubkey())
#        new_ticket.set_attributes(data)
#        new_ticket.set_rspec(rspec)
#        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
#        new_ticket.encode()
#        new_ticket.sign()
#
#        return new_ticket.save_to_string(save_parents=True)