renamed sfa/plc into sfa/planetlab
[sfa.git] / sfa / openstack / nova_driver.py
1 import time
2 import datetime
3
4 from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
5     RecordNotFound, SfaNotImplemented, SliverDoesNotExist, \
6     SfaInvalidArgument
7
8 from sfa.util.sfalogging import logger
9 from sfa.util.defaultdict import defaultdict
10 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
11 from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
12 from sfa.util.cache import Cache
13 from sfa.trust.credential import Credential
14 # used to be used in get_ticket
15 #from sfa.trust.sfaticket import SfaTicket
16
17 from sfa.rspecs.version_manager import VersionManager
18 from sfa.rspecs.rspec import RSpec
19
20 # the driver interface, mostly provides default behaviours
21 from sfa.managers.driver import Driver
22 from sfa.openstack.nova_shell import NovaShell
23 from sfa.openstack.euca_shell import EucaShell
24 from sfa.openstack.osaggregate import OSAggregate
25 from sfa.planetlab.plslices import PlSlices
26 from sfa.util.osxrn import OSXrn
27
28
def list_to_dict(recs, key):
    """
    Index a list of dictionaries by the value each record holds
    under `key`, returning a single {key_value: record} mapping.
    """
    return dict((rec[key], rec) for rec in recs)
35
#
# NovaShell wraps the nova auth manager and db APIs so methods
# can be called as-is; it takes care of authentication
# from the global config
#
class NovaDriver(Driver):
    """
    SFA driver for an OpenStack Nova testbed.

    Registry-oriented operations (register/update/remove/fill_record_info)
    map SFA slice records to nova projects and SFA user records to nova
    users.  Aggregate-oriented operations (list_resources, create_sliver,
    delete_sliver, ...) delegate to OSAggregate and the euca shell.
    """

    # the cache instance is a class member so it survives across incoming requests
    cache = None

    def __init__(self, config):
        Driver.__init__(self, config)
        self.shell = NovaShell(config)
        self.euca_shell = EucaShell(config)
        self.cache = None
        if config.SFA_AGGREGATE_CACHING:
            if NovaDriver.cache is None:
                NovaDriver.cache = Cache()
            self.cache = NovaDriver.cache

    ########################################
    ########## registry oriented
    ########################################

    ########## disabled users
    def is_enabled(self, record):
        # all records are enabled
        return True

    def augment_records_with_testbed_info(self, sfa_records):
        return self.fill_record_info(sfa_records)

    ##########
    def register(self, sfa_record, hrn, pub_key):
        """
        Create the testbed-side object for an SFA record and return its
        local (leaf) name.

        'slice' records become nova projects (the first PI, or failing
        that the first researcher, becomes project manager); 'user'
        records become nova users with their project memberships and
        public keys registered.  Other record types are a no-op.

        Raises SfaInvalidArgument when a slice record names no PI or
        researcher to act as project manager.
        """
        type = sfa_record['type']
        # hoisted out of the branches: both used it, and the final
        # 'return name' crashed with NameError for any other record type
        name = Xrn(hrn).get_leaf()

        if type == 'slice':
            # add slice description, name, researchers, PI
            researchers = sfa_record.get('researchers', [])
            pis = sfa_record.get('pis', [])
            project_manager = None
            description = sfa_record.get('description', None)
            if pis:
                project_manager = Xrn(pis[0], 'user').get_leaf()
            elif researchers:
                project_manager = Xrn(researchers[0], 'user').get_leaf()
            if not project_manager:
                err_string = "Cannot create a project without a project manager. " + \
                             "Please specify at least one PI or researcher for project: " + \
                             name
                raise SfaInvalidArgument(err_string)

            users = [Xrn(user, 'user').get_leaf() for user in
                     pis + researchers]
            self.shell.auth_manager.create_project(name, project_manager, description, users)

        elif type == 'user':
            # add person roles, projects and keys
            self.shell.auth_manager.create_user(name)
            # bug fix: these two reads used the undefined name 'sfa_records'
            projects = sfa_record.get('slices', [])
            for project in projects:
                project_name = Xrn(project).get_leaf()
                self.shell.auth_manager.add_to_project(name, project_name)
            keys = sfa_record.get('keys', [])
            for key in keys:
                key_dict = {
                    'user_id': name,
                    'name': name,
                    'public': key,
                }
                self.shell.db.key_pair_create(key_dict)

        return name

    ##########
    # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
    def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
        """
        Update the testbed-side object behind an SFA record.

        Only slice (project manager, description) updates do real work;
        user updates are accepted but ignored.  new_key is only legal for
        user records.
        """
        type = new_sfa_record['type']

        # new_key implemented for users only
        if new_key and type not in ['user']:
            raise UnknownSfaType(type)

        elif type == "slice":
            # can update project manager and description
            name = Xrn(hrn).get_leaf()
            # bug fix: fields come from new_sfa_record, not the
            # undefined name 'sfa_record'
            researchers = new_sfa_record.get('researchers', [])
            pis = new_sfa_record.get('pis', [])
            project_manager = None
            description = new_sfa_record.get('description', None)
            if pis:
                project_manager = Xrn(pis[0], 'user').get_leaf()
            elif researchers:
                project_manager = Xrn(researchers[0], 'user').get_leaf()
            self.shell.auth_manager.modify_project(name, project_manager, description)

        elif type == "user":
            # can technically update access_key and secret_key,
            # but that is not in our scope, so we do nothing.
            pass
        return True

    ##########
    def remove(self, sfa_record):
        """Delete the nova user or project behind an SFA record; always returns True."""
        type = sfa_record['type']
        name = Xrn(sfa_record['hrn']).get_leaf()
        if type == 'user':
            if self.shell.auth_manager.get_user(name):
                self.shell.auth_manager.delete_user(name)
        elif type == 'slice':
            if self.shell.auth_manager.get_project(name):
                self.shell.auth_manager.delete_project(name)
        return True

    ####################
    def fill_record_info(self, records):
        """
        Given a (list of) SFA record, fill in the testbed-specific
        and SFA specific fields in the record.

        Accepts a single record dict or a list of them; always returns a list.
        Records of types other than 'user'/'slice' are passed through untouched.
        """
        if not isinstance(records, list):
            records = [records]

        for record in records:
            name = Xrn(record['hrn']).get_leaf()
            os_record = None
            if record['type'] == 'user':
                os_record = self.shell.auth_manager.get_user(name)
                projects = self.shell.db.project_get_by_user(name)
                record['slices'] = [self.hrn + "." + proj.name for
                                    proj in projects]
                record['roles'] = self.shell.db.user_get_roles(name)
                keys = self.shell.db.key_pair_get_all_by_user(name)
                record['keys'] = [key.public_key for key in keys]
            elif record['type'] == 'slice':
                os_record = self.shell.auth_manager.get_project(name)
                record['description'] = os_record.description
                record['PI'] = [self.hrn + "." + os_record.project_manager.name]
                record['geni_creator'] = record['PI']
                record['researcher'] = [self.hrn + "." + user for
                                        user in os_record.member_ids]
            else:
                continue
            record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
            record['geni_certificate'] = record['gid']
            record['name'] = os_record.name
            #if os_record.created_at is not None:
            #    record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
            #if os_record.updated_at is not None:
            #    record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))

        return records

    ####################
    # plcapi works by changes, compute what needs to be added/deleted
    def update_relation(self, subject_type, target_type, subject_id, target_ids):
        """
        Reconcile a slice/user membership relation: add members in
        target_ids that the project lacks, drop members it has that are
        no longer listed.  Other relation types are logged and ignored.
        """
        # hard-wire the code for slice/user for now, could be smarter if needed
        if subject_type == 'slice' and target_type == 'user':
            subject = self.shell.project_get(subject_id)[0]
            current_target_ids = [user.name for user in subject.members]
            add_target_ids = list(set(target_ids).difference(current_target_ids))
            del_target_ids = list(set(current_target_ids).difference(target_ids))
            logger.debug("subject_id = %s (type=%s)" % (subject_id, type(subject_id)))
            for target_id in add_target_ids:
                self.shell.project_add_member(target_id, subject_id)
                logger.debug("add_target_id = %s (type=%s)" % (target_id, type(target_id)))
            for target_id in del_target_ids:
                logger.debug("del_target_id = %s (type=%s)" % (target_id, type(target_id)))
                self.shell.project_remove_member(target_id, subject_id)
        else:
            logger.info('unexpected relation to maintain, %s -> %s' % (subject_type, target_type))

    ########################################
    ########## aggregate oriented
    ########################################

    def testbed_name(self): return "openstack"

    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version(self):
        """Advertise the rspec versions this aggregate can serve and accept."""
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        return {
            'testbed': self.testbed_name(),
            'geni_request_rspec_versions': request_rspec_versions,
            'geni_ad_rspec_versions': ad_rspec_versions,
            }

    def list_slices(self, creds, options):
        """Return the urns of all local projects, using the cache when enabled."""
        # look in cache first
        if self.cache:
            slices = self.cache.get('slices')
            if slices:
                logger.debug("OpenStackDriver.list_slices returns from cache")
                return slices

        # get data from db
        projs = self.shell.auth_manager.get_projects()
        slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs]

        # cache the result
        if self.cache:
            logger.debug("OpenStackDriver.list_slices stores value in cache")
            self.cache.add('slices', slice_urns)

        return slice_urns

    # first 2 args are None in case of resource discovery
    def list_resources(self, slice_urn, slice_hrn, creds, options):
        """
        Return an rspec for the whole testbed (slice_urn/slice_hrn None)
        or for one slice.  Advertisements may be served from / stored in
        the cache; per-slice rspecs never are.
        """
        cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + options.get('info', 'default')

        # look in cache first
        if cached_requested and self.cache and not slice_hrn:
            rspec = self.cache.get(version_string)
            if rspec:
                logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
                return rspec

        #panos: passing user-defined options
        #print "manager options = ",options
        aggregate = OSAggregate(self)
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
                                    options=options)

        # cache the result
        if self.cache and not slice_hrn:
            logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
            self.cache.add(version_string, rspec)

        return rspec

    def sliver_status(self, slice_urn, slice_hrn):
        """
        Report GENI status for every instance in the slice's project.
        Raises SliverDoesNotExist when the project has no instances.
        """
        # find out where this slice is currently running
        project_name = Xrn(slice_urn).get_leaf()
        project = self.shell.auth_manager.get_project(project_name)
        instances = self.shell.db.instance_get_all_by_project(project_name)
        if len(instances) == 0:
            raise SliverDoesNotExist("You have not allocated any slivers here")

        result = {}
        top_level_status = 'unknown'
        if instances:
            top_level_status = 'ready'
        result['geni_urn'] = slice_urn
        result['plos_login'] = 'root'
        result['plos_expires'] = None

        resources = []
        for instance in instances:
            res = {}
            # instances are accessed by ip, not hostname. We need to report the ip
            # somewhere so users know where to ssh to.
            res['plos_hostname'] = instance.hostname
            res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))
            res['plos_boot_state'] = instance.vm_state
            res['plos_sliver_type'] = instance.instance_type.name
            sliver_id = Xrn(slice_urn).get_sliver_id(instance.project_id,
                                                     instance.hostname, instance.id)
            res['geni_urn'] = sliver_id

            if instance.vm_state == 'running':
                res['boot_state'] = 'ready'
            else:
                res['boot_state'] = 'unknown'
            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        return result

    def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string, users, options):
        """
        Instantiate the slivers described by rspec_string in the slice's
        project and return the resulting manifest rspec.
        """
        project_name = get_leaf(slice_hrn)
        aggregate = OSAggregate(self)
        # parse rspec
        rspec = RSpec(rspec_string)

        # ensure project and users exist in local db
        aggregate.create_project(project_name, users, options=options)

        # collect public keys
        pubkeys = []
        project_key = None
        for user in users:
            pubkeys.extend(user['keys'])
            # assume first user is the caller and use their context
            # for the ec2/euca api connection. Also, use the first users
            # key as the project key.
            if not project_key:
                username = Xrn(user['urn']).get_leaf()
                user_keys = self.shell.db.key_pair_get_all_by_user(username)
                if user_keys:
                    project_key = user_keys[0].name

        # ensure person records exists
        self.euca_shell.init_context(project_name)
        aggregate.run_instances(project_name, rspec_string, project_key, pubkeys)

        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)

    def delete_sliver(self, slice_urn, slice_hrn, creds, options):
        """Terminate every instance in the slice's project."""
        # we need to do this using the context of one of the slice users
        project_name = Xrn(slice_urn).get_leaf()
        self.euca_shell.init_context(project_name)
        name = OSXrn(xrn=slice_urn).name
        aggregate = OSAggregate(self)
        return aggregate.delete_instances(name)

    def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
        name = OSXrn(xrn=slice_urn).name
        aggregate = OSAggregate(self)
        return aggregate.update_instances(name)

    def renew_sliver(self, slice_urn, slice_hrn, creds, expiration_time, options):
        # slivers here have no expiration to extend; accept the call
        return True

    def start_slice(self, slice_urn, slice_hrn, creds):
        # nothing to start; report success
        return 1

    def stop_slice(self, slice_urn, slice_hrn, creds):
        name = OSXrn(xrn=slice_urn).name
        aggregate = OSAggregate(self)
        return aggregate.stop_instances(name)

    def reset_slice(self, slice_urn, slice_hrn, creds):
        raise SfaNotImplemented("reset_slice not available at this interface")

    # xxx this code is quite old and has not run for ages
    # it is obviously totally broken and needs a rewrite
    def get_ticket(self, slice_urn, slice_hrn, creds, rspec_string, options):
        # bug fix: was Python-2-only 'raise Exc, "msg"' syntax
        raise SfaNotImplemented("OpenStackDriver.get_ticket needs a rewrite")
# please keep this code for future reference
#        slices = PlSlices(self)
#        peer = slices.get_peer(slice_hrn)
#        sfa_peer = slices.get_sfa_peer(slice_hrn)
#    
#        # get the slice record
#        credential = api.getCredential()
#        interface = api.registries[api.hrn]
#        registry = api.server_proxy(interface, credential)
#        records = registry.Resolve(xrn, credential)
#    
#        # make sure we get a local slice record
#        record = None
#        for tmp_record in records:
#            if tmp_record['type'] == 'slice' and \
#               not tmp_record['peer_authority']:
#    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
#                slice_record = SliceRecord(dict=tmp_record)
#        if not record:
#            raise RecordNotFound(slice_hrn)
#        
#        # similar to CreateSliver, we must verify that the required records exist
#        # at this aggregate before we can issue a ticket
#        # parse rspec
#        rspec = RSpec(rspec_string)
#        requested_attributes = rspec.version.get_slice_attributes()
#    
#        # ensure site record exists
#        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure slice record exists
#        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
#        # ensure person records exists
#    # xxx users is undefined in this context
#        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
#        # ensure slice attributes exists
#        slices.verify_slice_attributes(slice, requested_attributes)
#        
#        # get sliver info
#        slivers = slices.get_slivers(slice_hrn)
#    
#        if not slivers:
#            raise SliverDoesNotExist(slice_hrn)
#    
#        # get initscripts
#        initscripts = []
#        data = {
#            'timestamp': int(time.time()),
#            'initscripts': initscripts,
#            'slivers': slivers
#        }
#    
#        # create the ticket
#        object_gid = record.get_gid_object()
#        new_ticket = SfaTicket(subject = object_gid.get_subject())
#        new_ticket.set_gid_caller(api.auth.client_gid)
#        new_ticket.set_gid_object(object_gid)
#        new_ticket.set_issuer(key=api.key, subject=self.hrn)
#        new_ticket.set_pubkey(object_gid.get_pubkey())
#        new_ticket.set_attributes(data)
#        new_ticket.set_rspec(rspec)
#        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
#        new_ticket.encode()
#        new_ticket.sign()
#    
#        return new_ticket.save_to_string(save_parents=True)