renamed sfa/plc into sfa/planetlab
[sfa.git] / sfa / openstack / nova_driver.py
index 0ee4c72..e4b95b0 100644 (file)
@@ -1,22 +1,28 @@
 import time
 import datetime
-#
+
 from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
-    RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+    RecordNotFound, SfaNotImplemented, SliverDoesNotExist, \
+    SfaInvalidArgument
+
 from sfa.util.sfalogging import logger
 from sfa.util.defaultdict import defaultdict
 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
 from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
 from sfa.util.cache import Cache
+from sfa.trust.credential import Credential
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
+
 from sfa.rspecs.version_manager import VersionManager
 from sfa.rspecs.rspec import RSpec
+
 # the driver interface, mostly provides default behaviours
 from sfa.managers.driver import Driver
 from sfa.openstack.nova_shell import NovaShell
+from sfa.openstack.euca_shell import EucaShell
 from sfa.openstack.osaggregate import OSAggregate
-from sfa.plc.plslices import PlSlices
+from sfa.planetlab.plslices import PlSlices
 from sfa.util.osxrn import OSXrn
 
 
@@ -40,6 +46,7 @@ class NovaDriver (Driver):
     def __init__ (self, config):
         Driver.__init__ (self, config)
         self.shell = NovaShell (config)
+        self.euca_shell = EucaShell(config)
         self.cache=None
         if config.SFA_AGGREGATE_CACHING:
             if NovaDriver.cache is None:
@@ -61,33 +68,74 @@ class NovaDriver (Driver):
     ########## 
     def register (self, sfa_record, hrn, pub_key):
         type = sfa_record['type']
-        pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
-
+        
+        #pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
+           
         if type == 'slice':
-            acceptable_fields=['url', 'instantiation', 'name', 'description']
             # add slice description, name, researchers, PI 
-            pass
+            name = Xrn(hrn).get_leaf()
+            researchers = sfa_record.get('researchers', [])
+            pis = sfa_record.get('pis', [])
+            project_manager = None
+            description = sfa_record.get('description', None)
+            if pis:
+                project_manager = Xrn(pis[0], 'user').get_leaf()
+            elif researchers:
+                project_manager = Xrn(researchers[0], 'user').get_leaf()
+            if not project_manager:
+                err_string = "Cannot create a project without a project manager. " + \
+                             "Please specify at least one PI or researcher for project: " + \
+                             name    
+                raise SfaInvalidArgument(err_string)
+
+            users = [Xrn(user, 'user').get_leaf() for user in \
+                     pis + researchers]
+            self.shell.auth_manager.create_project(name, project_manager, description, users)
 
         elif type == 'user':
             # add person roles, projects and keys
-            pass
-        return pointer
+            name = Xrn(hrn).get_leaf()
+            self.shell.auth_manager.create_user(name)
+            projects = sfa_record.get('slices', [])
+            for project in projects:
+                project_name = Xrn(project).get_leaf()
+                self.shell.auth_manager.add_to_project(name, project_name)
+            keys = sfa_record.get('keys', [])
+            for key in keys:
+                key_dict = {
+                    'user_id': name,
+                    'name': name,
+                    'public': key,
+                }
+                self.shell.db.key_pair_create(key_dict)       
+                  
+        return name
         
     ##########
     # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
     def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
-        pointer = old_sfa_record['pointer']
-        type = old_sfa_record['type']
-
+        type = new_sfa_record['type'] 
+        
         # new_key implemented for users only
         if new_key and type not in [ 'user' ]:
             raise UnknownSfaType(type)
 
         elif type == "slice":
-            # can update description, researchers and PI
-            pass 
+            # can update project manager and description
+            name = Xrn(hrn).get_leaf()
+            researchers = new_sfa_record.get('researchers', [])
+            pis = new_sfa_record.get('pis', [])
+            project_manager = None
+            description = new_sfa_record.get('description', None)
+            if pis:
+                project_manager = Xrn(pis[0], 'user').get_leaf()
+            elif researchers:
+                project_manager = Xrn(researchers[0], 'user').get_leaf()
+            self.shell.auth_manager.modify_project(name, project_manager, description)
+
         elif type == "user":
-            # can update  slices, keys and roles
+            # can technically update access_key and secret_key,
+            # but that is not in our scope, so we do nothing.
             pass
         return True
         
@@ -97,11 +145,11 @@ class NovaDriver (Driver):
         type=sfa_record['type']
         name = Xrn(sfa_record['hrn']).get_leaf()     
         if type == 'user':
-            if self.shell.user_get(name):
-                self.shell.user_delete(name)
+            if self.shell.auth_manager.get_user(name):
+                self.shell.auth_manager.delete_user(name)
         elif type == 'slice':
-            if self.shell.project_get(name):
-                self.shell.project_delete(name)
+            if self.shell.auth_manager.get_project(name):
+                self.shell.auth_manager.delete_project(name)
         return True
 
 
@@ -241,49 +289,37 @@ class NovaDriver (Driver):
     
     def sliver_status (self, slice_urn, slice_hrn):
         # find out where this slice is currently running
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        
-        slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
-        if len(slices) == 0:        
-            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
-        slice = slices[0]
-        
-        # report about the local nodes only
-        nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
-                              ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-
-        if len(nodes) == 0:
+        project_name = Xrn(slice_urn).get_leaf()
+        project = self.shell.auth_manager.get_project(project_name)
+        instances = self.shell.db.instance_get_all_by_project(project_name)
+        if len(instances) == 0:
             raise SliverDoesNotExist("You have not allocated any slivers here") 
-
-        site_ids = [node['site_id'] for node in nodes]
-    
+        
         result = {}
         top_level_status = 'unknown'
-        if nodes:
+        if instances:
             top_level_status = 'ready'
         result['geni_urn'] = slice_urn
-        result['pl_login'] = slice['name']
-        result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+        result['plos_login'] = 'root' 
+        result['plos_expires'] = None
         
         resources = []
-        for node in nodes:
+        for instance in instances:
             res = {}
-            res['pl_hostname'] = node['hostname']
-            res['pl_boot_state'] = node['boot_state']
-            res['pl_last_contact'] = node['last_contact']
-            if node['last_contact'] is not None:
-                
-                res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
-            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            # instances are accessed by ip, not hostname. We need to report the ip
+            # somewhere so users know where to ssh to.     
+            res['plos_hostname'] = instance.hostname
+            res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))    
+            res['plos_boot_state'] = instance.vm_state
+            res['plos_sliver_type'] = instance.instance_type.name 
+            sliver_id =  Xrn(slice_urn).get_sliver_id(instance.project_id, \
+                                                      instance.hostname, instance.id)
             res['geni_urn'] = sliver_id
-            if node['boot_state'] == 'boot':
-                res['geni_status'] = 'ready'
+
+            if instance.vm_state == 'running':
+                res['boot_state'] = 'ready';
             else:
-                res['geni_status'] = 'failed'
-                top_level_status = 'failed' 
-                
-            res['geni_error'] = ''
-    
+                res['boot_state'] = 'unknown'  
             resources.append(res)
             
         result['geni_status'] = top_level_status
@@ -292,33 +328,46 @@ class NovaDriver (Driver):
 
     def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
 
+        project_name = get_leaf(slice_hrn)
         aggregate = OSAggregate(self)
-        slicename = get_leaf(slice_hrn)
-        
         # parse rspec
         rspec = RSpec(rspec_string)
-        requested_attributes = rspec.version.get_slice_attributes()
-        
-        # ensure slice record exists
-        slice = aggregate.verify_slice(slicename, users, options=options)
+       
+        # ensure project and users exist in local db
+        aggregate.create_project(project_name, users, options=options)
+     
+        # collect public keys
+        pubkeys = []
+        project_key = None
+        for user in users:
+            pubkeys.extend(user['keys'])
+            # assume first user is the caller and use their context
+            # for the ec2/euca api connection. Also, use the first users
+            # key as the project key.   
+            if not project_key:
+                username = Xrn(user['urn']).get_leaf()
+                user_keys = self.shell.db.key_pair_get_all_by_user(username)
+                if user_keys:
+                    project_key = user_keys[0].name
+                     
         # ensure person records exists
-        persons = aggregate.verify_slice_users(slicename, users, options=options)
-        # add/remove slice from nodes
-        slices.verify_instances(slicename, rspec)    
+        self.euca_shell.init_context(project_name)  
+        aggregate.run_instances(project_name, rspec_string, project_key, pubkeys)    
    
         return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
 
     def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        # we need to do this using the context of one of the slice users
+        project_name = Xrn(slice_urn).get_leaf()
+        self.euca_shell.init_context(project_name) 
         name = OSXrn(xrn=slice_urn).name
-        slice = self.shell.project_get(name)
-        if not slice:
-            return 1
-        
-        self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
-        instances = self.shell.db.instance_get_all_by_project(name)
-        for instance in instances:
-            self.shell.db.instance_destroy(instance.instance_id)
-        return 1
+        aggregate = OSAggregate(self)
+        return aggregate.delete_instances(name)   
+
+    def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
+        name = OSXrn(xrn=slice_urn).name
+        aggregate = OSAggregate(self)
+        return aggregate.update_instances(name)
     
     def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
         return True
@@ -328,12 +377,9 @@ class NovaDriver (Driver):
 
     def stop_slice (self, slice_urn, slice_hrn, creds):
         name = OSXrn(xrn=slice_urn).name
-        slice = self.shell.get_project(name)
-        instances = self.shell.db.instance_get_all_by_project(name)
-        for instance in instances:
-            self.shell.db.instance_stop(instance.instance_id)
-        return 1
-    
+        aggregate = OSAggregate(self)
+        return aggregate.stop_instances(name) 
+
     def reset_slice (self, slice_urn, slice_hrn, creds):
         raise SfaNotImplemented ("reset_slice not available at this interface")