refactored: move sliver allocation state updates into SliverAllocation.set_allocations() / delete_allocations(), have OSAggregate.run_instances() return the created servers, and make OSAggregate.delete_instance() operate on a server instance
author     Tony Mack <tmack@paris.CS.Princeton.EDU>
           Sat, 1 Dec 2012 03:01:06 +0000 (22:01 -0500)
committer  Tony Mack <tmack@paris.CS.Princeton.EDU>
           Sat, 1 Dec 2012 03:01:06 +0000 (22:01 -0500)
sfa/openstack/nova_driver.py
sfa/openstack/osaggregate.py
sfa/planetlab/pldriver.py

diff --git a/sfa/openstack/nova_driver.py b/sfa/openstack/nova_driver.py
index 44c4d36..ec4b1f9 100644
--- a/sfa/openstack/nova_driver.py
+++ b/sfa/openstack/nova_driver.py
@@ -13,9 +13,10 @@ from sfa.util.cache import Cache
 from sfa.trust.credential import Credential
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
-
 from sfa.rspecs.version_manager import VersionManager
 from sfa.rspecs.rspec import RSpec
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, SliverAllocation
 
 # the driver interface, mostly provides default behaviours
 from sfa.managers.driver import Driver
@@ -353,21 +354,42 @@ class NovaDriver(Driver):
         rspec = RSpec(rspec_string)
         instance_name = hrn_to_os_slicename(slice_hrn)
         tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
-        aggregate.run_instances(instance_name, tenant_name, rspec_string, key_name, pubkeys)    
+        slivers = aggregate.run_instances(instance_name, tenant_name,
+                                          rspec_string, key_name, pubkeys)
+
+        # update all sliver allocation states, setting them to geni_allocated
+        sliver_ids = [Xrn("%s.%s" % (self.hrn, sliver.id), type='sliver').urn for sliver in slivers]
+        SliverAllocation.set_allocations(sliver_ids, 'geni_allocated')
    
         return aggregate.describe(urns=[urn], version=rspec.version)
 
     def provision(self, urns, options={}):
+        # update sliver allocation states and set them to geni_provisioned
+        aggregate = OSAggregate(self)
+        instances = aggregate.get_instances(urns)
+        sliver_ids = []
+        for instance in instances:
+            sliver_hrn = "%s.%s" % (self.hrn, instance.id)
+            sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
+        SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned') 
+            
         return self.describe(urns, options=options) 
 
     def delete (self, urns, options={}):
+        # collect sliver ids so we can update sliver allocation states after
+        # we remove the slivers.
         aggregate = OSAggregate(self)
-        for urn in urns:
-            xrn = OSXrn(xrn=urn, type='slice')
-            tenant_name = xrn.get_tenant_name()
-            project_name = xrn.get_slicename()
-            id = xrn.id
-            aggregate.delete_instance(tenant_name, project_name, id)   
+        instances = aggregate.get_instances(urns)
+        sliver_ids = []
+        for instance in instances:
+            sliver_hrn = "%s.%s" % (self.hrn, instance.id)
+            sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
+            
+            # delete the instance
+            aggregate.delete_instance(instance)
+            
+        # delete sliver allocation states
+        SliverAllocation.delete_allocations(sliver_ids)
         return True
 
     def renew (self, urns, expiration_time, options={}):
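
Note on the sliver ids built in provision() and delete() above: both paths append the
Nova instance id to the driver's authority hrn and convert the result to a URN with Xrn.
A small illustration with hypothetical values (the authority hrn and instance id below are
made up; the exact URN escaping is whatever sfa.util.xrn produces):

    from sfa.util.xrn import Xrn

    # hypothetical values, for illustration only
    driver_hrn = 'plc.princeton'      # the aggregate's authority hrn (self.hrn)
    instance_id = '42'                # a nova instance id

    sliver_hrn = "%s.%s" % (driver_hrn, instance_id)
    sliver_urn = Xrn(sliver_hrn, type='sliver').urn
    # yields a GENI sliver urn along the lines of
    #   urn:publicid:IDN+plc:princeton+sliver+42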
diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
index 8a16304..e99ffe5 100644
--- a/sfa/openstack/osaggregate.py
+++ b/sfa/openstack/osaggregate.py
@@ -57,7 +57,6 @@ class OSAggregate:
             zones = [zone.name for zone in zones]
         return zones
 
-
     def list_resources(self, version=None, options={}):
         version_manager = VersionManager()
         version = version_manager.get_version(version)
@@ -96,19 +95,20 @@ class OSAggregate:
         ids = set()
         for urn in urns:
             xrn = OSXrn(xrn=urn)
-            names.add(xrn.get_slice_name())
-            if xrn.id:
-                ids.add(xrn.id)
+            if xrn.type == 'slice':
+                names.add(xrn.get_slice_name())
+            elif xrn.type == 'sliver':
+                ids.add(xrn.leaf)
 
         # look up instances
         instances = []
-        for name in name:
-            servers = self.driver.shell.nova_manager.servers.findall(name=name)
-            instances.extend(servers)
-
-        # filter on id
-        if ids:
-            instances = [server for server in servers if server.id in ids]
+        # findall() matches attributes by equality, so query one name/id at a time
+        for name in names:
+            instances.extend(self.driver.shell.nova_manager.servers.findall(name=name))
+        for id in ids:
+            instances.extend(self.driver.shell.nova_manager.servers.findall(id=id))
 
         return instances
 
@@ -335,6 +335,7 @@ class OSAggregate:
         rspec = RSpec(rspec)
         requested_instances = defaultdict(list)
         # iterate over clouds/zones/nodes
+        slivers = []
         for node in rspec.version.get_nodes_with_slivers():
             instances = node.get('slivers', [])
             if not instances:
@@ -354,39 +355,41 @@ class OSAggregate:
                         metadata['component_id'] = node['component_id']
                     if node.get('client_id'):
                         metadata['client_id'] = node['client_id'] 
-                    self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+                    server = self.driver.shell.nova_manager.servers.create(
+                                                            flavor=flavor_id,
                                                             image=image_id,
                                                             key_name = key_name,
                                                             security_groups = [group_name],
                                                             files=files,
                                                             meta=metadata, 
                                                             name=instance_name)
+                    slivers.append(server)
                 except Exception, err:    
                     logger.log_exc(err)                                
                            
+        return slivers        
 
-
-    def delete_instance(self, tenant_name, instance_name, id=None):
+    def delete_instance(self, instance):
     
-        def _delete_security_group(instance):
-            security_group = instance.metadata.get('security_groups', '')
+        def _delete_security_group(inst):
+            security_group = inst.metadata.get('security_groups', '')
             if security_group:
                 manager = SecurityGroup(self.driver)
                 timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
                 start_time = time.time()
                 instance_deleted = False
                 while instance_deleted == False and (time.time() - start_time) < timeout:
-                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
-                    if not inst:
+                    tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
+                    if not tmp_inst:
                         instance_deleted = True
                     time.sleep(.5)
                 manager.delete_security_group(security_group)
 
-        thread_manager = ThreadManager() 
-        self.driver.shell.nova_manager.connect(tenant=tenant_name)
-        args = {'name': instance_name}
-        if id:
-            args['id'] = id
+        thread_manager = ThreadManager()
+        tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)  
+        self.driver.shell.nova_manager.connect(tenant=tenant.name)
+        args = {'name': instance.name,
+                'id': instance.id}
         instances = self.driver.shell.nova_manager.servers.findall(**args)
         security_group_manager = SecurityGroup(self.driver)
         for instance in instances:
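
Taken together, the OSAggregate changes give the driver a small object-based API:
run_instances() now returns the nova servers it created, get_instances() resolves
slice/sliver urns to server objects, and delete_instance() takes one of those server
objects instead of a (tenant, name, id) triple. The helpers below are a rough sketch of
the call sequences NovaDriver uses above; they are not part of the commit and exist only
to spell out the intended usage:

    from sfa.openstack.osaggregate import OSAggregate

    def start_slivers(driver, instance_name, tenant_name, rspec_string, key_name, pubkeys):
        """run_instances() now returns the nova servers it created."""
        aggregate = OSAggregate(driver)
        return aggregate.run_instances(instance_name, tenant_name,
                                       rspec_string, key_name, pubkeys)

    def remove_slivers(driver, urns):
        """get_instances() maps urns to servers; delete_instance() takes one server."""
        aggregate = OSAggregate(driver)
        for instance in aggregate.get_instances(urns):
            aggregate.delete_instance(instance)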
diff --git a/sfa/planetlab/pldriver.py b/sfa/planetlab/pldriver.py
index 1c0254a..5359473 100644
--- a/sfa/planetlab/pldriver.py
+++ b/sfa/planetlab/pldriver.py
@@ -615,28 +615,13 @@ class PlDriver (Driver):
         nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
 
         # update all sliver allocation states setting then to geni_allocated   
-        sliver_state_updated = {}
+        sliver_ids = []
         for node in nodes:
             sliver_hrn = '%s.%s-%s' % (self.hrn, slice['slice_id'], node['node_id'])
             sliver_id = Xrn(sliver_hrn, type='sliver').urn
-            sliver_state_updated[sliver_id] = False 
-
-        constraint = SliverAllocation.sliver_id.in_(sliver_state_updated.keys())
-        cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-        for sliver_allocation in cur_sliver_allocations:
-            sliver_allocation.allocation_state = 'geni_allocated'
-            sliver_state_updated[sliver_allocation.sliver_id] = True
-        dbsession.commit()
-
-        # Some states may not have been updated becuase no sliver allocation state record 
-        # exists for the sliver. Insert new allocation records for these slivers and set 
-        # it to geni_allocated.
-        for (sliver_id, state_updated) in sliver_state_updated.items():
-            if state_updated == False:
-                record = SliverAllocation(sliver_id=sliver_id, allocation_state='geni_allocated')
-                dbsession.add(record)
-        dbsession.commit()  
-   
+            sliver_ids.append(sliver_id)
+        SliverAllocation.set_allocations(sliver_ids, 'geni_allocated')
+         
         # add/remove links links 
         slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
 
@@ -666,16 +651,13 @@ class PlDriver (Driver):
         aggregate = PlAggregate(self)
         slivers = aggregate.get_slivers(urns)
         sliver_ids = [sliver['sliver_id'] for sliver in slivers]
-        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
-        cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-        for sliver_allocation in cur_sliver_allocations:
-            sliver_allocation.allocation_state = 'geni_provisioned'
-        dbsession.commit()
+        SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned')
      
         return self.describe(urns, None, options=options)
 
     def delete(self, urns, options={}):
-
+        # collect sliver ids so we can update sliver allocation states after
+        # we remove the slivers.
         aggregate = PlAggregate(self)
         slivers = aggregate.get_slivers(urns)
         slice_id = slivers[0]['slice_id'] 
@@ -697,12 +679,8 @@ class PlDriver (Driver):
  
             self.shell.DeleteSliceFromNodes(slice_id, node_ids)
  
-            # update slivera allocation states
-            constraint = SliverAllocation.sliver_id.in_(sliver_ids)
-            cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-            for sliver_allocation in cur_sliver_allocations:
-                dbsession.delete(sliver_allocation)
-            dbsession.commit()
+            # delete sliver allocation states
+            SliverAllocation.delete_allocations(sliver_ids)
         finally:
             if peer:
                 self.shell.BindObjectToPeer('slice', slice_id, peer, slice['peer_slice_id'])
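
The new SliverAllocation.set_allocations() and SliverAllocation.delete_allocations()
helpers are not shown in this commit; they live on the model in sfa/storage/model.py.
The sketch below reconstructs what they presumably do from the inline dbsession logic
removed from pldriver.py above. Column names come from that removed code; the real
implementation (and whether these are class methods or static methods) may differ:

    from sfa.storage.alchemy import dbsession
    from sfa.storage.model import SliverAllocation

    # Reconstruction of the helpers from the removed inline code above; the real
    # methods are defined on the SliverAllocation model itself.

    def set_allocations(sliver_ids, state):
        # update existing allocation records ...
        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
        sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
        updated = set()
        for sliver_allocation in sliver_allocations:
            sliver_allocation.allocation_state = state
            updated.add(sliver_allocation.sliver_id)
        # ... and insert records for slivers that do not have one yet
        for sliver_id in sliver_ids:
            if sliver_id not in updated:
                dbsession.add(SliverAllocation(sliver_id=sliver_id,
                                               allocation_state=state))
        dbsession.commit()

    def delete_allocations(sliver_ids):
        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
        sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
        for sliver_allocation in sliver_allocations:
            dbsession.delete(sliver_allocation)
        dbsession.commit()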