Merge branch 'master' into senslab2
[sfa.git] / sfa / openstack / osaggregate.py
index 73e0f61..d5d0bfa 100644 (file)
@@ -21,6 +21,7 @@ from sfa.planetlab.plxrn import PlXrn
 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
 from sfa.rspecs.version_manager import VersionManager
 from sfa.openstack.security_group import SecurityGroup
+from sfa.server.threadmanager import ThreadManager
 from sfa.util.sfalogging import logger
 
 def pubkeys_to_user_data(pubkeys):
@@ -91,28 +92,31 @@ class OSAggregate:
         zones = self.get_availability_zones()
         name = hrn_to_os_slicename(slice_xrn)
         instances = self.driver.shell.nova_manager.servers.findall(name=name)
-        rspec_nodes = []
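+        # index rspec nodes by component urn so that instances running on the
+        # same node show up as multiple slivers of a single rspec node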
+        node_dict = {}
         for instance in instances:
-            rspec_node = Node()
-            
-            #TODO: find a way to look up an instances availability zone in essex
-            #if instance.availability_zone:
-            #    node_xrn = OSXrn(instance.availability_zone, 'node')
-            #else:
-            #    node_xrn = OSXrn('cloud', 'node')
+            # determine node urn
             node_xrn = instance.metadata.get('component_id')
-            node_xrn
             if not node_xrn:
                 node_xrn = OSXrn('cloud', type='node')
             else:
-                node_xrn = OSXrn(xrn=node_xrn, type='node') 
+                node_xrn = OSXrn(xrn=node_xrn, type='node')
 
-            rspec_node['component_id'] = node_xrn.urn
-            rspec_node['component_name'] = node_xrn.name
-            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            if node_xrn.urn not in node_dict:
+                rspec_node = Node()
+                rspec_node['component_id'] = node_xrn.urn
+                rspec_node['component_name'] = node_xrn.name
+                rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+                rspec_node['slivers'] = []
+                rspec_node['services'] = []
+                rspec_node['interfaces'] = []
+                node_dict[node_xrn.urn] = rspec_node
+            else:
+                rspec_node = node_dict[node_xrn.urn]
+
+            if instance.metadata.get('client_id'):
+                rspec_node['client_id'] = instance.metadata.get('client_id')
+
             flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
             sliver = instance_to_sliver(flavor)
-            rspec_node['slivers'] = [sliver]
+            rspec_node['slivers'].append(sliver)
             image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
             if isinstance(image, list) and len(image) > 0:
                 image = image[0]
@@ -120,8 +124,19 @@ class OSAggregate:
             sliver['disk_image'] = [disk_image]
 
             # build interfaces            
-            interfaces = []
             addresses = instance.addresses
+            # HACK: public ips show up in the list of private addresses,
+            # which seems wrong. Assume the public ip is the last entry in
+            # the private list until the openstack bug is fixed.
+            if addresses.get('private'):
+                login = Login({'authentication': 'ssh-keys',
+                               'hostname': addresses.get('private')[-1]['addr'],
+                               'port':'22', 'username': 'root'})
+                service = Services({'login': login})
+                rspec_node['services'].append(service)
+
             for private_ip in addresses.get('private', []):
                 if_xrn = PlXrn(auth=self.driver.hrn, 
                                interface='node%s:eth0' % (instance.hostId)) 
@@ -129,11 +144,9 @@ class OSAggregate:
                 interface['ips'] =  [{'address': private_ip['addr'],
                                      #'netmask': private_ip['network'],
                                      'type': private_ip['version']}]
-                interfaces.append(interface)
-            rspec_node['interfaces'] = interfaces 
+                rspec_node['interfaces'].append(interface)
             
             # slivers always provide the ssh service
-            rspec_node['services'] = []
             for public_ip in addresses.get('public', []):
                 login = Login({'authentication': 'ssh-keys', 
                                'hostname': public_ip['addr'], 
@@ -141,7 +154,7 @@ class OSAggregate:
                 service = Services({'login': login})
                 rspec_node['services'].append(service)
-            rspec_nodes.append(rspec_node)
-        return rspec_nodes
+        return node_dict.values()
 
     def get_aggregate_nodes(self):
         zones = self.get_availability_zones()
@@ -271,6 +284,8 @@ class OSAggregate:
                     metadata['security_groups'] = group_name
                     if node.get('component_id'):
                         metadata['component_id'] = node['component_id']
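+                    # keep the client_id from the request rspec in the instance
+                    # metadata so it can be reported back in the manifest rspec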
+                    if node.get('client_id'):
+                        metadata['client_id'] = node['client_id']
                     self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                                                             image=image_id,
                                                             key_name = key_name,
@@ -284,20 +299,32 @@ class OSAggregate:
 
 
     def delete_instances(self, instance_name, tenant_name):
+
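+        # poll nova (for up to 10 seconds) until the instance is gone, then
+        # remove its security group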
+        def _delete_security_group(instance):
+            security_group = instance.metadata.get('security_groups', '')
+            if security_group:
+                manager = SecurityGroup(self.driver)
+                timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
+                start_time = time.time()
+                instance_deleted = False
+                while not instance_deleted and (time.time() - start_time) < timeout:
+                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
+                    if not inst:
+                        instance_deleted = True
+                    time.sleep(.5)
+                manager.delete_security_group(security_group)
+
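+        # delete security groups in separate threads so this call does not
+        # block on the polling loop in _delete_security_group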
+        thread_manager = ThreadManager()
         self.driver.shell.nova_manager.connect(tenant=tenant_name)
         instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
-        security_group_manager = SecurityGroup(self.driver)
         for instance in instances:
-            # deleate this instance's security groups
-            security_group = instance.metadata.get('security_groups', '')
-            if security_group:
-                # dont delete the default security group
-                if security_group != 'default': 
-                    security_group_manager.delete_security_group(security_group)
             # destroy instance
             self.driver.shell.nova_manager.servers.delete(instance)
+            # delete this instance's security group in a background thread
+            thread_manager.run(_delete_security_group, instance)
         return 1
 
+
     def stop_instances(self, instance_name, tenant_name):
         self.driver.shell.nova_manager.connect(tenant=tenant_name)
         instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)