X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fopenstack%2Fosaggregate.py;h=6e64408dbbb44d3e223116fb2ba68b05fd80d54e;hb=b8bbafc52d7735d4cae7e2042139ef2dd61b70c1;hp=9018d2fe086cd618888f106026cd8d9c700e8d42;hpb=e39d74b1efe38dff372bf423e62093c535e2cbda;p=sfa.git

diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
index 9018d2fe..6e64408d 100644
--- a/sfa/openstack/osaggregate.py
+++ b/sfa/openstack/osaggregate.py
@@ -21,6 +21,7 @@ from sfa.planetlab.plxrn import PlXrn
 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
 from sfa.rspecs.version_manager import VersionManager
 from sfa.openstack.security_group import SecurityGroup
+from sfa.server.threadmanager import ThreadManager
 from sfa.util.sfalogging import logger
 
 def pubkeys_to_user_data(pubkeys):
@@ -93,26 +94,25 @@ class OSAggregate:
         instances = self.driver.shell.nova_manager.servers.findall(name=name)
         rspec_nodes = []
         for instance in instances:
-            rspec_node = Node()
-
-            #TODO: find a way to look up an instances availability zone in essex
-            #if instance.availability_zone:
-            #    node_xrn = OSXrn(instance.availability_zone, 'node')
-            #else:
-            #    node_xrn = OSXrn('cloud', 'node')
+            # determine node urn
             node_xrn = instance.metadata.get('component_id')
-            node_xrn
             if not node_xrn:
                 node_xrn = OSXrn('cloud', type='node')
             else:
-                node_xrn = OSXrn(xrn=node_xrn, type='node')
+                node_xrn = OSXrn(xrn=node_xrn, type='node')
 
+            rspec_node = Node()
             rspec_node['component_id'] = node_xrn.urn
             rspec_node['component_name'] = node_xrn.name
             rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            rspec_node['slivers'] = []
+
+            if instance.metadata.get('client_id'):
+                rspec_node['client_id'] = instance.metadata.get('client_id')
+
             flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
             sliver = instance_to_sliver(flavor)
-            rspec_node['slivers'] = [sliver]
+            rspec_node['slivers'].append(sliver)
             image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
             if isinstance(image, list) and len(image) > 0:
                 image = image[0]
@@ -120,8 +120,19 @@ class OSAggregate:
             sliver['disk_image'] = [disk_image]
 
             # build interfaces
-            interfaces = []
+            rspec_node['services'] = []
+            rspec_node['interfaces'] = []
             addresses = instance.addresses
+            # HACK: public ips are stored in the list of private, but
+            # this seems wrong. Assume pub ip is the last in the list of
+            # private ips until openstack bug is fixed.
+            if addresses.get('private'):
+                login = Login({'authentication': 'ssh-keys',
+                               'hostname': addresses.get('private')[-1]['addr'],
+                               'port':'22', 'username': 'root'})
+                service = Services({'login': login})
+                rspec_node['services'].append(service)
+
             for private_ip in addresses.get('private', []):
                 if_xrn = PlXrn(auth=self.driver.hrn,
                                interface='node%s:eth0' % (instance.hostId))
@@ -129,11 +140,9 @@
                 interface['ips'] =  [{'address': private_ip['addr'],
                                      #'netmask': private_ip['network'],
                                      'type': private_ip['version']}]
-                interfaces.append(interface)
-            rspec_node['interfaces'] = interfaces
+                rspec_node['interfaces'].append(interface)
 
             # slivers always provide the ssh service
-            rspec_node['services'] = []
             for public_ip in addresses.get('public', []):
                 login = Login({'authentication': 'ssh-keys',
                                'hostname': public_ip['addr'],
@@ -259,22 +268,24 @@ class OSAggregate:
             if not instances:
                 continue
             for instance in instances:
-                metadata = {}
-                flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
-                image = instance.get('disk_image')
-                if image and isinstance(image, list):
-                    image = image[0]
-                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
-                fw_rules = instance.get('fw_rules', [])
-                group_name = self.create_security_group(instance_name, fw_rules)
-                metadata['security_groups'] = group_name
-                if node.get('component_id'):
-                    metadata['component_id'] = node['component_id']
                 try:
+                    metadata = {}
+                    flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+                    image = instance.get('disk_image')
+                    if image and isinstance(image, list):
+                        image = image[0]
+                    image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+                    fw_rules = instance.get('fw_rules', [])
+                    group_name = self.create_security_group(instance_name, fw_rules)
+                    metadata['security_groups'] = group_name
+                    if node.get('component_id'):
+                        metadata['component_id'] = node['component_id']
+                    if node.get('client_id'):
+                        metadata['client_id'] = node['client_id']
                     self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                                                                   image=image_id,
                                                                   key_name = key_name,
-                                                                  security_group = group_name,
+                                                                  security_groups = [group_name],
                                                                   files=files,
                                                                   meta=metadata,
                                                                   name=instance_name)
@@ -283,21 +294,35 @@
 
 
 
-    def delete_instances(self, instance_name):
-        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
-        security_group_manager = SecurityGroup(self.driver)
-        for instance in instances:
-            # deleate this instance's security groups
+    def delete_instances(self, instance_name, tenant_name):
+
+        def _delete_security_group(instance):
             security_group = instance.metadata.get('security_groups', '')
             if security_group:
-                # dont delete the default security group
-                if security_group != 'default':
-                    security_group_manager.delete_security_group(security_group)
+                manager = SecurityGroup(self.driver)
+                timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
+                start_time = time.time()
+                instance_deleted = False
+                while instance_deleted == False and (time.time() - start_time) < timeout:
+                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
+                    if not inst:
+                        instance_deleted = True
+                    time.sleep(.5)
+                manager.delete_security_group(security_group)
+
+        thread_manager = ThreadManager()
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+        for instance in instances:
             # destroy instance
             self.driver.shell.nova_manager.servers.delete(instance)
+            # delete this instance's security groups
+            thread_manager.run(_delete_security_group, instance)
         return 1
 
-    def stop_instances(self, instance_name):
+
+    def stop_instances(self, instance_name, tenant_name):
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
         instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
         for instance in instances:
             self.driver.shell.nova_manager.servers.pause(instance)
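
The delete_instances() rewrite above deletes the server first and then, from a ThreadManager worker, polls Nova until the instance is no longer listed (for at most 10 seconds) before removing its security group. The sketch below restates that pattern outside the SFA classes for reference; the nova client handle, the helper name and the timeout default are illustrative assumptions, not code taken from this patch.

# Standalone sketch of the delete-then-clean-up pattern used by delete_instances().
# Assumes a python-novaclient style client object named nova; names and values here
# are illustrative only.
import time
import threading

def delete_instance_and_security_group(nova, instance, wait_timeout=10.0):
    group_name = instance.metadata.get('security_groups', '')
    # destroy the instance first; its security group is cleaned up afterwards
    nova.servers.delete(instance)

    def _cleanup():
        # keep the old code's guard against removing the 'default' group
        if not group_name or group_name == 'default':
            return
        start = time.time()
        # poll until Nova stops listing the instance, or give up after the timeout
        while (time.time() - start) < wait_timeout:
            if not nova.servers.findall(id=instance.id):
                break
            time.sleep(0.5)
        group = nova.security_groups.find(name=group_name)
        nova.security_groups.delete(group)

    # run the clean-up in the background, mirroring ThreadManager.run() in the patch
    threading.Thread(target=_cleanup).start()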