X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fopenstack%2Fosaggregate.py;h=de3499556ed0e3a7bed5a846d160f55dd404b876;hb=1cc8e9613cab8b5b22478de369f259e591c54e6d;hp=3738a6bea97db53789f159c6d854b24f14a6031f;hpb=0988a22d73fd502b090614825fe6fd4e50d48bb2;p=sfa.git
diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
index 3738a6be..de349955 100644
--- a/sfa/openstack/osaggregate.py
+++ b/sfa/openstack/osaggregate.py
@@ -24,6 +24,7 @@ from sfa.planetlab.plxrn import PlXrn
 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
 from sfa.rspecs.version_manager import VersionManager
 from sfa.openstack.security_group import SecurityGroup
+from sfa.server.threadmanager import ThreadManager
 from sfa.util.sfalogging import logger
 
 def pubkeys_to_user_data(pubkeys):
@@ -36,19 +37,6 @@ def pubkeys_to_user_data(pubkeys):
         user_data += "\n"
     return user_data
 
-def instance_to_sliver(instance, xrn=None):
-    sliver_urn = None
-    if slice_xrn:
-        sliver_xrn = Xrn(xrn=slice_xrn, type='slice', id=instance.id).get_urn()
-
-    sliver = Sliver({'slice_id': sliver_urn,
-                     'name': instance.name,
-                     'type': instance.name,
-                     'cpus': str(instance.vcpus),
-                     'memory': str(instance.ram),
-                     'storage': str(instance.disk)})
-    return sliver
-
 def image_to_rspec_disk_image(image):
     img = DiskImage()
     img['name'] = image['name']
@@ -70,7 +58,6 @@ class OSAggregate:
         zones = [zone.name for zone in zones]
         return zones
 
-
     def list_resources(self, version=None, options={}):
         version_manager = VersionManager()
         version = version_manager.get_version(version)
@@ -85,18 +72,25 @@ class OSAggregate:
         tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
         self.driver.shell.nova_manager.connect(tenant=tenant_name)
         instances = self.get_instances(urns)
-        if len(instances) == 0:
-            raise SliverDoesNotExist("You have not allocated any slivers here")
+        # lookup the sliver allocations
+        sliver_ids = [sliver['sliver_id'] for sliver in slivers]
+        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+        sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
+        sliver_allocation_dict = {}
+        for sliver_allocation in sliver_allocations:
+            sliver_allocation_dict[sliver_allocation.sliver_id] = sliver_allocation
 
         geni_slivers = []
         rspec_nodes = []
         for instance in instances:
             rspec_nodes.append(self.instance_to_rspec_node(instance))
-            geni_slivers.append(self.instance_to_geni_sliver(instance))
+            geni_sliver = self.instance_to_geni_sliver(instance, sliver_allocation_dict)
+            geni_slivers.append(geni_sliver)
         version_manager = VersionManager()
         version = version_manager.get_version(version)
         rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
-        rspec = RSpec(version=version, user_options=options)
+        rspec = RSpec(version=rspec_version, user_options=options)
+        rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
         rspec.version.add_nodes(rspec_nodes)
         result = {'geni_urn': Xrn(urns[0]).get_urn(),
                   'geni_rspec': rspec.toxml(),
@@ -110,19 +104,20 @@ class OSAggregate:
         ids = set()
         for urn in urns:
             xrn = OSXrn(xrn=urn)
-            names.add(xrn.get_slice_name())
-            if xrn.id:
-                ids.add(xrn.id)
+            if xrn.type == 'slice':
+                names.add(xrn.get_slice_name())
+            elif xrn.type == 'sliver':
+                ids.add(xrn.leaf)
 
         # look up instances
         instances = []
-        for name in name:
-            servers = self.driver.shell.nova_manager.servers.findall(name=name)
-            instances.extend(servers)
-
-        # filter on id
+        filter = {}
+        if names:
+            filter['name'] = names
         if ids:
-            instances = [server in servers if server.id in ids]
+            filter['id'] = ids
+        servers = self.driver.shell.nova_manager.servers.findall(**filter)
+        instances.extend(servers)
 
         return instances
 
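The reworked get_instances() above collapses slice and sliver URNs into a single novaclient findall() query. The short standalone sketch below mirrors that filtering step; parse_urn() and the sample URNs are simplified illustrative stand-ins for OSXrn, not part of the patch.

# Illustrative sketch: mirrors the name/id filter that the patched
# get_instances() builds before calling nova_manager.servers.findall(**filter).
# parse_urn() is a simplified stand-in for OSXrn and is not part of SFA.
def parse_urn(urn):
    # 'urn:publicid:IDN+plc:openlab+slice+foo' -> ('slice', 'foo')
    parts = urn.split('+')
    return parts[-2], parts[-1]

def build_server_filter(urns):
    names, ids = set(), set()
    for urn in urns:
        urn_type, leaf = parse_urn(urn)
        if urn_type == 'slice':
            names.add(leaf)        # slice URNs filter servers by name
        elif urn_type == 'sliver':
            ids.add(leaf)          # sliver URNs filter servers by id
    filter = {}
    if names:
        filter['name'] = names
    if ids:
        filter['id'] = ids
    return filter

# build_server_filter(['urn:publicid:IDN+plc:openlab+slice+foo',
#                      'urn:publicid:IDN+plc:openlab+sliver+42'])
# -> {'name': set(['foo']), 'id': set(['42'])}
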
@@ -205,9 +200,10 @@ class OSAggregate:
 
     def instance_to_sliver(self, instance, xrn=None):
         if xrn:
-            xrn = Xrn(xrn=slice_xrn, type='slice', id=instance.id).get_urn()
+            sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+            sliver_id = Xrn(sliver_hrn, type='sliver').urn
 
-        sliver = Sliver({'sliver_id': xrn.get_urn(),
+        sliver = Sliver({'sliver_id': sliver_id,
                          'name': instance.name,
                          'type': instance.name,
                          'cpus': str(instance.vcpus),
@@ -215,28 +211,38 @@ class OSAggregate:
                          'storage': str(instance.disk)})
         return sliver
 
-    def instance_to_geni_sliver(self, instance):
-        op_status = "geni_unknown"
-        state = instance.state.lower()
-        if state == 'active':
-            op_status = 'geni_ready'
-        elif state == 'building':
-            op_status = 'geni_configuring'
-        elif state == 'failed':
-            op_status =' geni_failed'
-
-        urn = OSXrn(name=instance.name, type='slice', id=instance.id).get_urn()
+    def instance_to_geni_sliver(self, instance, sliver_allocations = {}):
+        sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+        sliver_id = Xrn(sliver_hrn, type='sliver').urn
+
+        # set sliver allocation and operational status
+        sliver_allocation = sliver_allocations[sliver_id]
+        if sliver_allocation:
+            allocation_status = sliver_allocation.allocation_state
+            if allocation_status == 'geni_allocated':
+                op_status = 'geni_pending_allocation'
+            elif allocation_status == 'geni_provisioned':
+                state = instance.state.lower()
+                if state == 'active':
+                    op_status = 'geni_ready'
+                elif state == 'building':
+                    op_status = 'geni_notready'
+                elif state == 'failed':
+                    op_status = 'geni_failed'
+                else:
+                    op_status = 'geni_unknown'
+            else:
+                allocation_status = 'geni_unallocated'
         # required fields
-        geni_sliver = {'geni_sliver_urn': urn,
+        geni_sliver = {'geni_sliver_urn': sliver_id,
                        'geni_expires': None,
-                       'geni_allocation_status': 'geni_provisioned',
+                       'geni_allocation_status': allocation_status,
                        'geni_operational_status': op_status,
                        'geni_error': None,
                        'plos_created_at': datetime_to_string(utcparse(instance.created)),
                        'plos_sliver_type': self.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
                        }
-
         return geni_sliver
 
     def get_aggregate_nodes(self):
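instance_to_geni_sliver() above derives both GENI statuses from the stored sliver allocation plus the live nova state. The standalone sketch below restates that mapping as a pure function; the helper name is illustrative, and the 'geni_unknown' operational status on the unallocated branch is an assumption, since the patch leaves op_status unset on that path.

# Illustrative sketch: the status mapping used by the patched
# instance_to_geni_sliver(), restated as a pure function.
def geni_statuses(allocation_state, nova_state):
    """Return (geni_allocation_status, geni_operational_status)."""
    if allocation_state == 'geni_allocated':
        return 'geni_allocated', 'geni_pending_allocation'
    if allocation_state == 'geni_provisioned':
        op_status = {'active': 'geni_ready',
                     'building': 'geni_notready',
                     'failed': 'geni_failed'}.get(nova_state.lower(), 'geni_unknown')
        return 'geni_provisioned', op_status
    # no allocation record: report unallocated; the operational status here is
    # an assumption, as the patch leaves op_status unset on this path
    return 'geni_unallocated', 'geni_unknown'

# geni_statuses('geni_provisioned', 'ACTIVE')  -> ('geni_provisioned', 'geni_ready')
# geni_statuses('geni_allocated', 'BUILDING')  -> ('geni_allocated', 'geni_pending_allocation')
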
@@ -263,7 +269,7 @@ class OSAggregate:
                 sliver = self.instance_to_sliver(instance)
                 sliver['disk_image'] = disk_images
                 slivers.append(sliver)
-    
+
         rspec_node['available'] = 'true'
         rspec_node['slivers'] = slivers
         rspec_nodes.append(rspec_node)
@@ -314,6 +320,11 @@ class OSAggregate:
                                                  cidr_ip = rule.get('cidr_ip'),
                                                  port_range = rule.get('port_range'),
                                                  icmp_type_code = rule.get('icmp_type_code'))
+        # Open ICMP by default
+        security_group.add_rule_to_group(group_name,
+                                         protocol = "icmp",
+                                         cidr_ip = "0.0.0.0/0",
+                                         icmp_type_code = "-1:-1")
         return group_name
 
     def add_rule_to_security_group(self, group_name, **kwds):
@@ -347,7 +358,9 @@ class OSAggregate:
         files = {'/root/.ssh/authorized_keys': authorized_keys}
         rspec = RSpec(rspec)
         requested_instances = defaultdict(list)
+
         # iterate over clouds/zones/nodes
+        slivers = []
         for node in rspec.version.get_nodes_with_slivers():
             instances = node.get('slivers', [])
             if not instances:
@@ -359,6 +372,8 @@ class OSAggregate:
                 image = instance.get('disk_image')
                 if image and isinstance(image, list):
                     image = image[0]
+                else:
+                    raise InvalidRSpec("Must specify a disk_image for each VM")
                 image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                 fw_rules = instance.get('fw_rules', [])
                 group_name = self.create_security_group(instance_name, fw_rules)
@@ -367,39 +382,41 @@ class OSAggregate:
                     metadata['component_id'] = node['component_id']
                 if node.get('client_id'):
                     metadata['client_id'] = node['client_id']
-                self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+                server = self.driver.shell.nova_manager.servers.create(
+                                                            flavor=flavor_id,
                                                             image=image_id,
                                                             key_name = key_name,
                                                             security_groups = [group_name],
                                                             files=files,
                                                             meta=metadata,
                                                             name=instance_name)
+                slivers.append(server)
             except Exception, err:
                 logger.log_exc(err)
+        return slivers
 
-
-    def delete_instance(self, tenant_name, instance_name, id=None):
+    def delete_instance(self, instance):
 
-        def _delete_security_group(instance):
-            security_group = instance.metadata.get('security_groups', '')
+        def _delete_security_group(inst):
+            security_group = inst.metadata.get('security_groups', '')
             if security_group:
                 manager = SecurityGroup(self.driver)
                 timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
                 start_time = time.time()
                 instance_deleted = False
                 while instance_deleted == False and (time.time() - start_time) < timeout:
-                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
-                    if not inst:
+                    tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
+                    if not tmp_inst:
                         instance_deleted = True
                     time.sleep(.5)
                 manager.delete_security_group(security_group)
 
-        thread_manager = ThreadManager()
-        self.driver.shell.nova_manager.connect(tenant=tenant_name)
-        args = {'name': instance_name}
-        if id:
-            args['id'] = id
+        thread_manager = ThreadManager()
+        tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)
+        self.driver.shell.nova_manager.connect(tenant=tenant.name)
+        args = {'name': instance.name,
+                'id': instance.id}
         instances = self.driver.shell.nova_manager.servers.findall(**args)
         security_group_manager = SecurityGroup(self.driver)
         for instance in instances:
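The reworked delete_instance() above waits for nova to report the instance gone (up to a 10 second timeout) before removing its security group. The sketch below captures that poll-then-cleanup pattern as a generic helper; wait_until_gone() is illustrative and not part of the SFA API.

import time

# Illustrative sketch: the poll-until-gone pattern used by the patched
# _delete_security_group(), written as a reusable helper.  lookup() is any
# callable that returns a non-empty value while the resource still exists.
def wait_until_gone(lookup, timeout=10.0, interval=0.5):
    start_time = time.time()
    while (time.time() - start_time) < timeout:
        if not lookup():
            return True            # resource disappeared within the timeout
        time.sleep(interval)
    return False                   # timed out; caller may force the cleanup

# e.g. wait_until_gone(lambda: nova_manager.servers.findall(id=instance.id))
# before calling manager.delete_security_group(security_group)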