From e39d74b1efe38dff372bf423e62093c535e2cbda Mon Sep 17 00:00:00 2001
From: Tony Mack
Date: Tue, 14 Aug 2012 15:29:50 -0400
Subject: [PATCH] connect to nova using the correct tenant when managing instances

---
 sfa/openstack/nova_driver.py | 24 ++++++++----------------
 sfa/openstack/osaggregate.py | 26 +++++++++++++++++++++-----
 2 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/sfa/openstack/nova_driver.py b/sfa/openstack/nova_driver.py
index 27795f73..3ddf7830 100644
--- a/sfa/openstack/nova_driver.py
+++ b/sfa/openstack/nova_driver.py
@@ -388,6 +388,10 @@ class NovaDriver(Driver):
         return rspec
 
     def sliver_status (self, slice_urn, slice_hrn):
+        # update nova connection
+        tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
+        self.shell.nova_manager.connect(tenant=tenant_name)
+
         # find out where this slice is currently running
         project_name = hrn_to_os_slicename(slice_hrn)
         instances = self.shell.nova_manager.servers.findall(name=project_name)
@@ -432,22 +436,7 @@ class NovaDriver(Driver):
 
     def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
         aggregate = OSAggregate(self)
-        rspec = RSpec(rspec_string)
-        instance_name = hrn_to_os_slicename(slice_hrn)
-        # make sure a tenant exists for this slice
-        tenant = aggregate.create_tenant(slice_hrn)
-
-        # add the sfa admin user to this tenant and update our nova client connection
-        # to use these credentials for the rest of this session. This emsures that the instances
-        # we create will be assigned to the correct tenant.
-        sfa_admin_user = self.shell.auth_manager.users.find(name=self.shell.auth_manager.opts['OS_USERNAME'])
-        user_role = self.shell.auth_manager.roles.find(name='user')
-        admin_role = self.shell.auth_manager.roles.find(name='admin')
-        self.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
-        self.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
-        self.shell.nova_manager.connect(tenant=tenant.name)
-
         # assume first user is the caller and use their context
         # for the ec2/euca api connection. Also, use the first users
         # key as the project key.
@@ -460,7 +449,10 @@ class NovaDriver(Driver):
         for user in users:
             pubkeys.extend(user['keys'])
-        aggregate.run_instances(instance_name, rspec_string, key_name, pubkeys)
+        rspec = RSpec(rspec_string)
+        instance_name = hrn_to_os_slicename(slice_hrn)
+        tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
+        aggregate.run_instances(instance_name, tenant_name, rspec_string, key_name, pubkeys)
 
         return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)

diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
index 519f8e82..9018d2fe 100644
--- a/sfa/openstack/osaggregate.py
+++ b/sfa/openstack/osaggregate.py
@@ -84,6 +84,10 @@ class OSAggregate:
         return zones
 
     def get_slice_nodes(self, slice_xrn):
+        # update nova connection
+        tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+
         zones = self.get_availability_zones()
         name = hrn_to_os_slicename(slice_xrn)
         instances = self.driver.shell.nova_manager.servers.findall(name=name)
@@ -170,8 +174,7 @@ class OSAggregate:
 
         return rspec_nodes
 
-    def create_tenant(self, slice_hrn):
-        tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
+    def create_tenant(self, tenant_name):
         tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
         if not tenants:
             self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
@@ -228,11 +231,24 @@ class OSAggregate:
 
 
-    def run_instances(self, slicename, rspec, key_name, pubkeys):
+    def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
         #logger.debug('Reserving an instance: image: %s, flavor: ' \
         #    '%s, key: %s, name: %s' % \
         #    (image_id, flavor_id, key_name, slicename))
+
+        # make sure a tenant exists for this slice
+        tenant = self.create_tenant(tenant_name)
+
+        # add the sfa admin user to this tenant and update our nova client connection
+        # to use these credentials for the rest of this session. This ensures that the instances
+        # we create will be assigned to the correct tenant.
+        sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
+        user_role = self.driver.shell.auth_manager.roles.find(name='user')
+        admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
+        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
+        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
+        self.driver.shell.nova_manager.connect(tenant=tenant.name)
+
         authorized_keys = "\n".join(pubkeys)
         files = {'/root/.ssh/authorized_keys': authorized_keys}
         rspec = RSpec(rspec)
@@ -250,7 +266,7 @@ class OSAggregate:
             image = image[0]
             image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
             fw_rules = instance.get('fw_rules', [])
-            group_name = self.create_security_group(slicename, fw_rules)
+            group_name = self.create_security_group(instance_name, fw_rules)
             metadata['security_groups'] = group_name
             if node.get('component_id'):
                 metadata['component_id'] = node['component_id']
@@ -261,7 +277,7 @@ class OSAggregate:
                                 security_group = group_name,
                                 files=files,
                                 meta=metadata,
-                                name=slicename)
+                                name=instance_name)
             except Exception, err:
                 logger.log_exc(err)
-- 
2.43.0
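
For context (not part of the patch, and below the signature delimiter so git am ignores it):
the sketch that follows illustrates the tenant-scoping pattern this change applies in
sliver_status(), get_slice_nodes() and run_instances(): resolve the tenant name from the
slice xrn, reconnect the nova client under that tenant, and only then touch instances.
The NovaShell stub and slice_tenant_name() helper are hypothetical stand-ins; only the
connect(tenant=...) followed by server lookup/creation flow mirrors the interfaces used
in the diff above.

# Minimal, self-contained sketch of "connect as the slice's tenant before acting".
class NovaShell(object):
    """Hypothetical stand-in for the driver.shell nova_manager wrapper."""
    def __init__(self):
        self.tenant = None

    def connect(self, tenant):
        # In SFA this rebuilds the nova client with the tenant's credentials;
        # here we only record which tenant is currently active.
        self.tenant = tenant

    def findall_servers(self, name):
        # A real tenant-scoped client would only return servers owned by
        # the currently connected tenant; this stub returns nothing.
        return []

def slice_tenant_name(slice_hrn):
    # Hypothetical equivalent of OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    return slice_hrn.rsplit('.', 1)[0]

def get_slice_nodes(shell, slice_hrn):
    # The pattern the patch adds: reconnect under the slice's tenant first,
    # then query (or create) instances, so they land in the right tenant.
    shell.connect(tenant=slice_tenant_name(slice_hrn))
    return shell.findall_servers(name=slice_hrn)

if __name__ == '__main__':
    shell = NovaShell()
    print(get_slice_nodes(shell, 'plc.princeton.slice1'))

The reason for reconnecting per request is that the nova client is tenant-scoped: any
list or create issued before connect() runs against whatever tenant the previous request
used, which is the misassignment this patch fixes.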