user_data += "\n"
return user_data
-def instance_to_sliver(instance, slice_xrn=None):
- sliver_id = None
+def instance_to_sliver(instance, xrn=None):
+ sliver_urn = None
if slice_xrn:
- xrn = Xrn(slice_xrn, 'slice')
- sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)
+ sliver_xrn = Xrn(xrn=slice_xrn, type='slice', id=instance.id).get_urn()
- sliver = Sliver({'slice_id': sliver_id,
+ sliver = Sliver({'slice_id': sliver_urn,
'name': instance.name,
'type': instance.name,
'cpus': str(instance.vcpus),
def __init__(self, driver):
    # Keep a handle on the OpenStack driver; all nova/keystone/glance
    # calls below go through self.driver.shell.*
    self.driver = driver
- def get_rspec(self, slice_xrn=None, version=None, options={}):
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- if not slice_xrn:
- rspec_version = version_manager._get_version(version.type, version.version, 'ad')
- nodes = self.get_aggregate_nodes()
- else:
- rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
- nodes = self.get_slice_nodes(slice_xrn)
- rspec = RSpec(version=rspec_version, user_options=options)
- rspec.version.add_nodes(nodes)
- return rspec.toxml()
-
def get_availability_zones(self):
    """Return the list of availability-zone names known to nova.

    Falls back to the single pseudo-zone ['cloud'] when nova reports
    no dns domains at all.
    """
    domains = self.driver.shell.nova_manager.dns_domains.domains()
    if domains:
        return [domain.name for domain in domains]
    return ['cloud']
+
def describe(self, urns, version=None, options={}):
    # NOTE(review): this stub is shadowed by the later describe()
    # definition in this class — Python keeps only the last binding,
    # so this body is dead code. One of the two should be removed.
    return {}
+
def list_resources(self, version=None, options={}):
    """Return an advertisement RSpec (XML string) for the whole aggregate.

    :param version: requested rspec version descriptor (resolved through
        VersionManager; None selects the default version)
    :param options: user options forwarded to the RSpec builder
    :returns: the advertisement rspec serialized to XML
    """
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    # Fix: the 'ad' (advertisement) flavor was computed here but the
    # plain version object was passed to RSpec, dropping the ad content
    # type — pass rspec_version instead.
    rspec_version = version_manager._get_version(version.type, version.version, 'ad')
    rspec = RSpec(version=rspec_version, user_options=options)
    nodes = self.get_aggregate_nodes()
    rspec.version.add_nodes(nodes)
    return rspec.toxml()
+
def describe(self, urns, version=None, options={}):
    """Return a GENI describe() result for the slivers named by urns.

    :param urns: list of slice/sliver URNs; all slivers of an openstack
        slice live under a single slice urn, so the first entry is used
    :param version: requested rspec version descriptor (None = default)
    :param options: user options forwarded to the RSpec builder
    :returns: dict with 'geni_urn', 'geni_rspec' (manifest XML) and
        'geni_slivers' keys per the GENI AM API
    """
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    # Fix: the manifest flavor was computed but never passed to RSpec.
    rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
    rspec = RSpec(version=rspec_version, user_options=options)
    # Fix: the body referenced an undefined 'slice_xrn' — derive it from
    # the urns argument (empty urns list yields no nodes).
    slice_xrn = urns[0] if urns else None
    nodes = self.get_slice_nodes(slice_xrn)
    rspec.version.add_nodes(nodes)
    result = {'geni_urn': slice_xrn or '',
              'geni_rspec': rspec.toxml(),
              'geni_slivers': []}
    return result
+
def get_slice_nodes(self, slice_xrn):
    """Build manifest RSpec Node elements for every instance of a slice.

    Instances are grouped by their node (component) URN so that several
    instances on the same node become several slivers on one Node.
    Returns the values view of the per-node dict of RSpec Node objects.
    """
    # update nova connection: scope the session to the slice's tenant
    # before querying servers
    tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
    self.driver.shell.nova_manager.connect(tenant=tenant_name)

    zones = self.get_availability_zones()  # NOTE(review): unused below — confirm whether zone handling was dropped deliberately
    name = hrn_to_os_slicename(slice_xrn)
    instances = self.driver.shell.nova_manager.servers.findall(name=name)
    node_dict = {}  # component urn -> RSpec Node
    for instance in instances:
        # determine node urn: prefer the component_id recorded in the
        # instance metadata at creation time, else a generic 'cloud' node
        node_xrn = instance.metadata.get('component_id')
        if not node_xrn:
            node_xrn = OSXrn('cloud', type='node')
        else:
            node_xrn = OSXrn(xrn=node_xrn, type='node')

        # create the Node on first sight of this urn, reuse it afterwards
        if not node_xrn.urn in node_dict:
            rspec_node = Node()
            rspec_node['component_id'] = node_xrn.urn
            rspec_node['component_name'] = node_xrn.name
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['slivers'] = []
            node_dict[node_xrn.urn] = rspec_node
        else:
            rspec_node = node_dict[node_xrn.urn]
        # one sliver per instance, typed after its nova flavor
        flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
        sliver = instance_to_sliver(flavor)
        rspec_node['slivers'].append(sliver)
        image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
        if isinstance(image, list) and len(image) > 0:
            image = image[0]
        sliver['disk_image'] = [disk_image]  # NOTE(review): 'disk_image' is undefined in this view — presumably built from 'image' on a line elided here; confirm
        # build interfaces
        # NOTE(review): resetting services/interfaces per instance clobbers
        # entries added for an earlier instance that shares the same node,
        # unlike the append-style handling of 'slivers' — confirm intended
        rspec_node['services'] = []
        rspec_node['interfaces'] = []
        addresses = instance.addresses
        # HACK: public ips are stored in the list of private, but
        # this seems wrong. Assume pub ip is the last in the list of
        # private ips until openstack bug is fixed.
        if addresses.get('private'):
            login = Login({'authentication': 'ssh-keys',
                           'hostname': addresses.get('private')[-1]['addr'],
                           'port':'22', 'username': 'root'})
            service = Services({'login': login})
            rspec_node['services'].append(service)

        for private_ip in addresses.get('private', []):
            if_xrn = PlXrn(auth=self.driver.hrn,
                           interface='node%s:eth0' % (instance.hostId))
            # NOTE(review): 'interface' is undefined in this view — its
            # construction from if_xrn appears elided from this hunk; confirm
            interface['ips'] = [{'address': private_ip['addr'],
                                 #'netmask': private_ip['network'],
                                 'type': private_ip['version']}]
            rspec_node['interfaces'].append(interface)
        # slivers always provide the ssh service
        for public_ip in addresses.get('public', []):
            login = Login({'authentication': 'ssh-keys',
                           'hostname': public_ip['addr'],
                           'port':'22', 'username': 'root'})
            service = Services({'login': login})
            rspec_node['services'].append(service)
    return node_dict.values()
def get_aggregate_nodes(self):
zones = self.get_availability_zones()
return rspec_nodes
def create_tenant(self, tenant_name):
    """Return the keystone tenant named tenant_name, creating it on demand.

    The tenant's description is set to its own name when created.
    """
    manager = self.driver.shell.auth_manager.tenants
    existing = manager.findall(name=tenant_name)
    if existing:
        return existing[0]
    # not found: create it, then look it up to get the server-side object
    manager.create(tenant_name, tenant_name)
    return manager.find(name=tenant_name)
+
+
def create_instance_key(self, slice_hrn, user):
slice_name = Xrn(slice_hrn).leaf
user_name = Xrn(user['urn']).leaf
- key_name = "%s:%s" % (slice_name, user_name)
+ key_name = "%s_%s" % (slice_name, user_name)
pubkey = user['keys'][0]
key_found = False
existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
- def run_instances(self, slicename, rspec, key_name, pubkeys):
+ def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
#logger.debug('Reserving an instance: image: %s, flavor: ' \
# '%s, key: %s, name: %s' % \
# (image_id, flavor_id, key_name, slicename))
+ # make sure a tenant exists for this slice
+ tenant = self.create_tenant(tenant_name)
+
+ # add the sfa admin user to this tenant and update our nova client connection
+ # to use these credentials for the rest of this session. This emsures that the instances
+ # we create will be assigned to the correct tenant.
+ sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
+ user_role = self.driver.shell.auth_manager.roles.find(name='user')
+ admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
+ self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
+ self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
+ self.driver.shell.nova_manager.connect(tenant=tenant.name)
+
authorized_keys = "\n".join(pubkeys)
files = {'/root/.ssh/authorized_keys': authorized_keys}
rspec = RSpec(rspec)
if not instances:
continue
for instance in instances:
- metadata = {}
- flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
- image = instance.get('disk_image')
- if image and isinstance(image, list):
- image = image[0]
- image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
- fw_rules = instance.get('fw_rules', [])
- group_name = self.create_security_group(slicename, fw_rules)
- metadata['security_groups'] = group_name
- if node.get('component_id'):
- metadata['component_id'] = node['component_id']
try:
+ metadata = {}
+ flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+ image = instance.get('disk_image')
+ if image and isinstance(image, list):
+ image = image[0]
+ image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+ fw_rules = instance.get('fw_rules', [])
+ group_name = self.create_security_group(instance_name, fw_rules)
+ metadata['security_groups'] = group_name
+ if node.get('component_id'):
+ metadata['component_id'] = node['component_id']
self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
image=image_id,
key_name = key_name,
- security_group = group_name,
+ security_groups = [group_name],
files=files,
meta=metadata,
- name=slicename)
+ name=instance_name)
except Exception, err:
logger.log_exc(err)
def delete_instances(self, instance_name, tenant_name):
    """Delete every nova server named instance_name within tenant_name.

    Returns 1 unconditionally as a legacy success flag.
    """
    # scope the nova session to the slice's tenant before the lookup
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    # NOTE(review): security_group_manager is unused here — the
    # security-group cleanup code may be elided from this view; confirm
    security_group_manager = SecurityGroup(self.driver)
    for instance in instances:
        self.driver.shell.nova_manager.servers.delete(instance)
    return 1
def stop_instances(self, instance_name, tenant_name):
    """Pause every nova server named instance_name within tenant_name."""
    # scope the nova session to the slice's tenant before the lookup
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    matching = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    for server in matching:
        self.driver.shell.nova_manager.servers.pause(server)