import socket
import base64
import string
-import random
+import random
+import time
from collections import defaultdict
from nova.exception import ImageNotFound
from nova.api.ec2.cloud import CloudController
-from sfa.util.faults import SfaAPIError
+from sfa.util.faults import SfaAPIError, SliverDoesNotExist
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.node import Node
user_data += "\n"
return user_data
-def instance_to_sliver(instance, slice_xrn=None):
- sliver_id = None
+def instance_to_sliver(instance, xrn=None):
+ sliver_urn = None
if slice_xrn:
- xrn = Xrn(slice_xrn, 'slice')
- sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)
+ sliver_xrn = Xrn(xrn=slice_xrn, type='slice', id=instance.id).get_urn()
- sliver = Sliver({'slice_id': sliver_id,
+ sliver = Sliver({'slice_id': sliver_urn,
'name': instance.name,
'type': instance.name,
'cpus': str(instance.vcpus),
     def __init__(self, driver):
+        # driver exposes the nova/glance/keystone shell connections used below
         self.driver = driver
- def get_rspec(self, slice_xrn=None, version=None, options={}):
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- if not slice_xrn:
- rspec_version = version_manager._get_version(version.type, version.version, 'ad')
- nodes = self.get_aggregate_nodes()
- else:
- rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
- nodes = self.get_slice_nodes(slice_xrn)
- rspec = RSpec(version=rspec_version, user_options=options)
- rspec.version.add_nodes(nodes)
- return rspec.toxml()
-
     def get_availability_zones(self):
+        """Return the list of availability zone names; fall back to ['cloud']."""
-        # essex release
         zones = self.driver.shell.nova_manager.dns_domains.domains()
-
         if not zones:
             zones = ['cloud']
         else:
             zones = [zone.name for zone in zones]
         return zones
- def get_slice_nodes(self, slice_xrn):
+
+    def list_resources(self, version=None, options={}):
+        """Return an advertisement ('ad') RSpec listing all aggregate nodes.
+
+        :param version: requested rspec version, or None for the default
+        :param options: geni options dict passed through to the RSpec
+        :returns: the ad RSpec serialized as an XML string
+        """
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        # BUG FIX: build the RSpec from the 'ad'-flavored rspec_version; it was
+        # computed and then ignored in favor of the bare version object
+        rspec = RSpec(version=rspec_version, user_options=options)
+        nodes = self.get_aggregate_nodes()
+        rspec.version.add_nodes(nodes)
+        return rspec.toxml()
+
+    def describe(self, urns, version=None, options={}):
+        """Return a GENI describe() struct (manifest rspec + geni_slivers).
+
+        :param urns: slice/sliver urns; the first urn's tenant scopes the nova
+            connection
+        :param version: requested rspec version, or None for the default
+        :param options: geni options dict passed through to the RSpec
+        :raises SliverDoesNotExist: when no instances match *urns*
+        """
         # update nova connection
-        tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
-        self.driver.shell.nova_manager.connect(tenant=tenant_name)
-
-        zones = self.get_availability_zones()
-        name = hrn_to_os_slicename(slice_xrn)
-        instances = self.driver.shell.nova_manager.servers.findall(name=name)
+        tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        instances = self.get_instances(urns)
+        if len(instances) == 0:
+            raise SliverDoesNotExist("You have not allocated any slivers here")
+
+        geni_slivers = []
         rspec_nodes = []
         for instance in instances:
-            rspec_node = Node()
-
-            #TODO: find a way to look up an instances availability zone in essex
-            #if instance.availability_zone:
-            #    node_xrn = OSXrn(instance.availability_zone, 'node')
-            #else:
-            #    node_xrn = OSXrn('cloud', 'node')
-            node_xrn = instance.metadata.get('component_id')
-            node_xrn
-            if not node_xrn:
-                node_xrn = OSXrn('cloud', type='node')
-            else:
-                node_xrn = OSXrn(xrn=node_xrn, type='node')
-
-            rspec_node['component_id'] = node_xrn.urn
-            rspec_node['component_name'] = node_xrn.name
-            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
-            flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
-            sliver = instance_to_sliver(flavor)
-            rspec_node['slivers'] = [sliver]
-            image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
-            if isinstance(image, list) and len(image) > 0:
-                image = image[0]
-            disk_image = image_to_rspec_disk_image(image)
-            sliver['disk_image'] = [disk_image]
-
-            # build interfaces
-            interfaces = []
-            addresses = instance.addresses
-            for private_ip in addresses.get('private', []):
-                if_xrn = PlXrn(auth=self.driver.hrn,
-                               interface='node%s:eth0' % (instance.hostId))
-                interface = Interface({'component_id': if_xrn.urn})
-                interface['ips'] = [{'address': private_ip['addr'],
-                                    #'netmask': private_ip['network'],
-                                    'type': private_ip['version']}]
-                interfaces.append(interface)
-            rspec_node['interfaces'] = interfaces
-
-            # slivers always provide the ssh service
-            rspec_node['services'] = []
-            for public_ip in addresses.get('public', []):
-                login = Login({'authentication': 'ssh-keys',
-                               'hostname': public_ip['addr'],
-                               'port':'22', 'username': 'root'})
-                service = Services({'login': login})
-                rspec_node['services'].append(service)
-            rspec_nodes.append(rspec_node)
-        return rspec_nodes
+            rspec_nodes.append(self.instance_to_rspec_node(instance))
+            geni_slivers.append(self.instance_to_geni_sliver(instance))
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+        # BUG FIX: build the RSpec from the 'manifest'-flavored rspec_version;
+        # it was computed and then ignored in favor of the bare version object
+        rspec = RSpec(version=rspec_version, user_options=options)
+        rspec.version.add_nodes(rspec_nodes)
+        result = {'geni_urn': Xrn(urns[0]).get_urn(),
+                  'geni_rspec': rspec.toxml(),
+                  'geni_slivers': geni_slivers}
+
+        return result
+
+    def get_instances(self, urns):
+        """Return the nova server instances identified by *urns*.
+
+        Each urn contributes a slice name to look up; urns that also carry an
+        instance id restrict the result set to those ids.
+        """
+        # parse slice names and sliver ids
+        names = set()
+        ids = set()
+        for urn in urns:
+            xrn = OSXrn(xrn=urn)
+            names.add(xrn.get_slice_name())
+            if xrn.id:
+                ids.add(xrn.id)
+
+        # look up instances
+        instances = []
+        # BUG FIX: iterate the collected *names* set (was: "for name in name",
+        # a NameError at runtime)
+        for name in names:
+            servers = self.driver.shell.nova_manager.servers.findall(name=name)
+            instances.extend(servers)
+
+        # filter on id
+        if ids:
+            # BUG FIX: the original comprehension was a SyntaxError and filtered
+            # only the last name's servers instead of all collected instances
+            instances = [server for server in instances if server.id in ids]
+        return instances
+
+    def instance_to_rspec_node(self, instance):
+        """Convert a nova server instance into a manifest RSpec Node element."""
+        # determine node urn; instances tagged with a component_id keep it,
+        # otherwise they are attributed to the generic 'cloud' node
+        node_xrn = instance.metadata.get('component_id')
+        if not node_xrn:
+            node_xrn = OSXrn('cloud', type='node')
+        else:
+            node_xrn = OSXrn(xrn=node_xrn, type='node')
+
+        rspec_node = Node()
+        rspec_node['component_id'] = node_xrn.urn
+        rspec_node['component_name'] = node_xrn.name
+        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+        if instance.metadata.get('client_id'):
+            rspec_node['client_id'] = instance.metadata.get('client_id')
+        flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
+        # BUG FIX: keep a reference to the sliver so the disk image can be
+        # attached below ('sliver' was previously an undefined name)
+        sliver = self.instance_to_sliver(flavor)
+        rspec_node['slivers'] = [sliver]
+        image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
+        if isinstance(image, list) and len(image) > 0:
+            image = image[0]
+        disk_image = image_to_rspec_disk_image(image)
+        sliver['disk_image'] = [disk_image]
+
+        # build interfaces
+        rspec_node['services'] = []
+        rspec_node['interfaces'] = []
+        addresses = instance.addresses
+        # HACK: public ips are stored in the list of private, but
+        # this seems wrong. Assume pub ip is the last in the list of
+        # private ips until openstack bug is fixed.
+        if addresses.get('private'):
+            login = Login({'authentication': 'ssh-keys',
+                           'hostname': addresses.get('private')[-1]['addr'],
+                           'port':'22', 'username': 'root'})
+            service = Services({'login': login})
+            rspec_node['services'].append(service)
+
+        for private_ip in addresses.get('private', []):
+            if_xrn = PlXrn(auth=self.driver.hrn,
+                           interface='node%s:eth0' % (instance.hostId))
+            interface = Interface({'component_id': if_xrn.urn})
+            interface['ips'] = [{'address': private_ip['addr'],
+                                #'netmask': private_ip['network'],
+                                'type': private_ip['version']}]
+            rspec_node['interfaces'].append(interface)
+
+        # slivers always provide the ssh service
+        for public_ip in addresses.get('public', []):
+            login = Login({'authentication': 'ssh-keys',
+                           'hostname': public_ip['addr'],
+                           'port':'22', 'username': 'root'})
+            service = Services({'login': login})
+            rspec_node['services'].append(service)
+        return rspec_node
+
+
+    def instance_to_sliver(self, instance, xrn=None):
+        """Convert a nova flavor/instance record into an RSpec Sliver element.
+
+        :param instance: nova flavor (or server) exposing name/vcpus/ram/disk/id
+        :param xrn: optional slice xrn used to mint the sliver urn
+        """
+        sliver_id = None
+        if xrn:
+            # BUG FIX: use the *xrn* parameter (the original referenced the
+            # undefined name slice_xrn)
+            sliver_id = Xrn(xrn=xrn, type='slice', id=instance.id).get_urn()
+
+        # BUG FIX: sliver_id is already a urn string (and may be None); the
+        # original called xrn.get_urn() a second time, which raised when xrn
+        # was None and AttributeError'd on the string otherwise
+        sliver = Sliver({'sliver_id': sliver_id,
+                         'name': instance.name,
+                         'type': instance.name,
+                         'cpus': str(instance.vcpus),
+                         'memory': str(instance.ram),
+                         'storage': str(instance.disk)})
+        return sliver
+
+    def instance_to_geni_sliver(self, instance):
+        """Build the geni_slivers entry (AM API v3 describe) for one instance."""
+        # map nova vm state onto a geni operational status
+        op_status = "geni_unknown"
+        state = instance.state.lower()
+        if state == 'active':
+            op_status = 'geni_ready'
+        elif state == 'building':
+            op_status = 'geni_configuring'
+        elif state == 'failed':
+            # BUG FIX: was "op_status =' geni_failed'" -- a leading space baked
+            # into the status string
+            op_status = 'geni_failed'
+
+        urn = OSXrn(name=instance.name, type='slice', id=instance.id).get_urn()
+        # required fields
+        geni_sliver = {'geni_sliver_urn': urn,
+                       'geni_expires': None,
+                       'geni_allocation_status': 'geni_provisioned',
+                       'geni_operational_status': op_status,
+                       'geni_error': None,
+                       'plos_created_at': datetime_to_string(utcparse(instance.created)),
+                       # BUG FIX: the shell lives on self.driver, not on self
+                       'plos_sliver_type': self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
+                       }
+
+        return geni_sliver
+
def get_aggregate_nodes(self):
zones = self.get_availability_zones()
# available sliver/instance/vm types
HardwareType({'name': 'pc'})]
slivers = []
for instance in instances:
- sliver = instance_to_sliver(instance)
+ sliver = self.instance_to_sliver(instance)
sliver['disk_image'] = disk_images
slivers.append(sliver)
return rspec_nodes
-
def create_tenant(self, tenant_name):
tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
if not tenants:
tenant = tenants[0]
return tenant
-
def create_instance_key(self, slice_hrn, user):
slice_name = Xrn(slice_hrn).leaf
user_name = Xrn(user['urn']).leaf
metadata['security_groups'] = group_name
if node.get('component_id'):
metadata['component_id'] = node['component_id']
+ if node.get('client_id'):
+ metadata['client_id'] = node['client_id']
self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
image=image_id,
key_name = key_name,
- security_group = group_name,
+ security_groups = [group_name],
files=files,
meta=metadata,
name=instance_name)
-    def delete_instances(self, instance_name, tenant_name):
+    def delete_instance(self, tenant_name, instance_name, id=None):
+        """Delete matching instances and (asynchronously) their security groups.
+
+        :param tenant_name: tenant to connect to before the lookup
+        :param instance_name: name of the instance(s) to destroy
+        :param id: optional instance id to narrow the match
+        """
+
+        def _delete_security_group(instance):
+            # wait for nova to finish deleting the instance, then remove its
+            # security group (runs on a worker thread)
+            security_group = instance.metadata.get('security_groups', '')
+            # BUG FIX: never delete the shared 'default' security group (the
+            # pre-refactor code guarded against this; the guard was dropped)
+            if security_group and security_group != 'default':
+                manager = SecurityGroup(self.driver)
+                timeout = 10.0  # wait at most 10 seconds before forcing the delete
+                start_time = time.time()
+                instance_deleted = False
+                while not instance_deleted and (time.time() - start_time) < timeout:
+                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
+                    if not inst:
+                        instance_deleted = True
+                    else:
+                        # only sleep while the instance still exists
+                        time.sleep(.5)
+                manager.delete_security_group(security_group)
+
+        thread_manager = ThreadManager()
         self.driver.shell.nova_manager.connect(tenant=tenant_name)
-        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+        args = {'name': instance_name}
+        if id:
+            args['id'] = id
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
         security_group_manager = SecurityGroup(self.driver)
         for instance in instances:
-            # deleate this instance's security groups
-            security_group = instance.metadata.get('security_groups', '')
-            if security_group:
-                # dont delete the default security group
-                if security_group != 'default':
-                    security_group_manager.delete_security_group(security_group)
             # destroy instance
             self.driver.shell.nova_manager.servers.delete(instance)
+            # delete this instance's security groups once nova has removed it
+            thread_manager.run(_delete_security_group, instance)
         return 1
-    def stop_instances(self, instance_name, tenant_name):
+    def stop_instances(self, instance_name, tenant_name, id=None):
+        """Pause every instance matching *instance_name* (and *id*, if given)."""
         self.driver.shell.nova_manager.connect(tenant=tenant_name)
-        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+        args = {'name': instance_name}
+        if id:
+            args['id'] = id
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
         for instance in instances:
             self.driver.shell.nova_manager.servers.pause(instance)
         return 1
+    def start_instances(self, instance_name, tenant_name, id=None):
+        """Resume every instance matching *instance_name* (and *id*, if given)."""
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        args = {'name': instance_name}
+        if id:
+            args['id'] = id
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
+        for instance in instances:
+            self.driver.shell.nova_manager.servers.resume(instance)
+        return 1
+
+    def restart_instances(self, instance_name, tenant_name, id=None):
+        """Restart matching instances by pausing then resuming them.
+
+        BUG FIX: the parameter was misspelled 'instacne_name', so the body's
+        references to instance_name raised NameError on every call.
+        """
+        self.stop_instances(instance_name, tenant_name, id)
+        self.start_instances(instance_name, tenant_name, id)
+        return 1
+
     def update_instances(self, project_name):
+        # NOTE(review): intentionally a no-op for now
        pass