</variablelist>
</category>
- <!-- ======================================== -->
- <category id="sfa_nova">
- <name>SFA Nova</name>
- <description>The settings that affect how SFA connects to
- the Nova/EC2 API</description>
- <variablelist>
- <variable id="user" type="string">
- <name>SFA Nova user</name>
- <value>novaadmin</value>
- <description>Account/context to use when performing
- administrative nova operations</description>
- </variable>
- <variable id="api_url" type="string">
- <name>Nova API URL</name>
- <value>127.0.0.1</value>
- <description>The Nova/EC2 API URL.</description>
- </variable>
- <variable id="api_port" type="int">
- <name>Nova API Port</name>
- <value>8773</value>
- <description>The Nova/EC2 API port.</description>
- </variable>
- <variable id="novarc" type="string">
- <name>novarc</name>
- <value>/root/novarc</value>
- <description>Path to novarc client config file</description>
- </variable>
- </variablelist>
- </category>
-
<!-- ======================================== -->
<category id="sfa_nitos">
<name></name>
flavour_xml_section_hash = {
'pl': 'sfa_plc',
- 'openstack': 'sfa_nova',
'nitos': 'sfa_nitos',
'dummy': 'sfa_dummy',
}
'sfa/planetlab',
'sfa/nitos',
'sfa/dummy',
- 'sfa/openstack',
'sfa/iotlab',
'sfatables',
'sfatables/commands',
%files plc
%defattr(-,root,root)
%{python_sitelib}/sfa/planetlab
-%{python_sitelib}/sfa/openstack
/etc/sfa/pl.rng
/etc/sfa/credential.xsd
/etc/sfa/top.xsd
+++ /dev/null
-from sfa.generic import Generic
-
-import sfa.server.sfaapi
-import sfa.openstack.nova_driver
-import sfa.managers.registry_manager_openstack
-import sfa.managers.aggregate_manager
-import sfa.managers.slice_manager
-
-# use pl as a model so we only redefine what's different
-from sfa.generic.pl import pl
-
-
-class openstack (pl):
-
- # the importer class
- def importer_class(self):
- import sfa.importer.openstackimporter
- return sfa.importer.openstackimporter.OpenstackImporter
-
- # the manager classes for the server-side services
- def registry_manager_class(self):
- return sfa.managers.registry_manager_openstack.RegistryManager
-
- def aggregate_manager_class(self):
- return sfa.managers.aggregate_manager.AggregateManager
-
- # driver class for server-side services, talk to the whole testbed
- def driver_class(self):
- return sfa.openstack.nova_driver.NovaDriver
+++ /dev/null
-import os
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.trust.gid import create_uuid
-from sfa.trust.certificate import convert_public_key, Keypair
-# using global alchemy.session() here is fine
-# as importer is on standalone one-shot process
-from sfa.storage.alchemy import global_dbsession
-from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
-from sfa.openstack.osxrn import OSXrn
-from sfa.openstack.shell import Shell
-
-
-def load_keys(filename):
- keys = {}
- tmp_dict = {}
- try:
- execfile(filename, tmp_dict)
- if 'keys' in tmp_dict:
- keys = tmp_dict['keys']
- return keys
- except:
- return keys
-
-
-def save_keys(filename, keys):
- f = open(filename, 'w')
- f.write("keys = %s" % str(keys))
- f.close()
-
-
-class OpenstackImporter:
-
- def __init__(self, auth_hierarchy, logger):
- self.auth_hierarchy = auth_hierarchy
- self.logger = logger
- self.config = Config()
- self.interface_hrn = self.config.SFA_INTERFACE_HRN
- self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
- self.shell = Shell(self.config)
-
- def add_options(self, parser):
- self.logger.debug("OpenstackImporter: no options yet")
- pass
-
- def import_users(self, existing_hrns, existing_records):
- # Get all users
- users = self.shell.auth_manager.users.list()
- users_dict = {}
- keys_filename = self.config.config_path + os.sep + 'person_keys.py'
- old_user_keys = load_keys(keys_filename)
- user_keys = {}
- for user in users:
- auth_hrn = self.config.SFA_INTERFACE_HRN
- if user.tenantId is not None:
- tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
- auth_hrn = OSXrn(
- name=tenant.name, auth=self.config.SFA_INTERFACE_HRN, type='authority').get_hrn()
- hrn = OSXrn(name=user.name, auth=auth_hrn, type='user').get_hrn()
- users_dict[hrn] = user
- old_keys = old_user_keys.get(hrn, [])
- keyname = OSXrn(xrn=hrn, type='user').get_slicename()
- keys = [
- k.public_key for k in self.shell.nova_manager.keypairs.findall(name=keyname)]
- user_keys[hrn] = keys
- update_record = False
- if old_keys != keys:
- update_record = True
- if hrn not in existing_hrns or \
- (hrn, 'user') not in existing_records or update_record:
- urn = OSXrn(xrn=hrn, type='user').get_urn()
-
- if keys:
- try:
- pkey = convert_public_key(keys[0])
- except:
- self.logger.log_exc(
- 'unable to convert public key for %s' % hrn)
- pkey = Keypair(create=True)
- else:
- self.logger.warning(
- "OpenstackImporter: person %s does not have a PL public key" % hrn)
- pkey = Keypair(create=True)
- user_gid = self.auth_hierarchy.create_gid(
- urn, create_uuid(), pkey, email=user.email)
- user_record = RegUser()
- user_record.type = 'user'
- user_record.hrn = hrn
- user_record.gid = user_gid
- user_record.authority = get_authority(hrn)
- global_dbsession.add(user_record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported person %s" % user_record)
-
- return users_dict, user_keys
-
- def import_tenants(self, existing_hrns, existing_records):
- # Get all tenants
- # A tenant can represent an organizational group (site) or a
- slice. If a tenant's authority/parent matches the root authority it is
- # considered a group/site. All other tenants are considered slices.
- tenants = self.shell.auth_manager.tenants.list()
- tenants_dict = {}
- for tenant in tenants:
- hrn = self.config.SFA_INTERFACE_HRN + '.' + tenant.name
- tenants_dict[hrn] = tenant
- authority_hrn = OSXrn(
- xrn=hrn, type='authority').get_authority_hrn()
-
- if hrn in existing_hrns:
- continue
-
- if authority_hrn == self.config.SFA_INTERFACE_HRN:
- # import group/site
- record = RegAuthority()
- urn = OSXrn(xrn=hrn, type='authority').get_urn()
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- gid = auth_info.get_gid_object()
- record.type = 'authority'
- record.hrn = hrn
- record.gid = gid
- record.authority = get_authority(hrn)
- global_dbsession.add(record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported authority: %s" % record)
-
- else:
- record = RegSlice()
- urn = OSXrn(xrn=hrn, type='slice').get_urn()
- pkey = Keypair(create=True)
- gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
- record.type = 'slice'
- record.hrn = hrn
- record.gid = gid
- record.authority = get_authority(hrn)
- global_dbsession.add(record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported slice: %s" % record)
-
- return tenants_dict
-
- def run(self, options):
- # we don't have any options for now
- self.logger.info("OpenstackImporter.run : to do")
-
- # create dict of all existing sfa records
- existing_records = {}
- existing_hrns = []
- key_ids = []
- for record in global_dbsession.query(RegRecord):
- existing_records[(record.hrn, record.type,)] = record
- existing_hrns.append(record.hrn)
-
- tenants_dict = self.import_tenants(existing_hrns, existing_records)
- users_dict, user_keys = self.import_users(
- existing_hrns, existing_records)
-
- # remove stale records
- system_records = [self.interface_hrn, self.root_auth,
- self.interface_hrn + '.slicemanager']
- for (record_hrn, type) in existing_records.keys():
- if record_hrn in system_records:
- continue
-
- record = existing_records[(record_hrn, type)]
- if record.peer_authority:
- continue
-
- if type == 'user':
- if record_hrn in users_dict:
- continue
- elif type in['slice', 'authority']:
- if record_hrn in tenants_dict:
- continue
- else:
- continue
-
- record_object = existing_records[(record_hrn, type)]
- self.logger.info("OpenstackImporter: removing %s " % record)
- global_dbsession.delete(record_object)
- global_dbsession.commit()
-
- # save pub keys
- self.logger.info('OpenstackImporter: saving current pub keys')
- keys_filename = self.config.config_path + os.sep + 'person_keys.py'
- save_keys(keys_filename, user_keys)
+++ /dev/null
-from sfa.util.sfalogging import logger
-from keystoneclient.v2_0 import client as keystone_client
-from glance import client as glance_client
-from novaclient.v1_1 import client as nova_client
-from sfa.util.config import Config
-
-
-def parse_novarc(filename):
- opts = {}
- f = open(filename, 'r')
- for line in f:
- try:
- line = line.replace('export', '').strip()
- parts = line.split('=')
- if len(parts) > 1:
- value = parts[1].replace("\'", "")
- value = value.replace('\"', '')
- opts[parts[0]] = value
- except:
- pass
- f.close()
- return opts
-
-
-class KeystoneClient:
-
- def __init__(self, username=None, password=None, tenant=None, url=None, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- if username:
- opts['OS_USERNAME'] = username
- if password:
- opts['OS_PASSWORD'] = password
- if tenant:
- opts['OS_TENANT_NAME'] = tenant
- if url:
- opts['OS_AUTH_URL'] = url
- self.opts = opts
- self.client = keystone_client.Client(username=opts.get('OS_USERNAME'),
- password=opts.get('OS_PASSWORD'),
- tenant_name=opts.get(
- 'OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'))
-
- def connect(self, *args, **kwds):
- self.__init__(*args, **kwds)
-
- def __getattr__(self, name):
- return getattr(self.client, name)
-
-
-class GlanceClient:
-
- def __init__(self, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- self.client = glance_client.get_client(host='0.0.0.0',
- username=opts.get(
- 'OS_USERNAME'),
- password=opts.get(
- 'OS_PASSWORD'),
- tenant=opts.get(
- 'OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'))
-
- def __getattr__(self, name):
- return getattr(self.client, name)
-
-
-class NovaClient:
-
- def __init__(self, username=None, password=None, tenant=None, url=None, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- if username:
- opts['OS_USERNAME'] = username
- if password:
- opts['OS_PASSWORD'] = password
- if tenant:
- opts['OS_TENANT_NAME'] = tenant
- if url:
- opts['OS_AUTH_URL'] = url
- self.opts = opts
- self.client = nova_client.Client(username=opts.get('OS_USERNAME'),
- api_key=opts.get('OS_PASSWORD'),
- project_id=opts.get('OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'),
- region_name='',
- extensions=[],
- service_type='compute',
- service_name='',
- )
-
- def connect(self, *args, **kwds):
- self.__init__(*args, **kwds)
-
- def __getattr__(self, name):
- return getattr(self.client, name)
+++ /dev/null
-try:
- import boto
- from boto.ec2.regioninfo import RegionInfo
- from boto.exception import EC2ResponseError
- has_boto = True
-except:
- has_boto = False
-
-from sfa.util.sfalogging import logger
-from sfa.openstack.nova_shell import NovaShell
-from sfa.util.config import Config
-
-
-class EucaShell:
- """
- A xmlrpc connection to the euca api.
- """
-
- def __init__(self, config):
- self.config = config
- self.nova_shell = NovaShell(config)
- self.access_key = None
- self.secret_key = None
-
- def init_context(self, project_name=None):
-
- # use the context of the specified project's project
- # manager.
- if project_name:
- project = self.nova_shell.auth_manager.get_project(project_name)
- self.access_key = "%s:%s" % (
- project.project_manager.name, project_name)
- self.secret_key = project.project_manager.secret
- else:
- # use admin user's context
- admin_user = self.nova_shell.auth_manager.get_user(
- self.config.SFA_NOVA_USER)
- #access_key = admin_user.access
- self.access_key = '%s' % admin_user.name
- self.secret_key = admin_user.secret
-
- def get_euca_connection(self, project_name=None):
- if not has_boto:
- logger.info('Unable to access EC2 API - boto library not found.')
- return None
-
- if not self.access_key or not self.secret_key:
- self.init_context(project_name)
-
- url = self.config.SFA_NOVA_API_URL
- host = None
- port = None
- path = "/"
- use_ssl = False
- # Split the url into parts
- if url.find('https://') >= 0:
- use_ssl = True
- url = url.replace('https://', '')
- elif url.find('http://') >= 0:
- use_ssl = False
- url = url.replace('http://', '')
- parts = url.split(':')
- host = parts[0]
- if len(parts) > 1:
- parts = parts[1].split('/')
- port = int(parts[0])
- parts = parts[1:]
- path = '/' + '/'.join(parts)
- return boto.connect_ec2(aws_access_key_id=self.access_key,
- aws_secret_access_key=self.secret_key,
- is_secure=use_ssl,
- region=RegionInfo(None, 'eucalyptus', host),
- host=host,
- port=port,
- path=path)
-
- def __getattr__(self, name):
- def func(*args, **kwds):
- conn = self.get_euca_connection()
- return getattr(conn, name)(*args, **kwds)
- return func
+++ /dev/null
-from nova.exception import ImageNotFound
-from sfa.rspecs.elements.disk_image import DiskImage
-
-
-class Image:
-
- def __init__(self, image=None):
- if image is None:
- image = {}
- self.id = None
- self.container_format = None
- self.kernel_id = None
- self.ramdisk_id = None
- self.properties = None
- self.name = None
- self.description = None
- self.os = None
- self.version = None
-
- if image:
- self.parse_image(image)
-
- def parse_image(self, image):
- if isinstance(image, dict):
- self.id = image['id']
- self.name = image['name']
- self.container_format = image['container_format']
- self.properties = image['properties']
- if 'kernel_id' in self.properties:
- self.kernel_id = self.properties['kernel_id']
- if 'ramdisk_id' in self.properties:
- self.ramdisk_id = self.properties['ramdisk_id']
-
- def to_rspec_object(self):
- img = DiskImage()
- img['name'] = self.name
- img['description'] = self.name
- img['os'] = self.name
- img['version'] = self.name
- return img
-
-
-class ImageManager:
-
- def __init__(self, driver):
- self.driver = driver
-
- @staticmethod
- def disk_image_to_rspec_object(image):
- img = Image(image)
- return img.to_rspec_object()
-
- def get_available_disk_images(self):
- # get image records
- disk_images = []
- for img in self.driver.shell.image_manager.get_images_detailed():
- image = Image(img)
- if image.container_format in ['ami', 'ovf']:
- disk_images.append(image)
- return disk_images
-
- def get_disk_image(self, id=None, name=None):
- """
- Look up an image bundle using the specified id or name
- """
- image = None
- try:
- if id:
- image = self.driver.shell.nova_manager.images.find(id=id)
- elif name:
- image = self.driver.shell.nova_manager.images.find(name=name)
- except ImageNotFound:
- pass
- return Image(image)
+++ /dev/null
-import time
-import datetime
-
-from sfa.util.faults import MissingSfaInfo, UnknownSfaType, Forbidden, \
- RecordNotFound, SfaNotImplemented, SfaInvalidArgument, UnsupportedOperation
-
-from sfa.util.sfalogging import logger
-from sfa.util.defaultdict import defaultdict
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf
-from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename, hrn_to_os_tenant_name
-from sfa.util.cache import Cache
-from sfa.trust.credential import Credential
-# used to be used in get_ticket
-#from sfa.trust.sfaticket import SfaTicket
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-from sfa.storage.model import RegRecord, SliverAllocation
-
-# the driver interface, mostly provides default behaviours
-from sfa.managers.driver import Driver
-from sfa.openstack.shell import Shell
-from sfa.openstack.osaggregate import OSAggregate
-from sfa.planetlab.plslices import PlSlices
-
-
-def list_to_dict(recs, key):
- """
- convert a list of dictionaries into a dictionary keyed on the
- specified dictionary key
- """
- return dict([(rec[key], rec) for rec in recs])
-
-#
-# Shell wraps the native keystone/nova/glance python clients;
-# methods can be called as-is; it takes care of authentication
-# from the global config
-#
-
-
-class NovaDriver(Driver):
-
- # the cache instance is a class member so it survives across incoming
- # requests
- cache = None
-
- def __init__(self, api):
- Driver.__init__(self, api)
- config = api.config
- self.shell = Shell(config=config)
- self.cache = None
- if config.SFA_AGGREGATE_CACHING:
- if NovaDriver.cache is None:
- NovaDriver.cache = Cache()
- self.cache = NovaDriver.cache
-
- def sliver_to_slice_xrn(self, xrn):
- sliver_id_parts = Xrn(xrn).get_sliver_id_parts()
- slice = self.shell.auth_manager.tenants.find(id=sliver_id_parts[0])
- if not slice:
- raise Forbidden(
- "Unable to locate slice record for sliver: %s" % xrn)
- slice_xrn = OSXrn(name=slice.name, type='slice')
- return slice_xrn
-
- def check_sliver_credentials(self, creds, urns):
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slice_cred_names.append(OSXrn(xrn=slice_cred_hrn).get_slicename())
-
- # look up slice name of slivers listed in urns arg
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- slice_ids.append(sliver_id_parts[0])
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
-
- sliver_names = []
- for slice_id in slice_ids:
- slice = self.shell.auth_manager.tenants.find(slice_id)
- sliver_names.append(slice['name'])
-
- # make sure we have a credential for every specified sliver
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
-
- ########################################
- # registry oriented
- ########################################
-
- # disabled users
- def is_enabled(self, record):
- # all records are enabled
- return True
-
- def augment_records_with_testbed_info(self, sfa_records):
- return self.fill_record_info(sfa_records)
-
- ##########
- def register(self, sfa_record, hrn, pub_key):
-
- if sfa_record['type'] == 'slice':
- record = self.register_slice(sfa_record, hrn)
- elif sfa_record['type'] == 'user':
- record = self.register_user(sfa_record, hrn, pub_key)
- elif sfa_record['type'].startswith('authority'):
- record = self.register_authority(sfa_record, hrn)
- # We should be returning the records id as a pointer but
- # this is a string and the records table expects this to be an
- # int.
- # return record.id
- return -1
-
- def register_slice(self, sfa_record, hrn):
- # add slice description, name, researchers, PI
- name = hrn_to_os_tenant_name(hrn)
- description = sfa_record.get('description', None)
- self.shell.auth_manager.tenants.create(name, description)
- tenant = self.shell.auth_manager.tenants.find(name=name)
- auth_hrn = OSXrn(xrn=hrn, type='slice').get_authority_hrn()
- parent_tenant_name = OSXrn(
- xrn=auth_hrn, type='slice').get_tenant_name()
- parent_tenant = self.shell.auth_manager.tenants.find(
- name=parent_tenant_name)
- researchers = sfa_record.get('researchers', [])
- for researcher in researchers:
- name = Xrn(researcher).get_leaf()
- user = self.shell.auth_manager.users.find(name=name)
- self.shell.auth_manager.roles.add_user_role(user, 'Member', tenant)
- self.shell.auth_manager.roles.add_user_role(user, 'user', tenant)
-
- pis = sfa_record.get('pis', [])
- for pi in pis:
- name = Xrn(pi).get_leaf()
- user = self.shell.auth_manager.users.find(name=name)
- self.shell.auth_manager.roles.add_user_role(user, 'pi', tenant)
- self.shell.auth_manager.roles.add_user_role(
- user, 'pi', parent_tenant)
-
- return tenant
-
- def register_user(self, sfa_record, hrn, pub_key):
- # add person roles, projects and keys
- email = sfa_record.get('email', None)
- xrn = Xrn(hrn)
- name = xrn.get_leaf()
- auth_hrn = xrn.get_authority_hrn()
- tenant_name = OSXrn(xrn=auth_hrn, type='authority').get_tenant_name()
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- self.shell.auth_manager.users.create(
- name, email=email, tenant_id=tenant.id)
- user = self.shell.auth_manager.users.find(name=name)
- slices = sfa_record.get('slices', [])
- for slice in slices:
- slice_tenant_name = OSXrn(
- xrn=slice, type='slice').get_tenant_name()
- slice_tenant = self.shell.auth_manager.tenants.find(
- name=slice_tenant_name)
- self.shell.auth_manager.roles.add_user_role(
- user, slice_tenant, 'user')
- keys = sfa_record.get('keys', [])
- for key in keys:
- keyname = OSXrn(xrn=hrn, type='user').get_slicename()
- self.shell.nova_client.keypairs.create(keyname, key)
- return user
-
- def register_authority(self, sfa_record, hrn):
- name = OSXrn(xrn=hrn, type='authority').get_tenant_name()
- self.shell.auth_manager.tenants.create(
- name, sfa_record.get('description', ''))
- tenant = self.shell.auth_manager.tenants.find(name=name)
- return tenant
-
- ##########
- # xxx actually old_sfa_record comes filled with plc stuff as well in the
- # original code
- def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- type = new_sfa_record['type']
-
- # new_key implemented for users only
- if new_key and type not in ['user']:
- raise UnknownSfaType(type)
-
- elif type == "slice":
- # can update project manager and description
- name = hrn_to_os_slicename(hrn)
- researchers = new_sfa_record.get('researchers', [])
- pis = new_sfa_record.get('pis', [])
- project_manager = None
- description = new_sfa_record.get('description', None)
- if pis:
- project_manager = Xrn(pis[0], 'user').get_leaf()
- elif researchers:
- project_manager = Xrn(researchers[0], 'user').get_leaf()
- self.shell.auth_manager.modify_project(
- name, project_manager, description)
-
- elif type == "user":
- # can technically update access_key and secret_key,
- # but that is not in our scope, so we do nothing.
- pass
- return True
-
- ##########
- def remove(self, sfa_record):
- type = sfa_record['type']
- if type == 'user':
- name = Xrn(sfa_record['hrn']).get_leaf()
- if self.shell.auth_manager.get_user(name):
- self.shell.auth_manager.delete_user(name)
- elif type == 'slice':
- name = hrn_to_os_slicename(sfa_record['hrn'])
- if self.shell.auth_manager.get_project(name):
- self.shell.auth_manager.delete_project(name)
- return True
-
- ####################
- def fill_record_info(self, records):
- """
- Given a (list of) SFA record, fill in the PLC specific
- and SFA specific fields in the record.
- """
- if not isinstance(records, list):
- records = [records]
-
- for record in records:
- if record['type'] == 'user':
- record = self.fill_user_record_info(record)
- elif record['type'] == 'slice':
- record = self.fill_slice_record_info(record)
- elif record['type'].startswith('authority'):
- record = self.fill_auth_record_info(record)
- else:
- continue
- record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
- record['geni_certificate'] = record['gid']
- # if os_record.created_at is not None:
- # record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
- # if os_record.updated_at is not None:
- # record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
-
- return records
-
- def fill_user_record_info(self, record):
- xrn = Xrn(record['hrn'])
- name = xrn.get_leaf()
- record['name'] = name
- user = self.shell.auth_manager.users.find(name=name)
- record['email'] = user.email
- tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
- slices = []
- all_tenants = self.shell.auth_manager.tenants.list()
- for tmp_tenant in all_tenants:
- if tmp_tenant.name.startswith(tenant.name + "."):
- for tmp_user in tmp_tenant.list_users():
- if tmp_user.name == user.name:
- slice_hrn = ".".join([self.hrn, tmp_tenant.name])
- slices.append(slice_hrn)
- record['slices'] = slices
- roles = self.shell.auth_manager.roles.roles_for_user(user, tenant)
- record['roles'] = [role.name for role in roles]
- keys = self.shell.nova_manager.keypairs.findall(name=record['hrn'])
- record['keys'] = [key.public_key for key in keys]
- return record
-
- def fill_slice_record_info(self, record):
- tenant_name = hrn_to_os_tenant_name(record['hrn'])
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- parent_tenant_name = OSXrn(xrn=tenant_name).get_authority_hrn()
- parent_tenant = self.shell.auth_manager.tenants.find(
- name=parent_tenant_name)
- researchers = []
- pis = []
-
- # look for users and pis in slice tenant
- for user in tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
- if role.name.lower() == 'pi':
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- pis.append(hrn)
- elif role.name.lower() in ['user', 'member']:
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- researchers.append(hrn)
-
- # look for pis in the slice's parent (site/organization) tenant
- for user in parent_tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, parent_tenant):
- if role.name.lower() == 'pi':
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- pis.append(hrn)
- record['name'] = tenant_name
- record['description'] = tenant.description
- record['PI'] = pis
- if pis:
- record['geni_creator'] = pis[0]
- else:
- record['geni_creator'] = None
- record['researcher'] = researchers
- return record
-
- def fill_auth_record_info(self, record):
- tenant_name = hrn_to_os_tenant_name(record['hrn'])
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- researchers = []
- pis = []
-
- # look for users and pis in slice tenant
- for user in tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
- hrn = ".".join([self.hrn, tenant.name, user.name])
- if role.name.lower() == 'pi':
- pis.append(hrn)
- elif role.name.lower() in ['user', 'member']:
- researchers.append(hrn)
-
- # look for slices
- slices = []
- all_tenants = self.shell.auth_manager.tenants.list()
- for tmp_tenant in all_tenants:
- if tmp_tenant.name.startswith(tenant.name + "."):
- slices.append(".".join([self.hrn, tmp_tenant.name]))
-
- record['name'] = tenant_name
- record['description'] = tenant.description
- record['PI'] = pis
- record['enabled'] = tenant.enabled
- record['researchers'] = researchers
- record['slices'] = slices
- return record
-
- ####################
- # plcapi works by changes, compute what needs to be added/deleted
- def update_relation(self, subject_type, target_type, subject_id, target_ids):
- # hard-wire the code for slice/user for now, could be smarter if needed
- if subject_type == 'slice' and target_type == 'user':
- subject = self.shell.project_get(subject_id)[0]
- current_target_ids = [user.name for user in subject.members]
- add_target_ids = list(
- set(target_ids).difference(current_target_ids))
- del_target_ids = list(
- set(current_target_ids).difference(target_ids))
- logger.debug("subject_id = %s (type=%s)" %
- (subject_id, type(subject_id)))
- for target_id in add_target_ids:
- self.shell.project_add_member(target_id, subject_id)
- logger.debug("add_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- for target_id in del_target_ids:
- logger.debug("del_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- self.shell.project_remove_member(target_id, subject_id)
- else:
- logger.info('unexpected relation to maintain, %s -> %s' %
- (subject_type, target_type))
-
- ########################################
- # aggregate oriented
- ########################################
-
- def testbed_name(self): return "openstack"
-
- def aggregate_version(self):
- return {}
-
- # first 2 args are None in case of resource discovery
- def list_resources(self, version=None, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
- return rspec
-
- def describe(self, urns, version=None, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- return aggregate.describe(urns, version=version, options=options)
-
- def status(self, urns, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- desc = aggregate.describe(urns)
- status = {'geni_urn': desc['geni_urn'],
- 'geni_slivers': desc['geni_slivers']}
- return status
-
- def allocate(self, urn, rspec_string, expiration, options=None):
- if options is None:
- options = {}
- xrn = Xrn(urn)
- aggregate = OSAggregate(self)
-
- # collect public keys
- users = options.get('geni_users', [])
- pubkeys = []
- for user in users:
- pubkeys.extend(user['keys'])
-
- # assume the first user is the caller and use their context
- # for the ec2/euca api connection. Also, use the first user's
- # key as the project key.
- key_name = None
- if users:
- key_name = aggregate.create_instance_key(xrn.get_hrn(), users[0])
-
- rspec = RSpec(rspec_string)
- slice_hrn = xrn.get_hrn()
- instance_name = hrn_to_os_slicename(slice_hrn)
- tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
- slivers = aggregate.run_instances(instance_name, tenant_name,
- rspec_string, key_name, pubkeys)
-
- # update all sliver allocation states, setting them to geni_provisioned
- sliver_ids = [sliver.id for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(
- sliver_ids, 'geni_provisioned', dbsession)
-
- return aggregate.describe(urns=[urn], version=rspec.version)
-
- def provision(self, urns, options=None):
- if options is None:
- options = {}
- # update sliver allocation states and set them to geni_provisioned
- aggregate = OSAggregate(self)
- instances = aggregate.get_instances(urns)
- sliver_ids = []
- for instance in instances:
- sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
- sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(
- sliver_ids, 'geni_provisioned', dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(
- options['geni_rspec_version'])
- return self.describe(urns, rspec_version, options=options)
-
- def delete(self, urns, options=None):
- if options is None:
- options = {}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = OSAggregate(self)
- instances = aggregate.get_instances(urns)
- sliver_ids = []
- for instance in instances:
- sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
- sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
-
- # delete the instance
- aggregate.delete_instance(instance)
-
- # delete sliver allocation states
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
-
- # return geni_slivers
- geni_slivers = []
- for sliver_id in sliver_ids:
- geni_slivers.append(
- {'geni_sliver_urn': sliver_id,
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': None})
- return geni_slivers
-
- def renew(self, urns, expiration_time, options=None):
- if options is None:
- options = {}
- description = self.describe(urns, None, options)
- return description['geni_slivers']
-
- def perform_operational_action(self, urns, action, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- action = action.lower()
- if action == 'geni_start':
- action_method = aggregate.start_instances
- elif action == 'geni_stop':
- action_method = aggregate.stop_instances
- elif action == 'geni_restart':
- action_method = aggregate.restart_instances
- else:
- raise UnsupportedOperation(action)
-
- # fault if sliver is not fully allocated (operational status is
- # geni_pending_allocation)
- description = self.describe(urns, None, options)
- for sliver in description['geni_slivers']:
- if sliver['geni_operational_status'] == 'geni_pending_allocation':
- raise UnsupportedOperation(
- action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
- #
- # Perform Operational Action Here
- #
-
- instances = aggregate.get_instances(urns)
- for instance in instances:
- tenant_name = self.shell.auth_manager.client.tenant_name
- action_method(instance.name, tenant_name, instance.id)
- description = self.describe(urns)
- geni_slivers = self.describe(urns, None, options)['geni_slivers']
- return geni_slivers
-
- def shutdown(self, xrn, options=None):
- if options is None:
- options = {}
- xrn = OSXrn(xrn=xrn, type='slice')
- tenant_name = xrn.get_tenant_name()
- name = xrn.get_slicename()
- self.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.shell.nova_manager.servers.findall(name=name)
- for instance in instances:
- self.shell.nova_manager.servers.shutdown(instance)
- return True
+++ /dev/null
-
-import os
-import socket
-import base64
-import string
-import random
-import time
-from collections import defaultdict
-from nova.exception import ImageNotFound
-from nova.api.ec2.cloud import CloudController
-from sfa.util.faults import SliverDoesNotExist, InvalidRSpec
-from sfa.storage.model import SliverAllocation
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-from sfa.rspecs.rspec import RSpec
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import Node
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.disk_image import DiskImage
-from sfa.rspecs.elements.services import Services
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.fw_rule import FWRule
-from sfa.util.xrn import Xrn
-from sfa.planetlab.plxrn import PlXrn
-from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
-from sfa.rspecs.version_manager import VersionManager
-from sfa.openstack.security_group import SecurityGroup
-from sfa.client.multiclient import MultiClient
-from sfa.util.sfalogging import logger
-
-
-def pubkeys_to_user_data(pubkeys):
- user_data = "#!/bin/bash\n\n"
- for pubkey in pubkeys:
- pubkey = pubkey.replace('\n', '')
- user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
- user_data += "\n"
- user_data += "echo >> /root/.ssh/authorized_keys"
- user_data += "\n"
- return user_data
-
-
-def image_to_rspec_disk_image(image):
- img = DiskImage()
- img['name'] = image['name']
- img['description'] = image['name']
- img['os'] = image['name']
- img['version'] = image['name']
- return img
-
-
-class OSAggregate:
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_availability_zones(self):
- zones = self.driver.shell.nova_manager.dns_domains.domains()
- if not zones:
- zones = ['cloud']
- else:
- zones = [zone.name for zone in zones]
- return zones
-
- def list_resources(self, version=None, options=None):
- if options is None:
- options = {}
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'ad')
- rspec = RSpec(version=version, user_options=options)
- nodes = self.get_aggregate_nodes()
- rspec.version.add_nodes(nodes)
- return rspec.toxml()
-
- def describe(self, urns, version=None, options=None):
- if options is None:
- options = {}
- # update nova connection
- tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.get_instances(urns)
- # lookup the sliver allocations for these instances
- sliver_ids = [Xrn('%s.%s' % (self.driver.hrn, instance.id), type='sliver').urn for instance in instances]
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- sliver_allocations = self.driver.api.dbsession().query(
- SliverAllocation).filter(constraint)
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- sliver_allocation_dict[
- sliver_allocation.sliver_id] = sliver_allocation
-
- geni_slivers = []
- rspec_nodes = []
- for instance in instances:
- rspec_nodes.append(self.instance_to_rspec_node(instance))
- geni_sliver = self.instance_to_geni_sliver(
- instance, sliver_allocation_dict)
- geni_slivers.append(geni_sliver)
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
- rspec = RSpec(version=rspec_version, user_options=options)
- rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
- rspec.version.add_nodes(rspec_nodes)
- result = {'geni_urn': Xrn(urns[0]).get_urn(),
- 'geni_rspec': rspec.toxml(),
- 'geni_slivers': geni_slivers}
-
- return result
-
- def get_instances(self, urns):
- # parse slice names and sliver ids
- names = set()
- ids = set()
- for urn in urns:
- xrn = OSXrn(xrn=urn)
- if xrn.type == 'slice':
- names.add(xrn.get_slicename())
- elif xrn.type == 'sliver':
- ids.add(xrn.leaf)
-
- # look up instances
- instances = []
- filter = {}
- if names:
- filter['name'] = names
- if ids:
- filter['id'] = ids
- servers = self.driver.shell.nova_manager.servers.findall(**filter)
- instances.extend(servers)
-
- return instances
-
- def instance_to_rspec_node(self, instance):
- # determine node urn
- node_xrn = instance.metadata.get('component_id')
- if not node_xrn:
- node_xrn = OSXrn('cloud', type='node')
- else:
- node_xrn = OSXrn(xrn=node_xrn, type='node')
-
- rspec_node = Node()
- rspec_node['component_id'] = node_xrn.urn
- rspec_node['component_name'] = node_xrn.name
- rspec_node['component_manager_id'] = Xrn(
- self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['sliver_id'] = OSXrn(
- name=instance.name, type='slice', id=instance.id).get_urn()
- if instance.metadata.get('client_id'):
- rspec_node['client_id'] = instance.metadata.get('client_id')
-
- # get sliver details
- flavor = self.driver.shell.nova_manager.flavors.find(
- id=instance.flavor['id'])
- sliver = self.instance_to_sliver(flavor)
- # get firewall rules
- fw_rules = []
- group_name = instance.metadata.get('security_groups')
- if group_name:
- group = self.driver.shell.nova_manager.security_groups.find(
- name=group_name)
- for rule in group.rules:
- port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
- fw_rule = FWRule({'protocol': rule['ip_protocol'],
- 'port_range': port_range,
- 'cidr_ip': rule['ip_range']['cidr']})
- fw_rules.append(fw_rule)
- sliver['fw_rules'] = fw_rules
- rspec_node['slivers'] = [sliver]
-
- # get disk image
- image = self.driver.shell.image_manager.get_images(
- id=instance.image['id'])
- if isinstance(image, list) and len(image) > 0:
- image = image[0]
- disk_image = image_to_rspec_disk_image(image)
- sliver['disk_image'] = [disk_image]
-
- # get interfaces
- rspec_node['services'] = []
- rspec_node['interfaces'] = []
- addresses = instance.addresses
- # HACK: public ips are stored in the list of private, but
- # this seems wrong. Assume pub ip is the last in the list of
- # private ips until openstack bug is fixed.
- if addresses.get('private'):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': addresses.get('private')[-1]['addr'],
- 'port': '22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
-
- for if_index, private_ip in enumerate(addresses.get('private', [])):
- if_xrn = PlXrn(auth=self.driver.hrn,
- interface='node%s' % (instance.hostId))
- if_client_id = Xrn(if_xrn.urn, type='interface',
- id="eth%s" % if_index).urn
- if_sliver_id = Xrn(
- rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
- interface = Interface({'component_id': if_xrn.urn,
- 'client_id': if_client_id,
- 'sliver_id': if_sliver_id})
- interface['ips'] = [{'address': private_ip['addr'],
- #'netmask': private_ip['network'],
- 'type': private_ip['version']}]
- rspec_node['interfaces'].append(interface)
-
- # slivers always provide the ssh service
- for public_ip in addresses.get('public', []):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': public_ip['addr'],
- 'port': '22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
- return rspec_node
-
- def instance_to_sliver(self, instance, xrn=None):
- sliver_id = None
- if xrn:
- sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
-
- sliver = Sliver({'sliver_id': sliver_id,
- 'name': instance.name,
- 'type': instance.name,
- 'cpus': str(instance.vcpus),
- 'memory': str(instance.ram),
- 'storage': str(instance.disk)})
- return sliver
-
- def instance_to_geni_sliver(self, instance, sliver_allocations=None):
- if sliver_allocations is None:
- sliver_allocations = {}
- sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
-
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations.get(sliver_id)
- op_status = 'geni_unknown'
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- state = instance.state.lower()
- if state == 'active':
- op_status = 'geni_ready'
- elif state == 'building':
- op_status = 'geni_notready'
- elif state == 'failed':
- op_status = 'geni_failed'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- # required fields
- geni_sliver = {'geni_sliver_urn': sliver_id,
- 'geni_expires': None,
- 'geni_allocation_status': allocation_status,
- 'geni_operational_status': op_status,
- 'geni_error': None,
- 'plos_created_at': datetime_to_string(utcparse(instance.created)),
- 'plos_sliver_type': self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
- }
-
- return geni_sliver
-
- def get_aggregate_nodes(self):
- zones = self.get_availability_zones()
- # available sliver/instance/vm types
- instances = self.driver.shell.nova_manager.flavors.list()
- if isinstance(instances, dict):
- instances = instances.values()
- # available images
- images = self.driver.shell.image_manager.get_images_detailed()
- disk_images = [image_to_rspec_disk_image(img) for img in images if img[
- 'container_format'] in ['ami', 'ovf']]
- rspec_nodes = []
- for zone in zones:
- rspec_node = Node()
- xrn = OSXrn(zone, type='node')
- rspec_node['component_id'] = xrn.urn
- rspec_node['component_name'] = xrn.name
- rspec_node['component_manager_id'] = Xrn(
- self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['exclusive'] = 'false'
- rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
- HardwareType({'name': 'pc'})]
- slivers = []
- for instance in instances:
- sliver = self.instance_to_sliver(instance)
- sliver['disk_image'] = disk_images
- slivers.append(sliver)
- rspec_node['available'] = 'true'
- rspec_node['slivers'] = slivers
- rspec_nodes.append(rspec_node)
-
- return rspec_nodes
-
- def create_tenant(self, tenant_name):
- tenants = self.driver.shell.auth_manager.tenants.findall(
- name=tenant_name)
- if not tenants:
- self.driver.shell.auth_manager.tenants.create(
- tenant_name, tenant_name)
- tenant = self.driver.shell.auth_manager.tenants.find(
- name=tenant_name)
- else:
- tenant = tenants[0]
- return tenant
-
- def create_instance_key(self, slice_hrn, user):
- slice_name = Xrn(slice_hrn).leaf
- user_name = Xrn(user['urn']).leaf
- key_name = "%s_%s" % (slice_name, user_name)
- pubkey = user['keys'][0]
- key_found = False
- existing_keys = self.driver.shell.nova_manager.keypairs.findall(
- name=key_name)
- for existing_key in existing_keys:
- if existing_key.public_key != pubkey:
- self.driver.shell.nova_manager.keypairs.delete(existing_key)
- elif existing_key.public_key == pubkey:
- key_found = True
-
- if not key_found:
- self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
- return key_name
-
- def create_security_group(self, slicename, fw_rules=None):
- if fw_rules is None:
- fw_rules = []
- # use default group by default
- group_name = 'default'
- if isinstance(fw_rules, list) and fw_rules:
- # Each sliver gets its own security group.
- # Keep security group names unique by appending some random
- # characters on end.
- random_name = "".join([random.choice(string.letters + string.digits)
- for i in xrange(6)])
- group_name = slicename + random_name
- security_group = SecurityGroup(self.driver)
- security_group.create_security_group(group_name)
- for rule in fw_rules:
- security_group.add_rule_to_group(group_name,
- protocol=rule.get('protocol'),
- cidr_ip=rule.get('cidr_ip'),
- port_range=rule.get(
- 'port_range'),
- icmp_type_code=rule.get('icmp_type_code'))
- # Open ICMP by default
- security_group.add_rule_to_group(group_name,
- protocol="icmp",
- cidr_ip="0.0.0.0/0",
- icmp_type_code="-1:-1")
- return group_name
-
- def add_rule_to_security_group(self, group_name, **kwds):
- security_group = SecurityGroup(self.driver)
- security_group.add_rule_to_group(group_name=group_name,
- protocol=kwds.get('protocol'),
- cidr_ip=kwds.get('cidr_ip'),
- icmp_type_code=kwds.get('icmp_type_code'))
-
- def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
- # logger.debug('Reserving an instance: image: %s, flavor: ' \
- # '%s, key: %s, name: %s' % \
- # (image_id, flavor_id, key_name, slicename))
-
- # make sure a tenant exists for this slice
- tenant = self.create_tenant(tenant_name)
-
- # add the sfa admin user to this tenant and update our nova client connection
- # to use these credentials for the rest of this session. This ensures that the instances
- # we create will be assigned to the correct tenant.
- sfa_admin_user = self.driver.shell.auth_manager.users.find(
- name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
- user_role = self.driver.shell.auth_manager.roles.find(name='user')
- admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
- self.driver.shell.auth_manager.roles.add_user_role(
- sfa_admin_user, admin_role, tenant)
- self.driver.shell.auth_manager.roles.add_user_role(
- sfa_admin_user, user_role, tenant)
- self.driver.shell.nova_manager.connect(tenant=tenant.name)
-
- authorized_keys = "\n".join(pubkeys)
- files = {'/root/.ssh/authorized_keys': authorized_keys}
- rspec = RSpec(rspec)
- requested_instances = defaultdict(list)
-
- # iterate over clouds/zones/nodes
- slivers = []
- for node in rspec.version.get_nodes_with_slivers():
- instances = node.get('slivers', [])
- if not instances:
- continue
- for instance in instances:
- try:
- metadata = {}
- flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance[
- 'name'])
- image = instance.get('disk_image')
- if image and isinstance(image, list):
- image = image[0]
- else:
- raise InvalidRSpec(
- "Must specify a disk_image for each VM")
- image_id = self.driver.shell.nova_manager.images.find(name=image[
- 'name'])
- fw_rules = instance.get('fw_rules', [])
- group_name = self.create_security_group(
- instance_name, fw_rules)
- metadata['security_groups'] = group_name
- if node.get('component_id'):
- metadata['component_id'] = node['component_id']
- if node.get('client_id'):
- metadata['client_id'] = node['client_id']
- server = self.driver.shell.nova_manager.servers.create(
- flavor=flavor_id,
- image=image_id,
- key_name=key_name,
- security_groups=[group_name],
- files=files,
- meta=metadata,
- name=instance_name)
- slivers.append(server)
- except Exception as err:
- logger.log_exc(err)
-
- return slivers
-
- def delete_instance(self, instance):
-
- def _delete_security_group(inst):
- security_group = inst.metadata.get('security_groups', '')
- if security_group:
- manager = SecurityGroup(self.driver)
- timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
- start_time = time.time()
- instance_deleted = False
- while instance_deleted == False and (time.time() - start_time) < timeout:
- tmp_inst = self.driver.shell.nova_manager.servers.findall(
- id=inst.id)
- if not tmp_inst:
- instance_deleted = True
- time.sleep(.5)
- manager.delete_security_group(security_group)
-
- multiclient = MultiClient()
- tenant = self.driver.shell.auth_manager.tenants.find(
- id=instance.tenant_id)
- self.driver.shell.nova_manager.connect(tenant=tenant.name)
- args = {'name': instance.name,
- 'id': instance.id}
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- security_group_manager = SecurityGroup(self.driver)
- for instance in instances:
- # destroy instance
- self.driver.shell.nova_manager.servers.delete(instance)
- # delete this instance's security groups
- multiclient.run(_delete_security_group, instance)
- return 1
-
- def stop_instances(self, instance_name, tenant_name, id=None):
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- args = {'name': instance_name}
- if id:
- args['id'] = id
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- for instance in instances:
- self.driver.shell.nova_manager.servers.pause(instance)
- return 1
-
- def start_instances(self, instance_name, tenant_name, id=None):
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- args = {'name': instance_name}
- if id:
- args['id'] = id
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- for instance in instances:
- self.driver.shell.nova_manager.servers.resume(instance)
- return 1
-
- def restart_instances(self, instance_name, tenant_name, id=None):
- self.stop_instances(instance_name, tenant_name, id)
- self.start_instances(instance_name, tenant_name, id)
- return 1
-
- def update_instances(self, project_name):
- pass
+++ /dev/null
-import re
-from sfa.util.xrn import Xrn
-from sfa.util.config import Config
-
-
-def hrn_to_os_slicename(hrn):
- return OSXrn(xrn=hrn, type='slice').get_slicename()
-
-
-def hrn_to_os_tenant_name(hrn):
- return OSXrn(xrn=hrn, type='slice').get_tenant_name()
-
-
-def cleanup_name(name):
- return name.replace(".", "_").replace("+", "_")
-
-
-class OSXrn(Xrn):
-
- def __init__(self, name=None, auth=None, **kwds):
-
- config = Config()
- self.id = kwds.get('id')
- if name is not None:
- Xrn.__init__(self, **kwds)
- if 'type' in kwds:
- self.type = kwds['type']
- if auth is not None:
- self.hrn = '.'.join([auth, cleanup_name(name)])
- else:
- self.hrn = name.replace('_', '.')
- self.hrn_to_urn()
- else:
- Xrn.__init__(self, **kwds)
-
- self.name = self.get_name()
-
- def get_name(self):
- self._normalize()
- leaf = self.leaf
- sliver_id_parts = leaf.split(':')
- name = sliver_id_parts[0]
- name = re.sub('[^a-zA-Z0-9_]', '', name)
- return name
-
- def get_slicename(self):
- self._normalize()
- slicename = self.hrn
- slicename = slicename.split(':')[0]
- slicename = re.sub('[\.]', '_', slicename)
- return slicename
-
- def get_tenant_name(self):
- self._normalize()
- tenant_name = self.hrn.replace('\.', '')
- return tenant_name
+++ /dev/null
-from sfa.util.sfalogging import logger
-
-
-class SecurityGroup:
-
- def __init__(self, driver):
- self.client = driver.shell.nova_manager
-
- def create_security_group(self, name):
- try:
- self.client.security_groups.create(name=name, description=name)
- except Exception as ex:
- logger.log_exc("Failed to add security group")
- raise
-
- def delete_security_group(self, name):
- try:
- security_group = self.client.security_groups.find(name=name)
- self.client.security_groups.delete(security_group.id)
- except Exception as ex:
- logger.log_exc("Failed to delete security group")
-
- def _validate_port_range(self, port_range):
- from_port = to_port = None
- if isinstance(port_range, str):
- ports = port_range.split(':')
- if len(ports) > 1:
- from_port = int(ports[0])
- to_port = int(ports[1])
- else:
- from_port = to_port = int(ports[0])
- return (from_port, to_port)
-
- def _validate_icmp_type_code(self, icmp_type_code):
- from_port = to_port = None
- if isinstance(icmp_type_code, str):
- code_parts = icmp_type_code.split(':')
- if len(code_parts) > 1:
- try:
- from_port = int(code_parts[0])
- to_port = int(code_parts[1])
- except ValueError:
- logger.error('port must be an integer.')
- return (from_port, to_port)
-
- def add_rule_to_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
- port_range=None, icmp_type_code=None,
- source_group_name=None, source_group_owner_id=None):
-
- try:
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type and icmp_type[0] and icmp_type[1]:
- from_port, to_port = icmp_type[0], icmp_type[1]
-
- group = self.client.security_groups.find(name=group_name)
- self.client.security_group_rules.create(group.id,
- protocol, from_port, to_port, cidr_ip)
- except Exception as ex:
- logger.log_exc("Failed to add rule to group %s" % group_name)
-
- def remove_rule_from_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
- port_range=None, icmp_type_code=None,
- source_group_name=None, source_group_owner_id=None):
- try:
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type:
- from_port, to_port = icmp_type[0], icmp_type[1]
- group = self.client.security_groups.find(name=group_name)
- filter = {
- 'id': group.id,
- 'from_port': from_port,
- 'to_port': to_port,
- 'cidr_ip': cidr_ip,
- 'ip_protocol': protocol,
- }
- rule = self.client.security_group_rules.find(**filter)
- if rule:
- self.client.security_group_rules.delete(rule)
- except Exception as ex:
- logger.log_exc("Failed to remove rule from group %s" % group_name)
+++ /dev/null
-import sys
-import socket
-import gettext
-from urlparse import urlparse
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-from sfa.util.faults import SfaNotImplemented
-
-try:
- from sfa.openstack.client import GlanceClient, NovaClient, KeystoneClient
- has_nova = True
-except:
- has_nova = False
-
-
-class Shell:
- """
- A simple native shell to a nova backend.
- This class can receive all nova calls to the underlying testbed
- """
-
- # dont care about limiting calls yet
- direct_calls = []
- alias_calls = {}
-
- # use the 'capability' auth mechanism for higher performance when the PLC
- # db is local
- def __init__(self, config=None):
- if not config:
- config = Config()
- if has_nova:
- # instantiate managers
- self.auth_manager = KeystoneClient(config=config)
- self.image_manager = GlanceClient(config=config)
- self.nova_manager = NovaClient(config=config)
- else:
- logger.debug('nova access - REST')
- raise SfaNotImplemented('nova access - Rest')