<value>8773</value>
<description>The Nova/EC2 API port.</description>
</variable>
+ <variable id="novarc" type="string">
+ <name>novarc</name>
+ <value>/root/novarc</value>
+ <description>Path to novarc client config file</description>
+ </variable>
</variablelist>
</category>
help="type filter ([all]|user|slice|authority|node|aggregate)",
choices=("all", "user", "slice", "authority", "node", "aggregate"),
default="all")
+    if command in ("show",):
+ parser.add_option("-k","--key",dest="keys",action="append",default=[],
+ help="specify specific keys to be displayed from record")
if command in ("resources"):
# rspec version
parser.add_option("-r", "--rspec-version", dest="rspec_version", default="SFA 1",
def print_help (self):
+ print "==================== Generic sfi usage"
self.sfi_parser.print_help()
+ print "==================== Specific command usage"
self.command_parser.print_help()
#
self.dispatch(command, command_options, command_args)
except KeyError:
self.logger.critical ("Unknown command %s"%command)
- raise
sys.exit(1)
-
+
return
####################
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
self.logger.error("No record of type %s"% options.type)
+ return
+ # user has required to focus on some keys
+ if options.keys:
+ def project (record):
+ projected={}
+ for key in options.keys:
+ try: projected[key]=record[key]
+ except: pass
+ return projected
+ record_dicts = [ project (record) for record in record_dicts ]
records = [ Record(dict=record_dict) for record_dict in record_dicts ]
for record in records:
if (options.format == "text"): record.dump(sort=True)
renew slice (RenewSliver)
"""
server = self.sliceapi()
+ if len(args) != 2:
+ self.print_help()
+ sys.exit(1)
+ [ slice_hrn, input_time ] = args
# slice urn
- slice_hrn = args[0]
slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ # time: don't try to be smart on the time format, server-side will
# creds
slice_cred = self.slice_credential_string(args[0])
creds = [slice_cred]
if options.delegate:
delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
creds.append(delegated_cred)
- # time
- time = args[1]
# options and call_id when supported
api_options = {}
api_options['call_id']=unique_call_id()
- result = server.RenewSliver(slice_urn, creds, time, *self.ois(server,api_options))
+ result = server.RenewSliver(slice_urn, creds, input_time, *self.ois(server,api_options))
value = ReturnValue.get_value(result)
if self.options.raw:
save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
import sfa.federica.fddriver
+# the federica flavour behaves like pl, except for
+# the driver
+
class fd (pl):
-# the max flavour behaves like pl, except for
-# the aggregate
def driver_class (self) :
return sfa.federica.fddriver.FdDriver
from sfa.util.config import Config
from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-
from sfa.trust.gid import create_uuid
from sfa.trust.certificate import convert_public_key, Keypair
from sfa.storage.alchemy import dbsession
from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
-
-from sfa.planetlab.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn
-from sfa.openstack.nova_shell import NovaShell
+from sfa.openstack.osxrn import OSXrn
+from sfa.openstack.shell import Shell
def load_keys(filename):
keys = {}
def __init__ (self, auth_hierarchy, logger):
self.auth_hierarchy = auth_hierarchy
self.logger=logger
+ self.config = Config ()
+ self.interface_hrn = self.config.SFA_INTERFACE_HRN
+ self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
+ self.shell = Shell (self.config)
def add_options (self, parser):
self.logger.debug ("OpenstackImporter: no options yet")
pass
- def run (self, options):
- # we don't have any options for now
- self.logger.info ("OpenstackImporter.run : to do")
-
- config = Config ()
- interface_hrn = config.SFA_INTERFACE_HRN
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
- shell = NovaShell (config)
-
- # create dict of all existing sfa records
- existing_records = {}
- existing_hrns = []
- key_ids = []
- for record in dbsession.query(RegRecord):
- existing_records[ (record.hrn, record.type,) ] = record
- existing_hrns.append(record.hrn)
-
+ def import_users(self, existing_hrns, existing_records):
# Get all users
- persons = shell.auth_manager.get_users()
- persons_dict = {}
- keys_filename = config.config_path + os.sep + 'person_keys.py'
- old_person_keys = load_keys(keys_filename)
- person_keys = {}
- for person in persons:
- hrn = config.SFA_INTERFACE_HRN + "." + person.id
- persons_dict[hrn] = person
- old_keys = old_person_keys.get(person.id, [])
- keys = [k.public_key for k in shell.db.key_pair_get_all_by_user(person.id)]
- person_keys[person.id] = keys
+ users = self.shell.auth_manager.users.list()
+ users_dict = {}
+ keys_filename = self.config.config_path + os.sep + 'person_keys.py'
+ old_user_keys = load_keys(keys_filename)
+ user_keys = {}
+ for user in users:
+ auth_hrn = self.config.SFA_INTERFACE_HRN
+ if user.tenantId is not None:
+ tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
+ auth_hrn = OSXrn(name=tenant.name, auth=self.config.SFA_INTERFACE_HRN, type='authority').get_hrn()
+ hrn = OSXrn(name=user.name, auth=auth_hrn, type='user').get_hrn()
+ users_dict[hrn] = user
+ old_keys = old_user_keys.get(hrn, [])
+ keys = [k.public_key for k in self.shell.nova_manager.keypairs.findall(name=hrn)]
+ user_keys[hrn] = keys
update_record = False
if old_keys != keys:
update_record = True
if hrn not in existing_hrns or \
- (hrn, 'user') not in existing_records or update_record:
- urn = hrn_to_urn(hrn, 'user')
-
+ (hrn, 'user') not in existing_records or update_record:
+ urn = OSXrn(xrn=hrn, type='user').get_urn()
+
if keys:
try:
pkey = convert_public_key(keys[0])
pkey = Keypair(create=True)
else:
self.logger.warn("OpenstackImporter: person %s does not have a PL public key"%hrn)
- pkey = Keypair(create=True)
- person_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
- person_record = RegUser ()
- person_record.type='user'
- person_record.hrn=hrn
- person_record.gid=person_gid
- person_record.authority=get_authority(hrn)
- dbsession.add(person_record)
+ pkey = Keypair(create=True)
+ user_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ user_record = RegUser ()
+ user_record.type='user'
+ user_record.hrn=hrn
+ user_record.gid=user_gid
+ user_record.authority=get_authority(hrn)
+ dbsession.add(user_record)
dbsession.commit()
- self.logger.info("OpenstackImporter: imported person %s" % person_record)
-
- # Get all projects
- projects = shell.auth_manager.get_projects()
- projects_dict = {}
- for project in projects:
- hrn = config.SFA_INTERFACE_HRN + '.' + project.id
- projects_dict[hrn] = project
- if hrn not in existing_hrns or \
- (hrn, 'slice') not in existing_records:
+ self.logger.info("OpenstackImporter: imported person %s" % user_record)
+
+ return users_dict, user_keys
+
+ def import_tenants(self, existing_hrns, existing_records):
+ # Get all tenants
+ # A tenant can represent an organizational group (site) or a
+        # slice. If a tenant's authority/parent matches the root authority it is
+ # considered a group/site. All other tenants are considered slices.
+ tenants = self.shell.auth_manager.tenants.list()
+ tenants_dict = {}
+ for tenant in tenants:
+ hrn = self.config.SFA_INTERFACE_HRN + '.' + tenant.name
+ tenants_dict[hrn] = tenant
+ authority_hrn = OSXrn(xrn=hrn, type='authority').get_authority_hrn()
+
+ if hrn in existing_hrns:
+ continue
+
+ if authority_hrn == self.config.SFA_INTERFACE_HRN:
+ # import group/site
+ record = RegAuthority()
+ urn = OSXrn(xrn=hrn, type='authority').get_urn()
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ gid = auth_info.get_gid_object()
+ record.type='authority'
+ record.hrn=hrn
+ record.gid=gid
+ record.authority=get_authority(hrn)
+ dbsession.add(record)
+ dbsession.commit()
+ self.logger.info("OpenstackImporter: imported authority: %s" % record)
+
+ else:
+ record = RegSlice ()
+ urn = OSXrn(xrn=hrn, type='slice').get_urn()
pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'slice')
- project_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
- project_record = RegSlice ()
- project_record.type='slice'
- project_record.hrn=hrn
- project_record.gid=project_gid
- project_record.authority=get_authority(hrn)
- dbsession.add(project_record)
+ gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ record.type='slice'
+ record.hrn=hrn
+ record.gid=gid
+ record.authority=get_authority(hrn)
+ dbsession.add(record)
dbsession.commit()
- self.logger.info("OpenstackImporter: imported slice: %s" % project_record)
-
+ self.logger.info("OpenstackImporter: imported slice: %s" % record)
+
+ return tenants_dict
+
+ def run (self, options):
+ # we don't have any options for now
+ self.logger.info ("OpenstackImporter.run : to do")
+
+ # create dict of all existing sfa records
+ existing_records = {}
+ existing_hrns = []
+ key_ids = []
+ for record in dbsession.query(RegRecord):
+ existing_records[ (record.hrn, record.type,) ] = record
+ existing_hrns.append(record.hrn)
+
+
+ tenants_dict = self.import_tenants(existing_hrns, existing_records)
+ users_dict, user_keys = self.import_users(existing_hrns, existing_records)
+
# remove stale records
- system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ system_records = [self.interface_hrn, self.root_auth, self.interface_hrn + '.slicemanager']
for (record_hrn, type) in existing_records.keys():
if record_hrn in system_records:
continue
continue
if type == 'user':
- if record_hrn in persons_dict:
+ if record_hrn in users_dict:
continue
- elif type == 'slice':
- if record_hrn in projects_dict:
+            elif type in ['slice', 'authority']:
+ if record_hrn in tenants_dict:
continue
else:
continue
# save pub keys
self.logger.info('OpenstackImporter: saving current pub keys')
- save_keys(keys_filename, person_keys)
+ keys_filename = self.config.config_path + os.sep + 'person_keys.py'
+ save_keys(keys_filename, user_keys)
auth_hrn = hrn
auth_info = api.auth.get_auth_info(auth_hrn)
# get record info
- record=dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+ filter = {'hrn': hrn}
if type:
- record = record.filter_by(type=type)
+ filter['type'] = type
+ record=dbsession.query(RegRecord).filter_by(**filter).first()
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
caller_gid = record.get_gid_object()
else:
caller_hrn, caller_type = urn_to_hrn(caller_xrn)
- caller_record = dbsession.query(RegRecord).filter_by(hrn=caller_hrn).first()
+ caller_filter = {'hrn': caller_hrn}
if caller_type:
- caller_record = caller_record.filter_by(type=caller_type)
+ caller_filter['type'] = caller_type
+ caller_record = dbsession.query(RegRecord).filter_by(**caller_filter).first()
if not caller_record:
raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
caller_gid = GID(string=caller_record.gid)
object_hrn = record.get_gid_object().get_hrn()
- rights = api.auth.determine_user_rights(caller_hrn, record)
+ rights = api.auth.determine_user_rights(caller_hrn, record.todict())
# make sure caller has rights to this object
if rights.is_empty():
raise PermissionError(caller_hrn + " has no rights to " + record.hrn)
try:
result=server.RenewSliver(xrn, creds, expiration_time, options)
if type(result)!=dict:
- result = {"code": {"geni_code": 0}, value: result}
- result["aggregate"] = aggregate
+ result = {'code': {'geni_code': 0}, 'value': result}
+ result['aggregate'] = aggregate
return result
except:
logger.log_exc('Something wrong in _RenewSliver with URL %s'%server.url)
- return {"aggregate": aggregate, "exc_info": traceback.format_exc(), "code": {"geni_code": -1}, "value": False, "output": ""}
+ return {'aggregate': aggregate, 'exc_info': traceback.format_exc(),
+ 'code': {'geni_code': -1},
+ 'value': False, 'output': ""}
(hrn, urn_type) = urn_to_hrn(xrn)
# get the callers hrn
results = threads.get_results()
geni_code = 0
- geni_output = ",".join([x.get("output","") for x in results])
- geni_value = reduce (lambda x,y: x and y, [result.get("value",False) for result in results], True)
+ geni_output = ",".join([x.get('output',"") for x in results])
+ geni_value = reduce (lambda x,y: x and y, [result.get('value',False) for result in results], True)
for agg_result in results:
- agg_geni_code = agg_result["code"].get("geni_code",0)
+ agg_geni_code = agg_result['code'].get('geni_code',0)
if agg_geni_code:
geni_code = agg_geni_code
- results = {"aggregates": results, "code": {"geni_code": geni_code}, "value": geni_value, "output": geni_output}
+ results = {'aggregates': results, 'code': {'geni_code': geni_code}, 'value': geni_value, 'output': geni_output}
return results
# mmh, it is expected that all results carry the same urn
overall['geni_urn'] = results[0]['geni_urn']
- overall['pl_login'] = results[0]['pl_login']
+ overall['pl_login'] = None
+ for result in results:
+ if result.get('pl_login'):
+ overall['pl_login'] = result['pl_login']
+ break
+ elif isinstance(result.get('value'), dict) and result['value'].get('pl_login'):
+ overall['pl_login'] = result['value']['pl_login']
+ break
# append all geni_resources
overall['geni_resources'] = \
reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
raise InsufficientRights('Renewsliver: Credential expires before requested expiration time')
if requested_time > datetime.datetime.utcnow() + datetime.timedelta(days=max_renew_days):
raise Exception('Cannot renew > %s days from now' % max_renew_days)
- return self.api.manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, options)
+ return self.api.manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, options)
--- /dev/null
+from sfa.util.sfalogging import logger
+from keystoneclient.v2_0 import client as keystone_client
+from glance import client as glance_client
+from novaclient.v1_1 import client as nova_client
+from sfa.util.config import Config
+
+
+def parse_novarc(filename):
+ opts = {}
+ f = open(filename, 'r')
+ for line in f:
+ try:
+ line = line.replace('export', '').strip()
+ parts = line.split('=')
+ if len(parts) > 1:
+ value = parts[1].replace("\'", "")
+ value = value.replace('\"', '')
+ opts[parts[0]] = value
+ except:
+ pass
+ f.close()
+ return opts
+
+
+class KeystoneClient:
+ def __init__(self, config=None):
+ if not config:
+ config = Config()
+ opts = parse_novarc(config.SFA_NOVA_NOVARC)
+ self.client = keystone_client.Client(username=opts.get('OS_USERNAME'),
+ password=opts.get('OS_PASSWORD'),
+ tenant_name=opts.get('OS_TENANT_NAME'),
+ auth_url=opts.get('OS_AUTH_URL'))
+
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+
+class GlanceClient:
+ def __init__(self, config=None):
+ if not config:
+ config = Config()
+ opts = parse_novarc(config.SFA_NOVA_NOVARC)
+ self.client = glance_client.get_client(host='0.0.0.0',
+ username=opts.get('OS_USERNAME'),
+ password=opts.get('OS_PASSWORD'),
+ tenant=opts.get('OS_TENANT_NAME'),
+ auth_url=opts.get('OS_AUTH_URL'))
+ def __getattr__(self, name):
+ return getattr(self.client, name)
+
+class NovaClient:
+ def __init__(self, config=None):
+ if not config:
+ config = Config()
+ opts = parse_novarc(config.SFA_NOVA_NOVARC)
+ self.client = nova_client.Client(username=opts.get('OS_USERNAME'),
+ api_key=opts.get('OS_PASSWORD'),
+ project_id=opts.get('OS_TENANT_NAME'),
+ auth_url=opts.get('OS_AUTH_URL'),
+ region_name='',
+ extensions=[],
+ service_type='compute',
+ service_name='',
+ )
+
+ def __getattr__(self, name):
+ return getattr(self.client, name)
def get_available_disk_images(self):
# get image records
disk_images = []
- for img in self.driver.shell.image_manager.detail():
+ for img in self.driver.shell.image_manager.get_images_detailed():
image = Image(img)
if image.container_format in ['ami', 'ovf']:
disk_images.append(image)
disk_image = None
try:
if id:
-            image = self.driver.shell.image_manager.show(id)
+            image = self.driver.shell.nova_manager.images.find(id=id)
+            disk_image = Image(image)
elif name:
-            image = self.driver.shell.image_manager.show_by_name(name)
-            if image['container_format'] in ['ami', 'ovf']:
-                disk_image = Image(image)
+            image = self.driver.shell.nova_manager.images.find(name=name)
+            disk_image = Image(image)
except ImageNotFound:
pass
return disk_image
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
-from sfa.util.plxrn import PlXrn, hrn_to_pl_slicename
+from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename, hrn_to_os_tenant_name
from sfa.util.cache import Cache
from sfa.trust.credential import Credential
# used to be used in get_ticket
# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
-from sfa.openstack.nova_shell import NovaShell
-from sfa.openstack.euca_shell import EucaShell
+from sfa.openstack.shell import Shell
from sfa.openstack.osaggregate import OSAggregate
from sfa.planetlab.plslices import PlSlices
-from sfa.util.osxrn import OSXrn
-
def list_to_dict(recs, key):
"""
# can be sent as-is; it takes care of authentication
# from the global config
#
-class NovaDriver (Driver):
+class NovaDriver(Driver):
# the cache instance is a class member so it survives across incoming requests
cache = None
def __init__ (self, config):
- Driver.__init__ (self, config)
- self.shell = NovaShell (config)
- self.euca_shell = EucaShell(config)
+ Driver.__init__(self, config)
+ self.shell = Shell(config)
self.cache=None
if config.SFA_AGGREGATE_CACHING:
if NovaDriver.cache is None:
##########
def register (self, sfa_record, hrn, pub_key):
- type = sfa_record['type']
- #pl_record = self.sfa_fields_to_pl_fields(type dd , hrn, sfa_record)
-
- if type == 'slice':
- # add slice description, name, researchers, PI
- name = hrn_to_pl_slicename(hrn)
- researchers = sfa_record.get('researchers', [])
- pis = sfa_record.get('pis', [])
- project_manager = None
- description = sfa_record.get('description', None)
- if pis:
- project_manager = Xrn(pis[0], 'user').get_leaf()
- elif researchers:
- project_manager = Xrn(researchers[0], 'user').get_leaf()
- if not project_manager:
- err_string = "Cannot create a project without a project manager. " + \
- "Please specify at least one PI or researcher for project: " + \
- name
- raise SfaInvalidArgument(err_string)
-
- users = [Xrn(user, 'user').get_leaf() for user in \
- pis + researchers]
- self.shell.auth_manager.create_project(name, project_manager, description, users)
-
- elif type == 'user':
- # add person roles, projects and keys
- name = Xrn(hrn).get_leaf()
- self.shell.auth_manager.create_user(name)
- projects = sfa_records.get('slices', [])
- for project in projects:
- project_name = Xrn(project).get_leaf()
- self.shell.auth_manager.add_to_project(name, project_name)
- keys = sfa_records.get('keys', [])
- for key in keys:
- key_dict = {
- 'user_id': name,
- 'name': name,
- 'public': key,
- }
- self.shell.db.key_pair_create(key_dict)
-
- return name
+ if sfa_record['type'] == 'slice':
+ record = self.register_slice(sfa_record, hrn)
+ elif sfa_record['type'] == 'user':
+ record = self.register_user(sfa_record, hrn, pub_key)
+ elif sfa_record['type'].startswith('authority'):
+ record = self.register_authority(sfa_record, hrn)
+ # We should be returning the records id as a pointer but
+ # this is a string and the records table expects this to be an
+ # int.
+ #return record.id
+ return -1
+
+ def register_slice(self, sfa_record, hrn):
+ # add slice description, name, researchers, PI
+ name = hrn_to_os_tenant_name(hrn)
+ description = sfa_record.get('description', None)
+ self.shell.auth_manager.tenants.create(name, description)
+ tenant = self.shell.auth_manager.tenants.find(name=name)
+ auth_hrn = OSXrn(xrn=hrn, type='slice').get_authority_hrn()
+ parent_tenant_name = OSXrn(xrn=auth_hrn, type='slice').get_tenant_name()
+ parent_tenant = self.shell.auth_manager.tenants.find(name=parent_tenant_name)
+ researchers = sfa_record.get('researchers', [])
+ for researcher in researchers:
+ name = Xrn(researcher).get_leaf()
+ user = self.shell.auth_manager.users.find(name=name)
+ self.shell.auth_manager.roles.add_user_role(user, 'user', tenant)
+
+ pis = sfa_record.get('pis', [])
+ for pi in pis:
+ name = Xrn(pi).get_leaf()
+ user = self.shell.auth_manager.users.find(name=name)
+ self.shell.auth_manager.roles.add_user_role(user, 'pi', tenant)
+ self.shell.auth_manager.roles.add_user_role(user, 'pi', parent_tenant)
+
+ return tenant
+
+ def register_user(self, sfa_record, hrn, pub_key):
+ # add person roles, projects and keys
+ email = sfa_record.get('email', None)
+ xrn = Xrn(hrn)
+ name = xrn.get_leaf()
+ auth_hrn = xrn.get_authority_hrn()
+ tenant_name = OSXrn(xrn=auth_hrn, type='authority').get_tenant_name()
+ tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
+ self.shell.auth_manager.users.create(name, email=email, tenant_id=tenant.id)
+ user = self.shell.auth_manager.users.find(name=name)
+        slices = sfa_record.get('slices', [])
+        for slice in slices:
+            slice_tenant_name = OSXrn(xrn=slice, type='slice').get_tenant_name()
+            slice_tenant = self.shell.auth_manager.tenants.find(name=slice_tenant_name)
+            self.shell.auth_manager.roles.add_user_role(user, 'user', slice_tenant)
+        keys = sfa_record.get('keys', [])
+ for key in keys:
+            self.shell.nova_manager.keypairs.create(name, key)
+ return user
+
+ def register_authority(self, sfa_record, hrn):
+ name = OSXrn(xrn=hrn, type='authority').get_tenant_name()
+ self.shell.auth_manager.tenants.create(name, sfa_record.get('description', ''))
+ tenant = self.shell.auth_manager.tenants.find(name=name)
+ return tenant
+
##########
# xxx actually old_sfa_record comes filled with plc stuff as well in the original code
elif type == "slice":
# can update project manager and description
- name = hrn_to_pl_slicename(hrn)
+ name = hrn_to_os_slicename(hrn)
researchers = sfa_record.get('researchers', [])
pis = sfa_record.get('pis', [])
project_manager = None
if self.shell.auth_manager.get_user(name):
self.shell.auth_manager.delete_user(name)
elif type == 'slice':
- name = hrn_to_pl_slicename(sfa_record['hrn'])
+ name = hrn_to_os_slicename(sfa_record['hrn'])
if self.shell.auth_manager.get_project(name):
self.shell.auth_manager.delete_project(name)
return True
records = [records]
for record in records:
- os_record = None
if record['type'] == 'user':
- name = Xrn(record['hrn']).get_leaf()
- os_record = self.shell.auth_manager.get_user(name)
- projects = self.shell.db.project_get_by_user(name)
- record['slices'] = [self.hrn + "." + proj.name for \
- proj in projects]
- record['roles'] = self.shell.db.user_get_roles(name)
- keys = self.shell.db.key_pair_get_all_by_user(name)
- record['keys'] = [key.public_key for key in keys]
+ record = self.fill_user_record_info(record)
elif record['type'] == 'slice':
- name = hrn_to_pl_slicename(record['hrn'])
- os_record = self.shell.auth_manager.get_project(name)
- record['description'] = os_record.description
- record['PI'] = [self.hrn + "." + os_record.project_manager.name]
- record['geni_creator'] = record['PI']
- record['researcher'] = [self.hrn + "." + user for \
- user in os_record.member_ids]
+ record = self.fill_slice_record_info(record)
+ elif record['type'].startswith('authority'):
+ record = self.fill_auth_record_info(record)
else:
continue
record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
record['geni_certificate'] = record['gid']
- record['name'] = os_record.name
#if os_record.created_at is not None:
# record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
#if os_record.updated_at is not None:
return records
+ def fill_user_record_info(self, record):
+ xrn = Xrn(record['hrn'])
+ name = xrn.get_leaf()
+ record['name'] = name
+ user = self.shell.auth_manager.users.find(name=name)
+ record['email'] = user.email
+ tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
+ slices = []
+ all_tenants = self.shell.auth_manager.tenants.list()
+ for tmp_tenant in all_tenants:
+ if tmp_tenant.name.startswith(tenant.name +"."):
+ for tmp_user in tmp_tenant.list_users():
+ if tmp_user.name == user.name:
+ slice_hrn = ".".join([self.hrn, tmp_tenant.name])
+ slices.append(slice_hrn)
+ record['slices'] = slices
+ roles = self.shell.auth_manager.roles.roles_for_user(user, tenant)
+ record['roles'] = [role.name for role in roles]
+ keys = self.shell.nova_manager.keypairs.findall(name=record['hrn'])
+ record['keys'] = [key.public_key for key in keys]
+ return record
+
+ def fill_slice_record_info(self, record):
+ tenant_name = hrn_to_os_tenant_name(record['hrn'])
+ tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
+ parent_tenant_name = OSXrn(xrn=tenant_name).get_authority_hrn()
+ parent_tenant = self.shell.auth_manager.tenants.find(name=parent_tenant_name)
+ researchers = []
+ pis = []
+
+ # look for users and pis in slice tenant
+ for user in tenant.list_users():
+ for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
+ if role.name.lower() == 'pi':
+ user_tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
+ hrn = ".".join([self.hrn, user_tenant.name, user.name])
+ pis.append(hrn)
+ elif role.name.lower() in ['user', 'member']:
+ user_tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
+ hrn = ".".join([self.hrn, user_tenant.name, user.name])
+ researchers.append(hrn)
+
+ # look for pis in the slice's parent (site/organization) tenant
+ for user in parent_tenant.list_users():
+ for role in self.shell.auth_manager.roles.roles_for_user(user, parent_tenant):
+ if role.name.lower() == 'pi':
+ user_tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
+ hrn = ".".join([self.hrn, user_tenant.name, user.name])
+ pis.append(hrn)
+ record['name'] = tenant_name
+ record['description'] = tenant.description
+ record['PI'] = pis
+ if pis:
+ record['geni_creator'] = pis[0]
+ else:
+ record['geni_creator'] = None
+ record['researcher'] = researchers
+ return record
+
+ def fill_auth_record_info(self, record):
+ tenant_name = hrn_to_os_tenant_name(record['hrn'])
+ tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
+ researchers = []
+ pis = []
+
+ # look for users and pis in slice tenant
+ for user in tenant.list_users():
+ for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
+ hrn = ".".join([self.hrn, tenant.name, user.name])
+ if role.name.lower() == 'pi':
+ pis.append(hrn)
+ elif role.name.lower() in ['user', 'member']:
+ researchers.append(hrn)
+
+ # look for slices
+ slices = []
+ all_tenants = self.shell.auth_manager.tenants.list()
+ for tmp_tenant in all_tenants:
+ if tmp_tenant.name.startswith(tenant.name+"."):
+ slices.append(".".join([self.hrn, tmp_tenant.name]))
+
+ record['name'] = tenant_name
+ record['description'] = tenant.description
+ record['PI'] = pis
+ record['enabled'] = tenant.enabled
+ record['researchers'] = researchers
+ record['slices'] = slices
+ return record
####################
# plcapi works by changes, compute what needs to be added/deleted
def sliver_status (self, slice_urn, slice_hrn):
# find out where this slice is currently running
- project_name = hrn_to_pl_slicename(slice_hrn)
+ project_name = hrn_to_os_slicename(slice_hrn)
project = self.shell.auth_manager.get_project(project_name)
instances = self.shell.db.instance_get_all_by_project(project_name)
if len(instances) == 0:
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
- project_name = hrn_to_pl_slicename(slice_hrn)
aggregate = OSAggregate(self)
- # parse rspec
rspec = RSpec(rspec_string)
+ instance_name = hrn_to_os_slicename(slice_hrn)
- # ensure project and users exist in local db
- aggregate.create_project(project_name, users, options=options)
-
- # collect publick keys
+ # assume first user is the caller and use their context
+ # for the ec2/euca api connection. Also, use the first users
+ # key as the project key.
+ key_name = None
+        if len(users) >= 1:
+ key_name = aggregate.create_instance_key(slice_hrn, users[0])
+
+ # collect public keys
pubkeys = []
- project_key = None
for user in users:
pubkeys.extend(user['keys'])
- # assume first user is the caller and use their context
- # for the ec2/euca api connection. Also, use the first users
- # key as the project key.
- if not project_key:
- username = Xrn(user['urn']).get_leaf()
- user_keys = self.shell.db.key_pair_get_all_by_user(username)
- if user_keys:
- project_key = user_keys[0].name
-
- # ensure person records exists
- self.euca_shell.init_context(project_name)
- aggregate.run_instances(project_name, rspec_string, project_key, pubkeys)
+
+ aggregate.run_instances(instance_name, rspec_string, key_name, pubkeys)
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
- # we need to do this using the context of one of the slice users
- project_name = hrn_to_pl_slicename(slice_hrn)
- self.euca_shell.init_context(project_name)
aggregate = OSAggregate(self)
+ project_name = hrn_to_os_slicename(slice_hrn)
return aggregate.delete_instances(project_name)
def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
- name = hrn_to_pl_slicename(slice_hrn)
+ name = hrn_to_os_slicename(slice_hrn)
aggregate = OSAggregate(self)
return aggregate.update_instances(name)
+++ /dev/null
-import sys
-import xmlrpclib
-import socket
-from urlparse import urlparse
-from sfa.util.sfalogging import logger
-try:
- from nova import db
- from nova import flags
- from nova import context
- from nova.auth.manager import AuthManager
- from nova.compute.manager import ComputeManager
- from nova.network.manager import NetworkManager
- from nova.scheduler.manager import SchedulerManager
- from nova.image.glance import GlanceImageService
- has_nova = True
-except:
- has_nova = False
-
-
-class InjectContext:
- """
- Wraps the module and injects the context when executing methods
- """
- def __init__(self, proxy, context):
- self.proxy = proxy
- self.context = context
-
- def __getattr__(self, name):
- def func(*args, **kwds):
- result=getattr(self.proxy, name)(self.context, *args, **kwds)
- return result
- return func
-
-class NovaShell:
- """
- A simple native shell to a nova backend.
- This class can receive all nova calls to the underlying testbed
- """
-
- # dont care about limiting calls yet
- direct_calls = []
- alias_calls = {}
-
-
- # use the 'capability' auth mechanism for higher performance when the PLC db is local
- def __init__ ( self, config ) :
- self.auth_manager = None
- self.compute_manager = None
- self.network_manager = None
- self.scheduler_manager = None
- self.db = None
- self.image_manager = None
-
- if has_nova:
- logger.debug('nova access - native')
- # load the config
- flags.FLAGS(['foo', '--flagfile=/etc/nova/nova.conf', 'foo', 'foo'])
- # instantiate managers
- self.auth_manager = AuthManager()
- self.compute_manager = ComputeManager()
- self.network_manager = NetworkManager()
- self.scheduler_manager = SchedulerManager()
- self.db = InjectContext(db, context.get_admin_context())
- self.image_manager = InjectContext(GlanceImageService(), context.get_admin_context())
- else:
- logger.debug('nova access - REST')
- raise SfaNotImplemented('nova access - Rest')
from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.interface import Interface
from sfa.util.xrn import Xrn
-from sfa.util.plxrn import PlXrn, hrn_to_pl_slicename
-from sfa.util.osxrn import OSXrn
+from sfa.planetlab.plxrn import PlXrn
+from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.rspecs.version_manager import VersionManager
-from sfa.openstack.image import ImageManager
from sfa.openstack.security_group import SecurityGroup
from sfa.util.sfalogging import logger
+def pubkeys_to_user_data(pubkeys):
+ user_data = "#!/bin/bash\n\n"
+ for pubkey in pubkeys:
+ pubkey = pubkey.replace('\n', '')
+ user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
+ user_data += "\n"
+ user_data += "echo >> /root/.ssh/authorized_keys"
+ user_data += "\n"
+ return user_data
+
def instance_to_sliver(instance, slice_xrn=None):
- # should include?
- # * instance.image_ref
- # * instance.kernel_id
- # * instance.ramdisk_id
- import nova.db.sqlalchemy.models
- name=None
- type=None
sliver_id = None
- if isinstance(instance, dict):
- # this is an isntance type dict
- name = instance['name']
- type = instance['name']
- elif isinstance(instance, nova.db.sqlalchemy.models.Instance):
- # this is an object that describes a running instance
- name = instance.display_name
- type = instance.instance_type.name
- else:
- raise SfaAPIError("instnace must be an instance_type dict or" + \
- " a nova.db.sqlalchemy.models.Instance object")
if slice_xrn:
xrn = Xrn(slice_xrn, 'slice')
sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)
sliver = Sliver({'slice_id': sliver_id,
- 'name': name,
- 'type': type,
- 'tags': []})
+ 'name': instance.name,
+ 'type': instance.name,
+ 'cpus': str(instance.vcpus),
+ 'memory': str(instance.ram),
+ 'storage': str(instance.disk)})
return sliver
-
-
-def ec2_id(id=None, type=None):
- ec2_id = None
- if type == 'ovf':
- type = 'ami'
- if id and type:
- ec2_id = CloudController.image_ec2_id(id, type)
- return ec2_id
-
+def image_to_rspec_disk_image(image):
+ img = DiskImage()
+ img['name'] = image['name']
+ img['description'] = image['name']
+ img['os'] = image['name']
+ img['version'] = image['name']
+ return img
+
class OSAggregate:
def __init__(self, driver):
return rspec.toxml()
def get_availability_zones(self):
- zones = self.driver.shell.db.zone_get_all()
+ # essex release
+ zones = self.driver.shell.nova_manager.dns_domains.domains()
+
if not zones:
zones = ['cloud']
else:
zones = [zone.name for zone in zones]
+ return zones
def get_slice_nodes(self, slice_xrn):
- image_manager = ImageManager(self.driver)
-
zones = self.get_availability_zones()
- name = hrn_to_pl_slicename(slice_xrn)
- instances = self.driver.shell.db.instance_get_all_by_project(name)
+ name = hrn_to_os_slicename(slice_xrn)
+ instances = self.driver.shell.nova_manager.servers.findall(name=name)
rspec_nodes = []
for instance in instances:
rspec_node = Node()
- interfaces = []
- for fixed_ip in instance.fixed_ips:
- if_xrn = PlXrn(auth=self.driver.hrn,
- interface='node%s:eth0' % (instance.hostname))
- interface = Interface({'component_id': if_xrn.urn})
- interface['ips'] = [{'address': fixed_ip['address'],
- 'netmask': fixed_ip['network'].netmask,
- 'type': 'ipv4'}]
- interfaces.append(interface)
- if instance.availability_zone:
- node_xrn = OSXrn(instance.availability_zone, 'node')
- else:
- node_xrn = OSXrn('cloud', 'node')
+
+ #TODO: find a way to look up an instances availability zone in essex
+ #if instance.availability_zone:
+ # node_xrn = OSXrn(instance.availability_zone, 'node')
+ #else:
+ # node_xrn = OSXrn('cloud', 'node')
+            component_id = instance.metadata.get('component_id')
+            if component_id:
+                node_xrn = OSXrn(xrn=component_id, type='node')
+            else:
+                node_xrn = OSXrn('cloud', type='node')
rspec_node['component_id'] = node_xrn.urn
rspec_node['component_name'] = node_xrn.name
- rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
- sliver = instance_to_sliver(instance)
- disk_image = image_manager.get_disk_image(instance.image_ref)
- sliver['disk_image'] = [disk_image.to_rspec_object()]
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
+ sliver = instance_to_sliver(flavor)
rspec_node['slivers'] = [sliver]
- rspec_node['interfaces'] = interfaces
+ image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
+ if isinstance(image, list) and len(image) > 0:
+ image = image[0]
+ disk_image = image_to_rspec_disk_image(image)
+ sliver['disk_image'] = [disk_image]
+
+ # build interfaces
+ interfaces = []
+ addresses = instance.addresses
+ for private_ip in addresses.get('private', []):
+ if_xrn = PlXrn(auth=self.driver.hrn,
+ interface='node%s:eth0' % (instance.hostId))
+ interface = Interface({'component_id': if_xrn.urn})
+            interface['ips'] =  [{'address': private_ip['addr'],
+                                 #'netmask': private_ip['network'],
+                                 'type': 'ipv%s' % private_ip['version']}]
+ interfaces.append(interface)
+ rspec_node['interfaces'] = interfaces
+
# slivers always provide the ssh service
- hostname = None
- for interface in interfaces:
- if 'ips' in interface and interface['ips'] and \
- isinstance(interface['ips'], list):
- if interface['ips'][0].get('address'):
- hostname = interface['ips'][0].get('address')
- break
- login = Login({'authentication': 'ssh-keys',
- 'hostname': hostname,
- 'port':'22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'] = [service]
+ rspec_node['services'] = []
+ for public_ip in addresses.get('public', []):
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': public_ip['addr'],
+ 'port':'22', 'username': 'root'})
+ service = Services({'login': login})
+ rspec_node['services'].append(service)
rspec_nodes.append(rspec_node)
return rspec_nodes
def get_aggregate_nodes(self):
zones = self.get_availability_zones()
# available sliver/instance/vm types
- instances = self.driver.shell.db.instance_type_get_all().values()
+ instances = self.driver.shell.nova_manager.flavors.list()
+ if isinstance(instances, dict):
+ instances = instances.values()
# available images
- image_manager = ImageManager(self.driver)
- disk_images = image_manager.get_available_disk_images()
- disk_image_objects = [image.to_rspec_object() \
- for image in disk_images]
+ images = self.driver.shell.image_manager.get_images_detailed()
+ disk_images = [image_to_rspec_disk_image(img) for img in images if img['container_format'] in ['ami', 'ovf']]
rspec_nodes = []
for zone in zones:
rspec_node = Node()
- xrn = OSXrn(zone, 'node')
+ xrn = OSXrn(zone, type='node')
rspec_node['component_id'] = xrn.urn
rspec_node['component_name'] = xrn.name
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
slivers = []
for instance in instances:
sliver = instance_to_sliver(instance)
- sliver['disk_image'] = disk_image_objects
+ sliver['disk_image'] = disk_images
slivers.append(sliver)
rspec_node['slivers'] = slivers
return rspec_nodes
- def create_project(self, slicename, users, options={}):
- """
- Create the slice if it doesn't alredy exist. Create user
- accounts that don't already exist
- """
- from nova.exception import ProjectNotFound, UserNotFound
- for user in users:
- username = Xrn(user['urn']).get_leaf()
- try:
- self.driver.shell.auth_manager.get_user(username)
- except nova.exception.UserNotFound:
- self.driver.shell.auth_manager.create_user(username)
- self.verify_user_keys(username, user['keys'], options)
-
- try:
- slice = self.driver.shell.auth_manager.get_project(slicename)
- except ProjectNotFound:
- # assume that the first user is the project manager
- proj_manager = Xrn(users[0]['urn']).get_leaf()
- self.driver.shell.auth_manager.create_project(slicename, proj_manager)
-
- def verify_user_keys(self, username, keys, options={}):
- """
- Add requested keys.
- """
- append = options.get('append', True)
- existing_keys = self.driver.shell.db.key_pair_get_all_by_user(username)
- existing_pub_keys = [key.public_key for key in existing_keys]
- removed_pub_keys = set(existing_pub_keys).difference(keys)
- added_pub_keys = set(keys).difference(existing_pub_keys)
- pubkeys = []
- # add new keys
- for public_key in added_pub_keys:
- key = {}
- key['user_id'] = username
- key['name'] = username
- key['public_key'] = public_key
- self.driver.shell.db.key_pair_create(key)
-
- # remove old keys
- if not append:
- for key in existing_keys:
- if key.public_key in removed_pub_keys:
- self.driver.shell.db.key_pair_destroy(username, key.name)
+ def create_instance_key(self, slice_hrn, user):
+        key_name = "%s:%s" % (slice_hrn, Xrn(user['urn']).get_hrn())
+ pubkey = user['keys'][0]
+ key_found = False
+ existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
+ for existing_key in existing_keys:
+ if existing_key.public_key != pubkey:
+ self.driver.shell.nova_manager.keypairs.delete(existing_key)
+ elif existing_key.public_key == pubkey:
+ key_found = True
+ if not key_found:
+            self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
+ return key_name
+
def create_security_group(self, slicename, fw_rules=[]):
# use default group by default
icmp_type_code = kwds.get('icmp_type_code'))
- def reserve_instance(self, image_id, kernel_id, ramdisk_id, \
- instance_type, key_name, user_data, group_name):
- conn = self.driver.euca_shell.get_euca_connection()
- logger.info('Reserving an instance: image: %s, kernel: ' \
- '%s, ramdisk: %s, type: %s, key: %s' % \
- (image_id, kernel_id, ramdisk_id,
- instance_type, key_name))
- try:
- reservation = conn.run_instances(image_id=image_id,
- kernel_id=kernel_id,
- ramdisk_id=ramdisk_id,
- instance_type=instance_type,
- key_name=key_name,
- user_data = user_data,
- security_groups=[group_name])
- #placement=zone,
- #min_count=min_count,
- #max_count=max_count,
-
- except Exception, err:
- logger.log_exc(err)
-
-
- def run_instances(self, slicename, rspec, keyname, pubkeys):
- """
- Create the security groups and instances.
- """
- # the default image to use for instnaces that dont
- # explicitly request an image.
- # Just choose the first available image for now.
- image_manager = ImageManager(self.driver)
- available_images = image_manager.get_available_disk_images()
- default_image_id = None
- default_aki_id = None
- default_ari_id = None
- default_image = available_images[0]
- default_image_id = ec2_id(default_image.id, default_image.container_format)
- default_aki_id = ec2_id(default_image.kernel_id, 'aki')
- default_ari_id = ec2_id(default_image.ramdisk_id, 'ari')
- # get requested slivers
+ def run_instances(self, slicename, rspec, key_name, pubkeys):
+ #logger.debug('Reserving an instance: image: %s, flavor: ' \
+ # '%s, key: %s, name: %s' % \
+ # (image_id, flavor_id, key_name, slicename))
+
+ authorized_keys = "\n".join(pubkeys)
+ files = {'/root/.ssh/authorized_keys': authorized_keys}
rspec = RSpec(rspec)
- user_data = "\n".join(pubkeys)
requested_instances = defaultdict(list)
# iterate over clouds/zones/nodes
for node in rspec.version.get_nodes_with_slivers():
- instance_types = node.get('slivers', [])
- if isinstance(instance_types, list):
- # iterate over sliver/instance types
- for instance_type in instance_types:
- fw_rules = instance_type.get('fw_rules', [])
- group_name = self.create_security_group(slicename, fw_rules)
- ami_id = default_image_id
- aki_id = default_aki_id
- ari_id = default_ari_id
- req_image = instance_type.get('disk_image')
- if req_image and isinstance(req_image, list):
- req_image_name = req_image[0]['name']
- disk_image = image_manager.get_disk_image(name=req_image_name)
- if disk_image:
- ami_id = ec2_id(disk_image.id, disk_image.container_format)
- aki_id = ec2_id(disk_image.kernel_id, 'aki')
- ari_id = ec2_id(disk_image.ramdisk_id, 'ari')
- # start the instance
- self.reserve_instance(image_id=ami_id,
- kernel_id=aki_id,
- ramdisk_id=ari_id,
- instance_type=instance_type['name'],
- key_name=keyname,
- user_data=user_data,
- group_name=group_name)
+ instances = node.get('slivers', [])
+ if not instances:
+ continue
+ for instance in instances:
+ metadata = {}
+ flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+                image_id = None
+                image = instance.get('disk_image')
+                if image and isinstance(image, list):
+                    image = image[0]
+                if image:
+                    image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+ fw_rules = instance.get('fw_rules', [])
+ group_name = self.create_security_group(slicename, fw_rules)
+ metadata['security_groups'] = [group_name]
+ metadata['component_id'] = node['component_id']
+ try:
+ self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+ image=image_id,
+ key_name = key_name,
+ security_group = group_name,
+ files=files,
+ meta=metadata,
+ name=slicename)
+ except Exception, err:
+ logger.log_exc(err)
+
- def delete_instances(self, project_name):
- instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+ def delete_instances(self, instance_name):
+ instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
security_group_manager = SecurityGroup(self.driver)
for instance in instances:
# deleate this instance's security groups
- for security_group in instance.security_groups:
+ for security_group in instance.metadata.get('security_groups', []):
# dont delete the default security group
- if security_group.name != 'default':
- security_group_manager.delete_security_group(security_group.name)
+ if security_group != 'default':
+ security_group_manager.delete_security_group(security_group)
# destroy instance
- self.driver.shell.db.instance_destroy(instance.id)
+ self.driver.shell.nova_manager.servers.delete(instance)
return 1
- def stop_instances(self, project_name):
- instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+ def stop_instances(self, instance_name):
+ instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
for instance in instances:
- self.driver.shell.db.instance_stop(instance.id)
+ self.driver.shell.nova_manager.servers.pause(instance)
return 1
def update_instances(self, project_name):
--- /dev/null
+import re
+from sfa.util.xrn import Xrn
+from sfa.util.config import Config
+
+def hrn_to_os_slicename(hrn):
+ return OSXrn(xrn=hrn, type='slice').get_slicename()
+
+
+def hrn_to_os_tenant_name(hrn):
+ return OSXrn(xrn=hrn, type='slice').get_tenant_name()
+
+def cleanup_name(name):
+ return name.replace(".", "_").replace("+", "_")
+
+class OSXrn(Xrn):
+
+ def __init__(self, name=None, auth=None, **kwds):
+
+ config = Config()
+ if name is not None:
+ if 'type' in kwds:
+ self.type = kwds['type']
+ if auth is not None:
+ self.hrn='.'.join([auth, cleanup_name(name)])
+ else:
+ self.hrn = config.SFA_INTERFACE_HRN + "." + cleanup_name(name)
+ self.hrn_to_urn()
+ else:
+ Xrn.__init__(self, **kwds)
+
+ self.name = self.get_name()
+
+ def get_name(self):
+ self._normalize()
+ leaf = self.leaf
+ sliver_id_parts = leaf.split(':')
+ name = sliver_id_parts[0]
+ name = re.sub('[^a-zA-Z0-9_]', '', name)
+ return name
+
+
+ def get_slicename(self):
+ self._normalize()
+ slicename = self.hrn
+ slicename = slicename.split(':')[0]
+ slicename = re.sub('[\.]', '_', slicename)
+ return slicename
+
+ def get_tenant_name(self):
+ self._normalize()
+ tenant_name = self.hrn
+ tenant_name = ".".join(tenant_name.split('.')[1:])
+ return tenant_name
+
+
+
class SecurityGroup:
def __init__(self, driver):
- self.driver = driver
+ self.client = driver.shell.nova_manager
def create_security_group(self, name):
- conn = self.driver.euca_shell.get_euca_connection()
try:
- conn.create_security_group(name=name, description="")
+ self.client.security_groups.create(name=name, description="")
except Exception, ex:
logger.log_exc("Failed to add security group")
def delete_security_group(self, name):
- conn = self.driver.euca_shell.get_euca_connection()
try:
- conn.delete_security_group(name=name)
+            group = self.client.security_groups.find(name=name)
+            self.client.security_groups.delete(group)
except Exception, ex:
logger.log_exc("Failed to delete security group")
port_range=None, icmp_type_code=None,
source_group_name=None, source_group_owner_id=None):
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type and icmp_type[0] and icmp_type[1]:
- from_port, to_port = icmp_type[0], icmp_type[1]
+ try:
+ from_port, to_port = self._validate_port_range(port_range)
+ icmp_type = self._validate_icmp_type_code(icmp_type_code)
+ if icmp_type and icmp_type[0] and icmp_type[1]:
+ from_port, to_port = icmp_type[0], icmp_type[1]
- if group_name:
- conn = self.driver.euca_shell.get_euca_connection()
- try:
- conn.authorize_security_group(
- group_name=group_name,
- src_security_group_name=source_group_name,
- src_security_group_owner_id=source_group_owner_id,
+ group = self.client.security_groups.find(name=group_name)
+ self.client.security_group_rules.create(
+ group_id=group.id,
ip_protocol=protocol,
from_port=from_port,
to_port=to_port,
cidr_ip=cidr_ip,
)
- except Exception, ex:
- logger.log_exc("Failed to add rule to group %s" % group_name)
+ except Exception, ex:
+ logger.log_exc("Failed to add rule to group %s" % group_name)
def remove_rule_from_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
port_range=None, icmp_type_code=None,
source_group_name=None, source_group_owner_id=None):
-
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type:
- from_port, to_port = icmp_type[0], icmp_type[1]
-
- if group_name:
- conn = self.driver.euca_shell.get_euca_connection()
- try:
- conn.revoke_security_group(
- group_name=group_name,
- src_security_group_name=source_group_name,
- src_security_group_owner_id=source_group_owner_id,
- ip_protocol=protocol,
- from_port=from_port,
- to_port=to_port,
- cidr_ip=ip,
- )
- except Exception, ex:
- logger.log_exc("Failed to remove rule from group %s" % group_name)
+ try:
+ from_port, to_port = self._validate_port_range(port_range)
+ icmp_type = self._validate_icmp_type_code(icmp_type_code)
+ if icmp_type:
+ from_port, to_port = icmp_type[0], icmp_type[1]
+ group = self.client.security_groups.find(name=group_name)
+ filter = {
+ 'id': group.id,
+ 'from_port': from_port,
+ 'to_port': to_port,
+            'cidr_ip': cidr_ip,
+ 'ip_protocol':protocol,
+ }
+ rule = self.client.security_group_rules.find(**filter)
+ if rule:
+ self.client.security_group_rules.delete(rule)
+ except Exception, ex:
+ logger.log_exc("Failed to remove rule from group %s" % group_name)
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+import gettext
+from urlparse import urlparse
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+
+try:
+ from sfa.openstack.client import GlanceClient, NovaClient, KeystoneClient
+ has_nova = True
+except:
+ has_nova = False
+
+
+
+class Shell:
+ """
+ A simple native shell to a nova backend.
+ This class can receive all nova calls to the underlying testbed
+ """
+
+ # dont care about limiting calls yet
+ direct_calls = []
+ alias_calls = {}
+
+
+ # use the 'capability' auth mechanism for higher performance when the PLC db is local
+ def __init__ ( self, config=None) :
+ if not config:
+ config = Config()
+ if has_nova:
+ # instantiate managers
+ self.auth_manager = KeystoneClient(config)
+ self.image_manager = GlanceClient(config)
+ self.nova_manager = NovaClient(config)
+ else:
+ logger.debug('nova access - REST')
+ raise SfaNotImplemented('nova access - Rest')
filter.update({'name':slice['name']})
return_fields = ['lease_id', 'hostname', 'site_id', 'name', 't_from', 't_until']
leases = self.driver.shell.GetLeases(filter)
+ grain = self.driver.shell.GetLeaseGranularity()
site_ids = []
for lease in leases:
slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
- rspec_lease['t_from'] = lease['t_from']
- rspec_lease['t_until'] = lease['t_until']
+ rspec_lease['start_time'] = lease['t_from']
+ rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) / grain
rspec_leases.append(rspec_lease)
return rspec_leases
requested_lease = {}
if not lease.get('lease_id'):
requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- requested_lease['t_from'] = lease.get('t_from')
- requested_lease['t_until'] = lease.get('t_until')
+ requested_lease['start_time'] = lease.get('start_time')
+ requested_lease['duration'] = lease.get('duration')
else:
kept_leases.append(int(lease['lease_id']))
if requested_lease.get('hostname'):
def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+ grain = self.driver.shell.GetLeaseGranularity()
current_leases = [lease['lease_id'] for lease in leases]
deleted_leases = list(set(current_leases).difference(kept_leases))
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
deleted=self.driver.shell.DeleteLeases(deleted_leases)
for lease in requested_leases:
- added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['t_from']), int(lease['t_until']))
+ added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
except:
logger.log_exc('Failed to add/remove slice leases')
def pl_login_base (self):
self._normalize()
- base = self.authority[-1]
+ if self.type and self.type.startswith('authority'):
+ base = self.leaf
+ else:
+ base = self.authority[-1]
# Fix up names of GENI Federates
base = base.lower()
'lease_id',
'component_id',
'slice_id'
- 't_from',
- 't_until',
+ 'start_time',
+ 'duration',
]
@staticmethod
def add_interfaces(xml, interfaces):
- for interface in interfaces:
- if_elem = xml.add_instance('interface', interface, ['component_id', 'client_id'])
- ips = interface.get('ips', [])
- for ip in ips:
- if_elem.add_instance('ip', {'address': ip.get('address'),
- 'netmask': ip.get('netmask'),
- 'type': ip.get('type')})
+ if isinstance(interfaces, list):
+ for interface in interfaces:
+ if_elem = xml.add_instance('interface', interface, ['component_id', 'client_id'])
+ ips = interface.get('ips', [])
+ for ip in ips:
+ if_elem.add_instance('ip', {'address': ip.get('address'),
+ 'netmask': ip.get('netmask'),
+ 'type': ip.get('type')})
@staticmethod
def get_interfaces(xml):
sliver_elem = xml.add_element('sliver_type')
if sliver.get('type'):
sliver_elem.set('name', sliver['type'])
- if sliver.get('client_id'):
- sliver_elem.set('client_id', sliver['client_id'])
+ attrs = ['client_id', 'cpus', 'memory', 'storage']
+ for attr in attrs:
+ if sliver.get(attr):
+ sliver_elem.set(attr, sliver[attr])
+
images = sliver.get('disk_image')
if images and isinstance(images, list):
PGv2DiskImage.add_images(sliver_elem, images)
lease_elems = []
for lease in leases:
- lease_fields = ['lease_id', 'component_id', 'slice_id', 't_from', 't_until']
+ lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
lease_elem = network_elem.add_instance('lease', lease, lease_fields)
lease_elems.append(lease_elem)
lease['lease_id'] = lease_elem.attrib['lease_id']
lease['component_id'] = lease_elem.attrib['component_id']
lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['t_from'] = lease_elem.attrib['t_from']
- lease['t_until'] = lease_elem.attrib['t_until']
+ lease['start_time'] = lease_elem.attrib['start_time']
+ lease['duration'] = lease_elem.attrib['duration']
leases.append(lease)
return leases
--- /dev/null
+#
+# Extensions for PlanetLab Resources
+# Version 1
+#
+
+namespace plos = "http://www.planet-lab.org/resources/sfa/ext/plos/1"
+
+FWRuleSpec = element plos:attribute {
+ attribute protocol { text },
+ attribute port_range { text },
+ attribute cidr_ip { text }
+}
+
+start = FWRuleSpec
+
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+ Extensions for PlanetLab Resources
+ Version 1
+
+-->
+<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" elementFormDefault="qualified" targetNamespace="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1">
+ <xs:element name="attribute">
+ <xs:complexType>
+ <xs:attribute name="protocol" use="required"/>
+ <xs:attribute name="port_range" use="required"/>
+ <xs:attribute name="cidr_ip" use="required"/>
+ </xs:complexType>
+ </xs:element>
+</xs:schema>
metadata=MetaData()
-# this is needed my migrate so it can locate 'records.record_id'
+# this is needed by migrate so it can locate 'records.record_id'
records = \
Table ( 'records', metadata,
Column ('record_id', Integer, primary_key=True),
def validate_datetime (self, key, incoming):
if isinstance (incoming, datetime): return incoming
elif isinstance (incoming, (int,float)):return datetime.fromtimestamp (incoming)
+ else: logger.info("Cannot validate datetime for key %s with input %s"%\
+ (key,incoming))
@validates ('date_created')
def validate_date_created (self, key, incoming): return self.validate_datetime (key, incoming)
auth_record = dbsession.query(RegAuthority).filter_by(hrn=authority_hrn).first()
return auth_record.reg_pis
+ @validates ('expires')
+ def validate_expires (self, key, incoming): return self.validate_datetime (key, incoming)
####################
class RegNode (RegRecord):
# handle gid
if attrib_name == 'gid':
print " gid:"
- print GID(attrib).dump_string(8, dump_parents)
+ print GID(string=attrib).dump_string(8, dump_parents)
elif attrib_name in ['date created', 'last updated']:
print " %s: %s" % (attrib_name, self.date_repr(attrib_name))
else:
# @param create_parents if true, also create the parents if they do not exist
def create_auth(self, xrn, create_parents=False):
- hrn, type = urn_to_hrn(xrn)
+ hrn, type = urn_to_hrn(str(xrn))
logger.debug("Hierarchy: creating authority: %s"% hrn)
# create the parent authority if necessary
raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn
if self.type and self.type.startswith('authority'):
- self.authority = Xrn.hrn_split(self.hrn)
+ self.authority = Xrn.hrn_auth_list(self.hrn)
+ leaf = self.get_leaf()
+ if not self.authority:
+ self.authority = [self.hrn]
type_parts = self.type.split("+")
self.type = type_parts[0]
name = 'sa'
if len(type_parts) > 1:
name = type_parts[1]
+ authority_string = ":".join([self.get_authority_urn(), leaf])
else:
self.authority = Xrn.hrn_auth_list(self.hrn)
name = Xrn.hrn_leaf(self.hrn)
-
- authority_string = self.get_authority_urn()
+ authority_string = self.get_authority_urn()
if self.type == None:
urn = "+".join(['',authority_string,Xrn.unescape(name)])
def add_request_context_to_rspec(self, doc):
p = doc.xpathNewContext()
- context = p.xpathEval("//RSpec")
- if (not context):
+ context = p.xpathEval("//*")
+ if not context or context[0].name not in ['RSpec', 'rspec']:
raise Exception('Request is not an rspec')
else:
# Add the request context
def add_rule_context_to_rspec(self, doc):
p = doc.xpathNewContext()
- context = p.xpathEval("//RSpec")
- if (not context):
+ context = p.xpathEval("//*")
+ if not context or context[0].name not in ['RSpec', 'rspec']:
raise Exception('Request is not an rspec')
else:
# Add the request context