echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
[ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
) >>$pghba_conf
+
+ if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
+ [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pghba_conf
+ fi
# Fix ownership (sed -i changes it)
chown postgres:postgres $postgresql_conf $pghba_conf
packages = [
'sfa',
+ 'sfa/openstack',
'sfa/trust',
'sfa/storage',
'sfa/util',
from sfa.generic import Generic
-import sfa.server.sfaapi
-import sfa.plc.pldriver
-import sfa.managers.registry_manager
-import sfa.managers.slice_manager
-import sfa.managers.aggregate_manager
class pl (Generic):
# use the standard api class
def api_class (self):
+ import sfa.server.sfaapi
return sfa.server.sfaapi.SfaApi
# the manager classes for the server-side services
def registry_manager_class (self) :
+ import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
def slicemgr_manager_class (self) :
+ import sfa.managers.slice_manager
return sfa.managers.slice_manager.SliceManager
def aggregate_manager_class (self) :
+ import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
# driver class for server-side services, talk to the whole testbed
def driver_class (self):
+ import sfa.plc.pldriver
return sfa.plc.pldriver.PlDriver
# for the component mode, to be run on board planetlab nodes
from sfa.trust.gid import create_uuid
from sfa.importer.sfaImport import sfaImport, _cleanup_string
from sfa.util.sfalogging import logger
-try:
- from nova.auth.manager import AuthManager, db, context
-except ImportError:
- AuthManager = None
-
+from sfa.openstack.openstack_shell import OpenstackShell
def process_options():
sys.exit(0)
root_auth = config.SFA_REGISTRY_ROOT_AUTH
interface_hrn = config.SFA_INTERFACE_HRN
- if AuthManager:
- auth_manager = AuthManager()
- else:
- logger.info("Unable to import nova.auth.manager. Doesn't look like openstack-copute is installed. Exiting...")
- sys.exit(0)
+ shell = OpenstackShell(config)
sfaImporter.create_top_level_records()
# create dict of all existing sfa records
# Get all users
- persons = auth_manager.get_users()
+ persons = shell.user_get_all()
persons_dict = {}
keys_filename = config.config_path + os.sep + 'person_keys.py'
old_person_keys = load_keys(keys_filename)
person_keys = {}
for person in persons:
hrn = config.SFA_INTERFACE_HRN + "." + person.id
+ persons_dict[hrn] = person
old_keys = old_person_keys.get(person.id, [])
- keys = db.key_pair_get_all_by_user(context.get_admin_context(), person.id)
- person_keys[person.id] = [key.public_key for key in keys]
+ keys = [k.public_key for k in shell.key_pair_get_all_by_user(person.id)]
+ person_keys[person.id] = keys
update_record = False
if old_keys != keys:
update_record = True
if hrn not in existing_hrns or \
(hrn, 'user') not in existing_records or update_record:
urn = hrn_to_urn(hrn, 'user')
+
if keys:
try:
- pkey = convert_public_key(key)
+ pkey = convert_public_key(keys[0])
except:
- logger.warn('unable to convert public key for %s' % hrn)
+ logger.log_exc('unable to convert public key for %s' % hrn)
pkey = Keypair(create=True)
else:
logger.warn("Import: person %s does not have a PL public key"%hrn)
pkey = Keypair(create=True)
- person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
- authority=get_authority(hrn))
- persons_dict[person_record['hrn']] = person_record
- person_record.sync()
+ person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+ person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
+ authority=get_authority(hrn))
+ logger.info("Import: importing %s " % person_record.summary_string())
+ person_record.sync()
# Get all projects
- projects = db.project_get_all(context.get_admin_context())
+ projects = shell.project_get_all()
projects_dict = {}
for project in projects:
hrn = config.SFA_INTERFACE_HRN + '.' + project.id
+ projects_dict[hrn] = project
if hrn not in existing_hrns or \
(hrn, 'slice') not in existing_records:
pkey = Keypair(create=True)
project_record = SfaRecord(hrn=hrn, gid=project_gid, type="slice",
authority=get_authority(hrn))
projects_dict[project_record['hrn']] = project_record
- project_record.sync(verbose=True)
+ logger.info("Import: importing %s " % project_record.summary_string())
+ project_record.sync()
# remove stale records
system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
record_object = existing_records[(record_hrn, type)]
record = SfaRecord(dict=record_object)
+ logger.info("Import: removing %s " % record.summary_string())
record.delete()
# save pub keys
--- /dev/null
+from sfa.util.version import version_core
+from sfa.util.xrn import Xrn
+from sfa.util.callids import Callids
+from sfa.managers import aggregate_manager
+
class AggregateManager(aggregate_manager.AggregateManager):
    """Openstack flavour of the aggregate manager: a thin layer that parses
    xrns / call ids and dispatches every call onto self.driver."""

    def __init__ (self, config): pass

    @staticmethod
    def _urn_and_hrn(xrn):
        # Normalize any accepted xrn form into its (urn, hrn) pair.
        parsed = Xrn(xrn)
        return parsed.get_urn(), parsed.get_hrn()

    def GetVersion(self, api, options):
        # essentially a union of the core version, the generic version (this code) and
        # whatever the driver needs to expose
        local = Xrn(api.hrn)
        info = version_core()
        info.update({
            'interface': 'aggregate',
            'sfa': 2,
            'geni_api': 2,
            'geni_api_versions': {'2': 'http://%s:%s' % (api.config.SFA_AGGREGATE_HOST, api.config.SFA_AGGREGATE_PORT)},
            'hrn': local.get_hrn(),
            'urn': local.get_urn(),
        })
        info.update(self.driver.aggregate_version())
        return info

    def ListSlices(self, api, creds, options):
        # duplicate-call guard keyed on the caller-provided call_id
        if Callids().already_handled(options.get('call_id')):
            return []
        return self.driver.list_slices(creds, options)

    def ListResources(self, api, creds, options):
        if Callids().already_handled(options.get('call_id')):
            return ""
        # a slice urn in options scopes the listing; pass None when unscoped
        slice_xrn = options.get('geni_slice_urn', None)
        if slice_xrn:
            slice_urn, slice_hrn = self._urn_and_hrn(slice_xrn)
        else:
            slice_urn, slice_hrn = None, None
        return self.driver.list_resources(slice_urn, slice_hrn, creds, options)

    def SliverStatus (self, api, xrn, creds, options):
        if Callids().already_handled(options.get('call_id')):
            return {}
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.sliver_status(slice_urn, slice_hrn)

    def CreateSliver(self, api, xrn, creds, rspec_string, users, options):
        """
        Create the sliver[s] (slice) at this aggregate.
        Verify HRN and initialize the slice record in PLC if necessary.
        """
        if Callids().already_handled(options.get('call_id')):
            return ""
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.create_sliver(slice_urn, slice_hrn, creds, rspec_string, users, options)

    def DeleteSliver(self, api, xrn, creds, options):
        if Callids().already_handled(options.get('call_id')):
            return True
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.delete_sliver(slice_urn, slice_hrn, creds, options)

    def RenewSliver(self, api, xrn, creds, expiration_time, options):
        if Callids().already_handled(options.get('call_id')):
            return True
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.renew_sliver(slice_urn, slice_hrn, creds, expiration_time, options)

    ### these methods could use an options extension for at least call_id
    def start_slice(self, api, xrn, creds):
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.start_slice(slice_urn, slice_hrn, creds)

    def stop_slice(self, api, xrn, creds):
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.stop_slice(slice_urn, slice_hrn, creds)

    def reset_slice(self, api, xrn):
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.reset_slice(slice_urn, slice_hrn)

    def GetTicket(self, api, xrn, creds, rspec, users, options):
        # NOTE(review): 'users' is accepted but not forwarded to the driver — confirm intended
        slice_urn, slice_hrn = self._urn_and_hrn(xrn)
        return self.driver.get_ticket(slice_urn, slice_hrn, creds, rspec, options)
+
--- /dev/null
+import types
+import time
+# for get_key_from_incoming_ip
+import tempfile
+import os
+import commands
+
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+ UnknownSfaType, ExistingRecord, NonExistingRecord
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.prefixTree import prefixTree
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_login_base
+from sfa.util.version import version_core
+from sfa.util.sfalogging import logger
+from sfa.trust.gid import GID
+from sfa.trust.credential import Credential
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+from sfa.storage.record import SfaRecord
+from sfa.storage.table import SfaTable
+from sfa.managers import registry_manager
+
class RegistryManager(registry_manager.RegistryManager):
    """Openstack flavour of the registry manager.

    Serves the registry-side API calls (GetCredential, Resolve, List,
    Register, Update, Remove, ...) on top of the SFA record table, and
    delegates testbed-specific work to self.driver.
    """

    def __init__ (self, config): pass

    # The GENI GetVersion call
    def GetVersion(self, api, options):
        """Return version_core() info plus this registry's hrn/urn and peer urls."""
        peers = dict ( [ (hrn,interface.get_url()) for (hrn,interface) in api.registries.iteritems()
                       if hrn != api.hrn])
        xrn=Xrn(api.hrn)
        return version_core({'interface':'registry',
                             'hrn':xrn.get_hrn(),
                             'urn':xrn.get_urn(),
                             'peers':peers})

    def GetCredential(self, api, xrn, type, is_self=False):
        """Build, sign and return a credential string for the object named by xrn.

        When is_self is True the record's own gid is used as the caller gid
        (self-credential); otherwise the caller comes from the client cred.
        Raises RecordNotFound / AccountNotEnabled / PermissionError.
        """
        # convert xrn to hrn
        if type:
            hrn = urn_to_hrn(xrn)[0]
        else:
            hrn, type = urn_to_hrn(xrn)

        # Is this a root or sub authority
        auth_hrn = api.auth.get_authority(hrn)
        if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
            auth_hrn = hrn
        # get record info
        auth_info = api.auth.get_auth_info(auth_hrn)
        table = SfaTable()
        records = table.findObjects({'type': type, 'hrn': hrn})
        if not records:
            raise RecordNotFound(hrn)
        record = records[0]

        # verify_cancreate_credential requires that the member lists
        # (researchers, pis, etc) be filled in
        # NOTE(review): a single record is passed here where the plural method
        # name suggests a list — confirm the driver accepts both
        self.driver.augment_records_with_testbed_info (record)
        if not self.driver.is_enabled (record):
            raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))

        # get the callers gid
        # if this is a self cred the record's gid is the caller's gid
        if is_self:
            caller_hrn = hrn
            caller_gid = record.get_gid_object()
        else:
            caller_gid = api.auth.client_cred.get_gid_caller()
            caller_hrn = caller_gid.get_hrn()

        object_hrn = record.get_gid_object().get_hrn()
        rights = api.auth.determine_user_rights(caller_hrn, record)
        # make sure caller has rights to this object
        if rights.is_empty():
            raise PermissionError(caller_hrn + " has no rights to " + record['name'])

        object_gid = GID(string=record['gid'])
        new_cred = Credential(subject = object_gid.get_subject())
        new_cred.set_gid_caller(caller_gid)
        new_cred.set_gid_object(object_gid)
        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
        #new_cred.set_pubkey(object_gid.get_pubkey())
        new_cred.set_privileges(rights)
        new_cred.get_privileges().delegate_all_privileges(True)
        # cap the credential's lifetime to the record's expiration, if any
        if 'expires' in record:
            date = utcparse(record['expires'])
            expires = datetime_to_epoch(date)
            new_cred.set_expiration(int(expires))
        auth_kind = "authority,ma,sa"
        # Parent not necessary, verify with certs
        #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
        new_cred.encode()
        new_cred.sign()

        return new_cred.save_to_string(save_parents=True)


    def Resolve(self, api, xrns, type=None, full=True):
        """Resolve one or more xrns into record dicts, forwarding to peer
        registries for names outside our authority; raises RecordNotFound
        when nothing matches."""

        if not isinstance(xrns, types.ListType):
            xrns = [xrns]
        # try to infer type if not set and we get a single input
        # NOTE(review): xrns is a list at this point; Xrn(xrns) looks
        # suspicious — probably meant Xrn(xrns[0]). TODO confirm
        if not type:
            type = Xrn(xrns).get_type()
        hrns = [urn_to_hrn(xrn)[0] for xrn in xrns]
        # load all known registry names into a prefix tree and attempt to find
        # the longest matching prefix
        # create a dict where key is a registry hrn and its value is a
        # hrns at that registry (determined by the known prefix tree).
        xrn_dict = {}
        registries = api.registries
        tree = prefixTree()
        registry_hrns = registries.keys()
        tree.load(registry_hrns)
        for xrn in xrns:
            registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
            if registry_hrn not in xrn_dict:
                xrn_dict[registry_hrn] = []
            xrn_dict[registry_hrn].append(xrn)

        records = []
        for registry_hrn in xrn_dict:
            # skip the hrn without a registry hrn
            # XX should we let the user know the authority is unknown?
            if not registry_hrn:
                continue

            # if the best match (longest matching hrn) is not the local registry,
            # forward the request
            xrns = xrn_dict[registry_hrn]
            if registry_hrn != api.hrn:
                credential = api.getCredential()
                interface = api.registries[registry_hrn]
                server_proxy = api.server_proxy(interface, credential)
                peer_records = server_proxy.Resolve(xrns, credential)
                records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])

        # try resolving the remaining unfound records at the local registry
        local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) )
        #
        table = SfaTable()
        local_records = table.findObjects({'hrn': local_hrns})

        if full:
            # in full mode we get as much info as we can, which involves contacting the
            # testbed for getting implementation details about the record
            self.driver.augment_records_with_testbed_info(local_records)
            # also we fill the 'url' field for known authorities
            # used to be in the driver code, sounds like a poorman thing though
            def solve_neighbour_url (record):
                if not record['type'].startswith('authority'): return
                hrn=record['hrn']
                for neighbour_dict in [ api.aggregates, api.registries ]:
                    if hrn in neighbour_dict:
                        record['url']=neighbour_dict[hrn].get_url()
                        return
            [ solve_neighbour_url (record) for record in local_records ]



        # convert local record objects to dicts
        records.extend([dict(record) for record in local_records])
        if type:
            records = filter(lambda rec: rec['type'] in [type], records)

        if not records:
            raise RecordNotFound(str(hrns))

        return records

    def List(self, api, xrn, origin_hrn=None):
        """List the records under the authority named by xrn, forwarding to
        the registry that owns the longest matching prefix when it is not us."""
        hrn, type = urn_to_hrn(xrn)
        # load all know registry names into a prefix tree and attempt to find
        # the longest matching prefix
        records = []
        registries = api.registries
        registry_hrns = registries.keys()
        tree = prefixTree()
        tree.load(registry_hrns)
        registry_hrn = tree.best_match(hrn)

        #if there was no match then this record belongs to an unknow registry
        if not registry_hrn:
            raise MissingAuthority(xrn)
        # if the best match (longest matching hrn) is not the local registry,
        # forward the request
        records = []
        if registry_hrn != api.hrn:
            credential = api.getCredential()
            interface = api.registries[registry_hrn]
            server_proxy = api.server_proxy(interface, credential)
            record_list = server_proxy.List(xrn, credential)
            records = [SfaRecord(dict=record).as_dict() for record in record_list]

        # if we still have not found the record yet, try the local registry
        if not records:
            if not api.auth.hierarchy.auth_exists(hrn):
                raise MissingAuthority(hrn)

            table = SfaTable()
            records = table.find({'authority': hrn})

        return records


    def CreateGid(self, api, xrn, cert):
        """Create and return (as a string) a gid for xrn, using the public key
        from cert when given, otherwise a freshly generated keypair."""
        # get the authority
        authority = Xrn(xrn=xrn).get_authority_hrn()
        auth_info = api.auth.get_auth_info(authority)
        if not cert:
            pkey = Keypair(create=True)
        else:
            certificate = Certificate(string=cert)
            pkey = certificate.get_pubkey()
        gid = api.auth.hierarchy.create_gid(xrn, create_uuid(), pkey)
        return gid.save_to_string(save_parents=True)

    ####################
    # utility for handling relationships among the SFA objects
    # given that the SFA db does not handle this sort of relationsships
    # it will rely on side-effects in the testbed to keep this persistent

    # subject_record describes the subject of the relationships
    # ref_record contains the target values for the various relationships we need to manage
    # (to begin with, this is just the slice x person relationship)
    def update_relations (self, subject_record, ref_record):
        type=subject_record['type']
        if type=='slice':
            self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')

    # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
    # hrns is the list of hrns that should be linked to the subject from now on
    # target_type would be e.g. 'user' in the 'slice' x 'researcher' example
    def update_relation (self, sfa_record, field_key, hrns, target_type):
        # locate the linked objects in our db
        subject_type=sfa_record['type']
        subject_id=sfa_record['pointer']
        table = SfaTable()
        link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
        link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
        self.driver.update_relation (subject_type, target_type, subject_id, link_ids)


    def Register(self, api, record):
        """Register a new record: create its gid if needed, push it to the
        testbed via the driver, insert it in the SFA table and return the
        gid string. Raises UnknownSfaType / ExistingRecord."""

        hrn, type = record['hrn'], record['type']
        urn = hrn_to_urn(hrn,type)
        # validate the type
        if type not in ['authority', 'slice', 'node', 'user']:
            raise UnknownSfaType(type)

        # check if record already exists
        table = SfaTable()
        existing_records = table.find({'type': type, 'hrn': hrn})
        if existing_records:
            raise ExistingRecord(hrn)

        record = SfaRecord(dict = record)
        record['authority'] = get_authority(record['hrn'])
        auth_info = api.auth.get_auth_info(record['authority'])
        pub_key = None
        # make sure record has a gid
        if 'gid' not in record:
            uuid = create_uuid()
            pkey = Keypair(create=True)
            if 'keys' in record and record['keys']:
                pub_key=record['keys']
                # use only first key in record
                if isinstance(record['keys'], types.ListType):
                    pub_key = record['keys'][0]
                pkey = convert_public_key(pub_key)

            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
            gid = gid_object.save_to_string(save_parents=True)
            record['gid'] = gid
            record.set_gid(gid)

        if type in ["authority"]:
            # update the tree
            if not api.auth.hierarchy.auth_exists(hrn):
                api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))

            # get the GID from the newly created authority
            gid = auth_info.get_gid_object()
            record.set_gid(gid.save_to_string(save_parents=True))

        # update testbed-specific data if needed
        pointer = self.driver.register (record, hrn, pub_key)

        record.set_pointer(pointer)
        record_id = table.insert(record)
        record['record_id'] = record_id

        # update membership for researchers, pis, owners, operators
        self.update_relations (record, record)

        return record.get_gid_object().save_to_string(save_parents=True)

    def Update(self, api, record_dict):
        """Update an existing record from record_dict; regenerates the gid
        when a new public key is supplied. Returns 1 on success."""
        new_record = SfaRecord(dict = record_dict)
        type = new_record['type']
        hrn = new_record['hrn']
        urn = hrn_to_urn(hrn,type)
        table = SfaTable()
        # make sure the record exists
        records = table.findObjects({'type': type, 'hrn': hrn})
        if not records:
            raise RecordNotFound(hrn)
        record = records[0]
        record['last_updated'] = time.gmtime()

        # validate the type
        if type not in ['authority', 'slice', 'node', 'user']:
            raise UnknownSfaType(type)

        # Use the pointer from the existing record, not the one that the user
        # gave us. This prevents the user from inserting a forged pointer
        pointer = record['pointer']

        # is the a change in keys ?
        new_key=None
        if type=='user':
            if 'keys' in new_record and new_record['keys']:
                new_key=new_record['keys']
                if isinstance (new_key,types.ListType):
                    new_key=new_key[0]

        # update the PLC information that was specified with the record
        if not self.driver.update (record, new_record, hrn, new_key):
            logger.warning("driver.update failed")

        # take new_key into account
        if new_key:
            # update the openssl key and gid
            pkey = convert_public_key(new_key)
            uuid = create_uuid()
            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
            gid = gid_object.save_to_string(save_parents=True)
            record['gid'] = gid
            record = SfaRecord(dict=record)
            table.update(record)

        # update membership for researchers, pis, owners, operators
        self.update_relations (record, new_record)

        return 1

    # expecting an Xrn instance
    def Remove(self, api, xrn, origin_hrn=None):
        """Remove the record named by xrn (an Xrn instance) from peers, the
        testbed and the local SFA table. Returns 1 on success."""

        table = SfaTable()
        filter = {'hrn': xrn.get_hrn()}
        hrn=xrn.get_hrn()
        type=xrn.get_type()
        if type and type not in ['all', '*']:
            filter['type'] = type

        records = table.find(filter)
        if not records: raise RecordNotFound(hrn)
        record = records[0]
        type = record['type']

        if type not in ['slice', 'user', 'node', 'authority'] :
            raise UnknownSfaType(type)

        credential = api.getCredential()
        registries = api.registries

        # Try to remove the object from the PLCDB of federated agg.
        # This is attempted before removing the object from the local agg's PLCDB and sfa table
        if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']:
            for registry in registries:
                if registry not in [api.hrn]:
                    try:
                        result=registries[registry].remove_peer_object(credential, record, origin_hrn)
                    except:
                        # best-effort: a peer failing to remove its copy must
                        # not block the local removal
                        pass

        # call testbed callback first
        # IIUC this is done on the local testbed TOO because of the refreshpeer link
        if not self.driver.remove(record):
            logger.warning("driver.remove failed")

        # delete from sfa db
        table.remove(record)

        return 1

    # This is a PLC-specific thing...
    def get_key_from_incoming_ip (self, api):
        """Regenerate the node keypair/gid for the node matching the caller's
        IP and scp key+gid onto it; planetlab components only. Returns 1."""
        # verify that the callers's ip address exist in the db and is an interface
        # for a node in the db
        (ip, port) = api.remote_addr
        interfaces = self.driver.shell.GetInterfaces({'ip': ip}, ['node_id'])
        if not interfaces:
            raise NonExistingRecord("no such ip %(ip)s" % locals())
        nodes = self.driver.shell.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
        if not nodes:
            raise NonExistingRecord("no such node using ip %(ip)s" % locals())
        node = nodes[0]

        # look up the sfa record
        table = SfaTable()
        records = table.findObjects({'type': 'node', 'pointer': node['node_id']})
        if not records:
            raise RecordNotFound("pointer:" + str(node['node_id']))
        record = records[0]

        # generate a new keypair and gid
        uuid = create_uuid()
        pkey = Keypair(create=True)
        urn = hrn_to_urn(record['hrn'], record['type'])
        gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
        gid = gid_object.save_to_string(save_parents=True)
        record['gid'] = gid
        record.set_gid(gid)

        # update the record
        table.update(record)

        # attempt the scp the key
        # and gid onto the node
        # this will only work for planetlab based components
        (kfd, key_filename) = tempfile.mkstemp()
        (gfd, gid_filename) = tempfile.mkstemp()
        pkey.save_to_file(key_filename)
        gid_object.save_to_file(gid_filename, save_parents=True)
        host = node['hostname']
        key_dest="/etc/sfa/node.key"
        gid_dest="/etc/sfa/node.gid"
        scp = "/usr/bin/scp"
        #identity = "/etc/planetlab/root_ssh_key.rsa"
        identity = "/etc/sfa/root_ssh_key"
        scp_options=" -i %(identity)s " % locals()
        scp_options+="-o StrictHostKeyChecking=no " % locals()
        scp_key_command="%(scp)s %(scp_options)s %(key_filename)s root@%(host)s:%(key_dest)s" %\
           locals()
        scp_gid_command="%(scp)s %(scp_options)s %(gid_filename)s root@%(host)s:%(gid_dest)s" %\
           locals()

        all_commands = [scp_key_command, scp_gid_command]

        for command in all_commands:
            (status, output) = commands.getstatusoutput(command)
            if status:
                raise Exception, output

        for filename in [key_filename, gid_filename]:
            os.unlink(filename)

        return 1
forward_options['rspec_version'] = version_manager.get_version('SFA 1').to_dict()
else:
forward_options['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+ forward_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
rspec = server.ListResources(credential, forward_options)
return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
except Exception, e:
call_id = options.get('call_id')
if Callids().already_handled(call_id): return True
- def _RenewSliver(server, xrn, creds, expiration_time, options):
- return server.RenewSliver(xrn, creds, expiration_time, options)
-
- (hrn, type) = urn_to_hrn(xrn)
+ def _RenewSliver(aggregate, server, xrn, creds, expiration_time, options):
+ try:
+ result=server.RenewSliver(xrn, creds, expiration_time, options)
+ if type(result)!=dict:
+ result = {"code": {"geni_code": 0}, value: result}
+ result["aggregate"] = aggregate
+ return result
+ except:
+ logger.log_exc('Something wrong in _RenewSliver with URL %s'%server.url)
+ return {"aggregate": aggregate, "exc_info": traceback.format_exc(), "code": {"geni_code": -1}, "value": False, "output": ""}
+
+ (hrn, urn_type) = urn_to_hrn(xrn)
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
+
# attempt to use delegated credential first
cred = api.getDelegatedCredential(creds)
if not cred:
- cred = api.getCredential()
+ cred = api.getCredential(minimumExpiration=31*86400)
threads = ThreadManager()
for aggregate in api.aggregates:
# prevent infinite loop. Dont send request back to caller
continue
interface = api.aggregates[aggregate]
server = api.server_proxy(interface, cred)
- threads.run(_RenewSliver, server, xrn, [cred], expiration_time, options)
- # 'and' the results
- results = [ReturnValue.get_value(result) for result in threads.get_results()]
- return reduce (lambda x,y: x and y, results , True)
-
+ threads.run(_RenewSliver, aggregate, server, xrn, [cred], expiration_time, options)
+
+ results = threads.get_results()
+
+ geni_code = 0
+ geni_output = ",".join([x.get("output","") for x in results])
+ geni_value = reduce (lambda x,y: x and y, [result.get("value",False) for result in results], True)
+ for agg_result in results:
+ agg_geni_code = agg_result["code"].get("geni_code",0)
+ if agg_geni_code:
+ geni_code = agg_geni_code
+
+ results = {"aggregates": results, "code": {"geni_code": geni_code}, "value": geni_value, "output": geni_output}
+
+ return results
+
def DeleteSliver(self, api, xrn, creds, options):
- call_id = options.get('call_id')
+ call_id = options.get('call_id')
if Callids().already_handled(call_id): return ""
def _DeleteSliver(server, xrn, creds, options):
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
+
# attempt to use delegated credential first
cred = api.getDelegatedCredential(creds)
if not cred:
--- /dev/null
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
+from sfa.util.cache import Cache
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.table import SfaTable
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+from sfa.openstack.openstack_shell import OpenstackShell
+import sfa.plc.peers as peers
+from sfa.plc.plaggregate import PlAggregate
+from sfa.plc.plslices import PlSlices
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+
+
+def list_to_dict(recs, key):
+ """
+ Convert a list of dictionaries into a single dictionary keyed on the
+ specified dictionary key; when several records share the same key value,
+ the last one wins.
+ """
+ return dict ( [ (rec[key],rec) for rec in recs ] )
+
+#
+# PlShell is just an xmlrpc serverproxy where methods
+# can be sent as-is; it takes care of authentication
+# from the global config
+#
+class OpenstackDriver (Driver):
+ """
+ SFA driver speaking to an openstack / nova deployment through an
+ OpenstackShell instance; implements both the registry-oriented and the
+ aggregate-oriented halves of the Driver interface.
+ """
+
+ # the cache instance is a class member so it survives across incoming requests
+ cache = None
+
+ def __init__ (self, config):
+ # config: an SFA Config object; the advertisement cache is enabled
+ # only when SFA_AGGREGATE_CACHING is set, and is shared class-wide
+ # so every request sees the same cached data
+ Driver.__init__ (self, config)
+ self.shell = OpenstackShell (config)
+ self.cache=None
+ if config.SFA_AGGREGATE_CACHING:
+ if OpenstackDriver.cache is None:
+ OpenstackDriver.cache = Cache()
+ self.cache = OpenstackDriver.cache
+
+ ########################################
+ ########## registry oriented
+ ########################################
+
+ ########## disabled users
+ def is_enabled (self, record):
+ # all records are enabled
+ return True
+
+ def augment_records_with_testbed_info (self, sfa_records):
+ # registry hook: decorate SFA records with testbed-side details
+ return self.fill_record_info (sfa_records)
+
+ ##########
+ def register (self, sfa_record, hrn, pub_key):
+ # Register a new record in the testbed. Still a stub: the slice and
+ # user branches are not implemented yet.
+ # Returns the testbed-side pointer for the new record (-1 while the
+ # openstack-side creation is not implemented).
+ type = sfa_record['type']
+ # NOTE(review): sfa_fields_to_pl_fields is not defined in this class;
+ # presumably inherited or still to be ported from the PL driver -- confirm
+ pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
+ # bugfix: 'pointer' was returned below without ever being assigned (NameError)
+ pointer = -1
+
+ if type == 'slice':
+ acceptable_fields=['url', 'instantiation', 'name', 'description']
+ # add slice description, name, researchers, PI
+ pass
+
+ elif type == 'user':
+ # add person roles, projects and keys
+ pass
+ return pointer
+
+ ##########
+ # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
+ def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+ # Update an existing record in the testbed. Mostly a stub: the
+ # slice/user branches are not implemented yet, so new_key is
+ # currently accepted for users but silently ignored.
+ pointer = old_sfa_record['pointer']
+ type = old_sfa_record['type']
+
+ # new_key implemented for users only
+ if new_key and type not in [ 'user' ]:
+ raise UnknownSfaType(type)
+
+ elif type == "slice":
+ # can update description, researchers and PI
+ pass
+ elif type == "user":
+ # can update slices, keys and roles
+ pass
+ return True
+
+
+ ##########
+ def remove (self, sfa_record):
+ # Delete the openstack object (user or project) matching the record's
+ # hrn leaf name; silently succeeds when the object does not exist.
+ type=sfa_record['type']
+ name = Xrn(sfa_record['hrn']).get_leaf()
+ if type == 'user':
+ if self.shell.user_get(name):
+ self.shell.user_delete(name)
+ elif type == 'slice':
+ if self.shell.project_get(name):
+ self.shell.project_delete(name)
+ return True
+
+
+ ####################
+ def fill_record_info(self, records):
+ """
+ Given a (list of) SFA record, fill in the openstack specific
+ and SFA specific fields in the record, reading users/projects
+ and keypairs from the nova db through the shell.
+ Accepts a single record or a list; always returns a list.
+ """
+ if not isinstance(records, list):
+ records = [records]
+
+ for record in records:
+ # the leaf of the hrn is the openstack-side object name
+ name = Xrn(record['hrn']).get_leaf()
+ os_record = None
+ if record['type'] == 'user':
+ os_record = self.shell.user_get(name)
+ record['slices'] = [self.hrn + "." + proj.name for \
+ proj in os_record.projects]
+ record['roles'] = [role for role in os_record.roles]
+ keys = self.shell.key_pair_get_all_by_user(name)
+ record['keys'] = [key.public_key for key in keys]
+ elif record['type'] == 'slice':
+ # slices map to openstack projects
+ os_record = self.shell.project_get(name)
+ record['description'] = os_record.description
+ record['PI'] = self.hrn + "." + os_record.project_manager
+ record['geni_creator'] = record['PI']
+ record['researcher'] = [self.hrn + "." + user.name for \
+ user in os_record.members]
+ else:
+ # other record types are left untouched
+ continue
+ record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
+ record['geni_certificate'] = record['gid']
+ record['name'] = os_record.name
+ if os_record.created_at is not None:
+ record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
+ if os_record.updated_at is not None:
+ record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
+
+ return records
+
+
+ ####################
+ # plcapi works by changes, compute what needs to be added/deleted
+ def update_relation (self, subject_type, target_type, subject_id, target_ids):
+ # Reconcile a slice's member list against target_ids: compute what
+ # needs to be added/deleted and apply only the difference.
+ # hard-wire the code for slice/user for now, could be smarter if needed
+ if subject_type =='slice' and target_type == 'user':
+ # bugfix: project_get returns the project object itself, not a
+ # list (it is used that way in remove() and fill_record_info()),
+ # so indexing it with [0] was wrong
+ subject=self.shell.project_get(subject_id)
+ current_target_ids = [user.name for user in subject.members]
+ add_target_ids = list ( set (target_ids).difference(current_target_ids))
+ del_target_ids = list ( set (current_target_ids).difference(target_ids))
+ logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+ for target_id in add_target_ids:
+ self.shell.project_add_member(target_id,subject_id)
+ logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ for target_id in del_target_ids:
+ logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ self.shell.project_remove_member(target_id, subject_id)
+ else:
+ logger.info('unexpected relation to maintain, %s -> %s'%(subject_type,target_type))
+
+
+ ########################################
+ ########## aggregate oriented
+ ########################################
+
+ def testbed_name (self): return "openstack"
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version (self):
+ # advertise every rspec version the VersionManager knows about,
+ # split into request and ad flavours
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed':self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions,
+ }
+
+ def list_slices (self, creds, options):
+ # Return the urns of all local slices, going through the cache
+ # when enabled.
+ # NOTE(review): GetSlices is a PLC-style call inherited from the PL
+ # driver; confirm the openstack shell proxy actually exposes it.
+ # (log messages below still say PlDriver -- copied code)
+ # look in cache first
+ if self.cache:
+ slices = self.cache.get('slices')
+ if slices:
+ logger.debug("PlDriver.list_slices returns from cache")
+ return slices
+
+ # get data from db
+ slices = self.shell.GetSlices({'peer_id': None}, ['name'])
+ slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+ # cache the result
+ if self.cache:
+ logger.debug ("PlDriver.list_slices stores value in cache")
+ self.cache.add('slices', slice_urns)
+
+ return slice_urns
+
+ # first 2 args are None in case of resource discovery
+ def list_resources (self, slice_urn, slice_hrn, creds, options):
+ # Return an rspec: an advertisement when slice_urn/slice_hrn are None,
+ # a manifest for that slice otherwise. Only advertisements are cached.
+ # NOTE(review): still built through PlAggregate -- presumably to be
+ # replaced by an openstack aggregate; confirm.
+ cached_requested = options.get('cached', True)
+
+ version_manager = VersionManager()
+ # get the rspec's return format from options
+ rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+ version_string = "rspec_%s" % (rspec_version)
+
+ #panos adding the info option to the caching key (can be improved)
+ if options.get('info'):
+ version_string = version_string + "_"+options.get('info', 'default')
+
+ # look in cache first
+ if cached_requested and self.cache and not slice_hrn:
+ rspec = self.cache.get(version_string)
+ if rspec:
+ logger.debug("PlDriver.ListResources: returning cached advertisement")
+ return rspec
+
+ #panos: passing user-defined options
+ #print "manager options = ",options
+ aggregate = PlAggregate(self)
+ rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+ options=options)
+
+ # cache the result
+ if self.cache and not slice_hrn:
+ logger.debug("PlDriver.ListResources: stores advertisement in cache")
+ self.cache.add(version_string, rspec)
+
+ return rspec
+
+ def sliver_status (self, slice_urn, slice_hrn):
+ # Return the GENI sliver status structure for a slice: one entry per
+ # local node, plus a top-level geni_status.
+ # NOTE(review): this is PLC-flavoured code (GetSlices/GetNodes,
+ # pl_* fields) copied from the PL driver -- confirm it applies here.
+ # find out where this slice is currently running
+ slicename = hrn_to_pl_slicename(slice_hrn)
+
+ slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
+ if len(slices) == 0:
+ raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+ slice = slices[0]
+
+ # report about the local nodes only
+ nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
+ ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+
+ if len(nodes) == 0:
+ raise SliverDoesNotExist("You have not allocated any slivers here")
+
+ site_ids = [node['site_id'] for node in nodes]
+
+ result = {}
+ top_level_status = 'unknown'
+ if nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = slice_urn
+ result['pl_login'] = slice['name']
+ result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+
+ resources = []
+ for node in nodes:
+ res = {}
+ res['pl_hostname'] = node['hostname']
+ res['pl_boot_state'] = node['boot_state']
+ res['pl_last_contact'] = node['last_contact']
+ if node['last_contact'] is not None:
+
+ res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
+ sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'])
+ res['geni_urn'] = sliver_id
+ if node['boot_state'] == 'boot':
+ res['geni_status'] = 'ready'
+ else:
+ # any node not in 'boot' drags the whole sliver to failed
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ return result
+
+ def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ # Instantiate the requested rspec: make sure site/slice/person records
+ # exist, reconcile nodes and links, then return the resulting manifest.
+ # NOTE(review): entirely PLC-flavoured (PlAggregate/PlSlices) -- to be
+ # ported to openstack primitives; confirm.
+
+ aggregate = PlAggregate(self)
+ slices = PlSlices(self)
+ peer = slices.get_peer(slice_hrn)
+ sfa_peer = slices.get_sfa_peer(slice_hrn)
+ slice_record=None
+ if users:
+ slice_record = users[0].get('slice_record', {})
+
+ # parse rspec
+ rspec = RSpec(rspec_string)
+ requested_attributes = rspec.version.get_slice_attributes()
+
+ # ensure site record exists
+ site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ # ensure slice record exists
+ slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ # ensure person records exists
+ persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+ # ensure slice attributes exists
+ slices.verify_slice_attributes(slice, requested_attributes, options=options)
+
+ # add/remove slice from nodes
+ requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+ nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
+
+ # add/remove links links
+ slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
+
+ # handle MyPLC peer association.
+ # only used by plc and ple.
+ slices.handle_peer(site, slice, persons, peer)
+
+ return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+ def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ # Remove the slice from all its nodes; returns 1 whether or not the
+ # slice existed. For peer slices the peer binding is temporarily
+ # lifted and restored in the finally clause.
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename})
+ if not slices:
+ return 1
+ slice = slices[0]
+
+ # determine if this is a peer slice
+ # xxx I wonder if this would not need to use PlSlices.get_peer instead
+ # in which case plc.peers could be deprecated as this here
+ # is the only/last call to this last method in plc.peers
+ peer = peers.get_peer(self, slice_hrn)
+ try:
+ if peer:
+ self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+ self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
+ finally:
+ if peer:
+ self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+ return 1
+
+ def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+ # Push the new expiration time to the testbed; returns True on
+ # success, False on any update failure (errors are swallowed).
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(slice_hrn)
+ slice = slices[0]
+ requested_time = utcparse(expiration_time)
+ record = {'expires': int(datetime_to_epoch(requested_time))}
+ try:
+ self.shell.UpdateSlice(slice['slice_id'], record)
+ return True
+ except:
+ # deliberate best-effort: any failure reports False to the caller
+ return False
+
+ # remove the 'enabled' tag
+ def start_slice (self, slice_urn, slice_hrn, creds):
+ # a slice is "started" when it carries no 'enabled' tag at all
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(slice_hrn)
+ slice_id = slices[0]['slice_id']
+ slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+ # just remove the tag if it exists
+ if slice_tags:
+ self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
+ return 1
+
+ # set the 'enabled' tag to 0
+ def stop_slice (self, slice_urn, slice_hrn, creds):
+ # a slice is "stopped" when its 'enabled' tag exists and is "0"
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(slice_hrn)
+ slice_id = slices[0]['slice_id']
+ slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
+ if not slice_tags:
+ self.shell.AddSliceTag(slice_id, 'enabled', '0')
+ elif slice_tags[0]['value'] != "0":
+ tag_id = slice_tags[0]['slice_tag_id']
+ self.shell.UpdateSliceTag(tag_id, '0')
+ return 1
+
+ def reset_slice (self, slice_urn, slice_hrn, creds):
+ # not supported by this driver
+ raise SfaNotImplemented ("reset_slice not available at this interface")
+
+ # xxx this code is quite old and has not run for ages
+ # it is obviously totally broken and needs a rewrite
+ def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+ # note: python2 raise-statement syntax kept as in the original
+ raise SfaNotImplemented,"PlDriver.get_ticket needs a rewrite"
+# please keep this code for future reference
+# slices = PlSlices(self)
+# peer = slices.get_peer(slice_hrn)
+# sfa_peer = slices.get_sfa_peer(slice_hrn)
+#
+# # get the slice record
+# credential = api.getCredential()
+# interface = api.registries[api.hrn]
+# registry = api.server_proxy(interface, credential)
+# records = registry.Resolve(xrn, credential)
+#
+# # make sure we get a local slice record
+# record = None
+# for tmp_record in records:
+# if tmp_record['type'] == 'slice' and \
+# not tmp_record['peer_authority']:
+# #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+# slice_record = SliceRecord(dict=tmp_record)
+# if not record:
+# raise RecordNotFound(slice_hrn)
+#
+# # similar to CreateSliver, we must verify that the required records exist
+# # at this aggregate before we can issue a ticket
+# # parse rspec
+# rspec = RSpec(rspec_string)
+# requested_attributes = rspec.version.get_slice_attributes()
+#
+# # ensure site record exists
+# site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure slice record exists
+# slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure person records exists
+# # xxx users is undefined in this context
+# persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+# # ensure slice attributes exists
+# slices.verify_slice_attributes(slice, requested_attributes)
+#
+# # get sliver info
+# slivers = slices.get_slivers(slice_hrn)
+#
+# if not slivers:
+# raise SliverDoesNotExist(slice_hrn)
+#
+# # get initscripts
+# initscripts = []
+# data = {
+# 'timestamp': int(time.time()),
+# 'initscripts': initscripts,
+# 'slivers': slivers
+# }
+#
+# # create the ticket
+# object_gid = record.get_gid_object()
+# new_ticket = SfaTicket(subject = object_gid.get_subject())
+# new_ticket.set_gid_caller(api.auth.client_gid)
+# new_ticket.set_gid_object(object_gid)
+# new_ticket.set_issuer(key=api.key, subject=self.hrn)
+# new_ticket.set_pubkey(object_gid.get_pubkey())
+# new_ticket.set_attributes(data)
+# new_ticket.set_rspec(rspec)
+# #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+# new_ticket.encode()
+# new_ticket.sign()
+#
+# return new_ticket.save_to_string(save_parents=True)
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+
+from sfa.util.sfalogging import logger
+
+class OpenstackShell:
+ """
+ A simple shell to an openstack / nova deployment.
+ This class can receive all Openstack calls to the underlying testbed;
+ unknown attribute lookups are forwarded to the nova db layer with the
+ admin context prepended (see __getattr__).
+ """
+
+ # dont care about limiting calls yet
+ direct_calls = []
+ alias_calls = {}
+
+
+ # use the native nova library for higher performance when nova is local
+ def __init__ ( self, config ) :
+ # bugfix: SfaNotImplemented was raised below without ever being imported
+ from sfa.util.faults import SfaNotImplemented
+ url = config.SFA_PLC_URL
+ # try to figure if the url is local
+ hostname=urlparse(url).hostname
+ is_local=False
+ if hostname == 'localhost': is_local=True
+ # otherwise compare IP addresses;
+ # this might fail for any number of reasons, so let's harden that
+ try:
+ # xxx todo this seems to result in a DNS request for each incoming request to the AM
+ # should be cached or improved
+ url_ip=socket.gethostbyname(hostname)
+ local_ip=socket.gethostbyname(socket.gethostname())
+ if url_ip==local_ip: is_local=True
+ except:
+ pass
+
+
+ # Openstack provides a RESTful api but it is very limited, so we will
+ # ignore it for now and always use the native openstack (nova) library.
+ # This of course will not work if sfa is not installed on the same machine
+ # as the openstack-compute package.
+ # robustness: make sure direct_access is defined on every path
+ direct_access=False
+ if is_local:
+ try:
+ from nova.auth.manager import AuthManager, db, context
+ direct_access=True
+ except:
+ direct_access=False
+ if is_local and direct_access:
+
+ logger.debug('openstack access - native')
+ self.auth = context.get_admin_context()
+ # AuthManager isn't really useful for much yet but it's
+ # more convenient to use than the db reference which requires
+ # a context. Lets hold onto the AuthManager reference for now.
+ #self.proxy = AuthManager()
+ self.auth_manager = AuthManager()
+ self.proxy = db
+
+ else:
+ # REST access is not implemented yet
+ self.auth = None
+ self.proxy = None
+ logger.debug('openstack access - REST')
+ raise SfaNotImplemented('openstack access - Rest')
+
+ def __getattr__(self, name):
+ # forward any unknown method call to the nova db layer, passing
+ # the admin context as the first argument as nova expects
+ def func(*args, **kwds):
+ result=getattr(self.proxy, name)(self.auth, *args, **kwds)
+ return result
+ return func
from sfa.client.return_value import ReturnValue
-# thgen xxx fixme this is wrong all right, but temporary, will use generic
-from sfa.storage.table import SfaTable
####################
class SfaApi (XmlrpcApi):
return server
- def getCredential(self):
+ def getCredential(self, minimumExpiration=0):
"""
- Return a valid credential for this interface.
+ Return a valid credential for this interface.
"""
type = 'authority'
path = self.config.SFA_DATA_DIR
cred = Credential(filename = cred_filename)
# make sure cred isnt expired
if not cred.get_expiration or \
- datetime.datetime.utcnow() < cred.get_expiration():
+ datetime.datetime.utcnow() + datetime.timedelta(seconds=minimumExpiration) < cred.get_expiration():
return cred.save_to_string(save_parents=True)
# get a new credential
auth_hrn = hrn
auth_info = self.auth.get_auth_info(auth_hrn)
# xxx thgen fixme - use SfaTable hardwired for now
- #table = self.SfaTable()
+ # thgen xxx fixme this is wrong all right, but temporary, will use generic
+ from sfa.storage.table import SfaTable
table = SfaTable()
records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
if not records:
"""
from sfa.storage.table import SfaTable
table = SfaTable()
+ filter = {}
if self.get('record_id'):
filter['record_id'] = self.get('record_id')
if self.get('hrn') and self.get('type'):
filter['type'] = self.get('type')
if self.get('pointer'):
filter['pointer'] = self.get('pointer')
- existing_records = table.find(filter)
- for record in existing_records:
- table.remove(record)
+ if filter:
+ existing_records = table.find(filter)
+ for record in existing_records:
+ table.remove(record)
class UserRecord(SfaRecord):
from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn
# 2 weeks, in seconds
-DEFAULT_CREDENTIAL_LIFETIME = 86400 * 14
+DEFAULT_CREDENTIAL_LIFETIME = 86400 * 31
# TODO: