From: Sandrine Avakian Date: Wed, 7 Mar 2012 09:10:48 +0000 (+0100) Subject: Cleaning up unused senslab files. X-Git-Tag: sfa-2.1-24~3^2~212 X-Git-Url: http://git.onelab.eu/?p=sfa.git;a=commitdiff_plain;h=a5c00c5bbd90082f705877fb0167d744e6e5ffa2 Cleaning up unused senslab files. --- diff --git a/sfa/managers/aggregate_manager_slab.py b/sfa/managers/aggregate_manager_slab.py deleted file mode 100644 index 7a483d31..00000000 --- a/sfa/managers/aggregate_manager_slab.py +++ /dev/null @@ -1,418 +0,0 @@ -import datetime -import time -import sys - -from sfa.util.sfalogging import logger -from sfa.util.faults import RecordNotFound, SliverDoesNotExist -from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id -from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename -from sfa.util.version import version_core -from sfa.util.sfatime import utcparse -from sfa.util.callids import Callids - -from sfa.trust.sfaticket import SfaTicket -from sfa.trust.credential import Credential -from sfa.rspecs.version_manager import VersionManager -from sfa.rspecs.rspec import RSpec - -from sfa.server.sfaapi import SfaApi -from sfa.senslab.slabaggregate import SlabAggregate -import sfa.plc.peers as peers - -from sfa.senslab.slabslices import SlabSlices - -class AggregateManager: - def __init__ (self, config): pass - # essentially a union of the core version, the generic version (this code) and - # whatever the driver needs to expose - - - def GetVersion(self, api, options): - - xrn=Xrn(api.hrn) - version = version_core() - version_generic = {'interface':'aggregate', - 'sfa': 2, - 'geni_api': 2, - 'hrn':xrn.get_hrn(), - 'urn':xrn.get_urn(), - } - version.update(version_generic) - testbed_version = self.driver.aggregate_version() - version.update(testbed_version) - return version - - #def GetVersion(self, api, options={}): - - #version_manager = VersionManager() - #ad_rspec_versions = [] - #request_rspec_versions = [] - #for rspec_version in version_manager.versions: - #if rspec_version.content_type in ['*', 'ad']: - #ad_rspec_versions.append(rspec_version.to_dict()) - #if rspec_version.content_type in ['*', 'request']: - #request_rspec_versions.append(rspec_version.to_dict()) - #xrn=Xrn(api.hrn) - #version_more = {'interface':'aggregate', - #'sfa': 2, - #'geni_api': api.config.SFA_AGGREGATE_API_VERSION, - #'testbed':'myplc', - #'hrn':xrn.get_hrn(), - #'geni_request_rspec_versions': request_rspec_versions, - #'geni_ad_rspec_versions': ad_rspec_versions, - #} - #return version_core(version_more) - - def _get_registry_objects(self, slice_xrn, creds, users): - """ - - """ - hrn, _ = urn_to_hrn(slice_xrn) - - hrn_auth = get_authority(hrn) - - # Build up objects that an SFA registry would return if SFA - # could contact the slice's registry directly - reg_objects = None - - if users: - # dont allow special characters in the site login base - #only_alphanumeric = re.compile('[^a-zA-Z0-9]+') - #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower() - slicename = hrn_to_pl_slicename(hrn) - login_base = slicename.split('_')[0] - reg_objects = {} - site = {} - site['site_id'] = 0 - site['name'] = 'geni.%s' % login_base - site['enabled'] = True - site['max_slices'] = 100 - - # Note: - # Is it okay if this login base is the same as one already at this myplc site? - # Do we need uniqueness? Should use hrn_auth instead of just the leaf perhaps? 
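# Illustrative sketch, not part of the deleted file: the site record above takes its
# login_base from the leading component of the PlanetLab-style slice name
# (hrn_to_pl_slicename is assumed to yield "<login_base>_<slice>").  The derivation
# in isolation, with a hypothetical slice name:
def derive_login_base(pl_slicename):
    # "princeton_myslice" -> "princeton"
    return pl_slicename.split('_')[0]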
- site['login_base'] = login_base - site['abbreviated_name'] = login_base - site['max_slivers'] = 1000 - reg_objects['site'] = site - - slice = {} - - # get_expiration always returns a normalized datetime - no need to utcparse - extime = Credential(string=creds[0]).get_expiration() - # If the expiration time is > 60 days from now, set the expiration time to 60 days from now - if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60): - extime = datetime.datetime.utcnow() + datetime.timedelta(days=60) - slice['expires'] = int(time.mktime(extime.timetuple())) - slice['hrn'] = hrn - slice['name'] = hrn_to_pl_slicename(hrn) - slice['url'] = hrn - slice['description'] = hrn - slice['pointer'] = 0 - reg_objects['slice_record'] = slice - - reg_objects['users'] = {} - for user in users: - user['key_ids'] = [] - hrn, _ = urn_to_hrn(user['urn']) - user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net" - user['first_name'] = hrn - user['last_name'] = hrn - reg_objects['users'][user['email']] = user - - return reg_objects - - def SliverStatus(self, api, slice_xrn, creds, options={}): - call_id = options.get('call_id') - if Callids().already_handled(call_id): return {} - - (hrn, _) = urn_to_hrn(slice_xrn) - # find out where this slice is currently running - slicename = hrn_to_pl_slicename(hrn) - - slices = api.driver.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires']) - if len(slices) == 0: - raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename)) - slice = slices[0] - - # report about the local nodes only - nodes = api.driver.GetNodes({'node_id':slice['node_ids'],'peer_id':None}, - ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact']) - site_ids = [node['site_id'] for node in nodes] - - result = {} - top_level_status = 'unknown' - if nodes: - top_level_status = 'ready' - slice_urn = Xrn(slice_xrn, 'slice').get_urn() - result['geni_urn'] = slice_urn - result['pl_login'] = slice['name'] - result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime() - - resources = [] - for node in nodes: - res = {} - res['pl_hostname'] = node['hostname'] - res['pl_boot_state'] = node['boot_state'] - res['pl_last_contact'] = node['last_contact'] - if node['last_contact'] is not None: - res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime() - sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) - res['geni_urn'] = sliver_id - if node['boot_state'] == 'boot': - res['geni_status'] = 'ready' - else: - res['geni_status'] = 'failed' - top_level_status = 'failed' - - res['geni_error'] = '' - - resources.append(res) - - result['geni_status'] = top_level_status - result['geni_resources'] = resources - return result - - def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, options={}): - """ - Create the sliver[s] (slice) at this aggregate. - Verify HRN and initialize the slice record in PLC if necessary. 
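# Illustrative sketch, not part of the deleted file: _get_registry_objects above caps a
# credential's expiration at 60 days from now before storing it as a unix timestamp.
# The same clamping logic on its own:
import datetime
import time

def clamp_expiration(extime, max_days=60):
    limit = datetime.datetime.utcnow() + datetime.timedelta(days=max_days)
    if extime > limit:
        extime = limit
    return int(time.mktime(extime.timetuple()))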
- """ - call_id = options.get('call_id') - if Callids().already_handled(call_id): return "" - aggregate = SlabAggregate(api) - #aggregate = Aggregate(api) - slices = SlabSlices(api) - (hrn, _) = urn_to_hrn(slice_xrn) - peer = slices.get_peer(hrn) - sfa_peer = slices.get_sfa_peer(hrn) - slice_record=None - if users: - slice_record = users[0].get('slice_record', {}) - print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice slice_record : ", slice_record - - # parse rspec - rspec = RSpec(rspec_string) - requested_attributes = rspec.version.get_slice_attributes() - - # ensure site record exists - site = slices.verify_site(hrn, slice_record, peer, sfa_peer) - # ensure slice record exists - print>>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice users : ", users - slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer) - print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice slice : ", slice - # ensure person records exists - persons = slices.verify_persons(hrn, slice, users) - #persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer) - # ensure slice attributes exists - #slices.verify_slice_attributes(slice, requested_attributes) - - # add/remove slice from nodes - requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()] - print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice requested_slivers : ", requested_slivers - slices.verify_slice_nodes(slice, requested_slivers, peer) - - # add/remove links links - #slices.verify_slice_links(slice, rspec.version.get_link_requests(), aggregate) - - # handle MyPLC peer association. - # only used by plc and ple. - slices.handle_peer(site, slice, persons, peer) - - return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version) - - - def RenewSliver(self, api, xrn, creds, expiration_time, options={}): - call_id = options.get('call_id') - if Callids().already_handled(call_id): return True - (hrn, _) = urn_to_hrn(xrn) - slicename = hrn_to_pl_slicename(hrn) - slices = api.driver.GetSlices({'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice = slices[0] - requested_time = utcparse(expiration_time) - record = {'expires': int(time.mktime(requested_time.timetuple()))} - try: - api.driver.UpdateSlice(slice['slice_id'], record) - return True - except: - return False - - def start_slice(self, api, xrn, creds): - (hrn, _) = urn_to_hrn(xrn) - slicename = hrn_to_pl_slicename(hrn) - slices = api.driver.GetSlices({'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0]['slice_id'] - slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id']) - # just remove the tag if it exists - if slice_tags: - api.driver.DeleteSliceTag(slice_tags[0]['slice_tag_id']) - - return 1 - - def stop_slice(self, api, xrn, creds): - hrn, _ = urn_to_hrn(xrn) - slicename = hrn_to_pl_slicename(hrn) - slices = api.driver.GetSlices({'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0]['slice_id'] - slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}) - if not slice_tags: - api.driver.AddSliceTag(slice_id, 'enabled', '0') - elif slice_tags[0]['value'] != "0": - tag_id = slice_tags[0]['slice_tag_id'] - api.driver.UpdateSliceTag(tag_id, '0') - return 1 - - def reset_slice(self, api, xrn): - # XX not implemented at this interface - return 1 - - def DeleteSliver(self, api, xrn, creds, options={}): - call_id = options.get('call_id') - if 
Callids().already_handled(call_id): return "" - (hrn, _) = urn_to_hrn(xrn) - slicename = hrn_to_pl_slicename(hrn) - slices = api.driver.GetSlices({'name': slicename}) - if not slices: - return 1 - slice = slices[0] - - # determine if this is a peer slice - peer = peers.get_peer(api, hrn) - try: - if peer: - api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer) - api.driver.DeleteSliceFromNodes(slicename, slice['node_ids']) - finally: - if peer: - api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id']) - return 1 - - def ListSlices(self, api, creds, options={}): - call_id = options.get('call_id') - if Callids().already_handled(call_id): return [] - # look in cache first - #if self.caching and api.cache: - #slices = api.cache.get('slices') - #if slices: - #return slices - - # get data from db - slices = api.driver.GetSlices({'peer_id': None}, ['name']) - slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices] - slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns] - - # cache the result - #if self.caching and api.cache: - #api.cache.add('slices', slice_urns) - - return slice_urns - - def ListResources(self, api, creds, options={}): - call_id = options.get('call_id') - if Callids().already_handled(call_id): return "" - # get slice's hrn from options - xrn = options.get('geni_slice_urn', None) - cached = options.get('cached', True) - (hrn, _) = urn_to_hrn(xrn) - - version_manager = VersionManager() - # get the rspec's return format from options - rspec_version = version_manager.get_version(options.get('geni_rspec_version')) - version_string = "rspec_%s" % (rspec_version) - - #panos adding the info option to the caching key (can be improved) - if options.get('info'): - version_string = version_string + "_"+options.get('info', 'default') - - # look in cache first - #if self.cache and api.cache and not xrn and cached: - #rspec = api.cache.get(version_string) - #if rspec: - #api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn) - #return rspec - - #panos: passing user-defined options - #print "manager options = ",options - aggregate = SlabAggregate(api) - #aggregate = Aggregate(api) - rspec = aggregate.get_rspec(slice_xrn=xrn, version=rspec_version, options=options) - - # cache the result - #if self.caching and api.cache and not xrn: - #api.cache.add(version_string, rspec) - - return rspec - - - def GetTicket(self, api, xrn, creds, rspec, users, options={}): - - (slice_hrn, _) = urn_to_hrn(xrn) - slices = SlabSlices(api) - peer = slices.get_peer(slice_hrn) - sfa_peer = slices.get_sfa_peer(slice_hrn) - - # get the slice record - credential = api.getCredential() - interface = api.registries[api.hrn] - registry = api.server_proxy(interface, credential) - records = registry.Resolve(xrn, credential) - - # make sure we get a local slice record - record = None - for tmp_record in records: - if tmp_record['type'] == 'slice' and \ - not tmp_record['peer_authority']: - #Error (E0602, GetTicket): Undefined variable 'SliceRecord' - record = SliceRecord(dict=tmp_record) - if not record: - raise RecordNotFound(slice_hrn) - - # similar to CreateSliver, we must verify that the required records exist - # at this aggregate before we can issue a ticket - # parse rspec - rspec = RSpec(rspec_string) - requested_attributes = rspec.version.get_slice_attributes() - - # ensure site record exists - site = slices.verify_site(hrn, slice_record, peer, sfa_peer) - # ensure slice record exists - slice = 
slices.verify_slice(hrn, slice_record, peer, sfa_peer) - # ensure person records exists - persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer) - # ensure slice attributes exists - slices.verify_slice_attributes(slice, requested_attributes) - - # get sliver info - slivers = slices.get_slivers(slice_hrn) - - if not slivers: - raise SliverDoesNotExist(slice_hrn) - - # get initscripts - initscripts = [] - data = { - 'timestamp': int(time.time()), - 'initscripts': initscripts, - 'slivers': slivers - } - - # create the ticket - object_gid = record.get_gid_object() - new_ticket = SfaTicket(subject = object_gid.get_subject()) - new_ticket.set_gid_caller(api.auth.client_gid) - new_ticket.set_gid_object(object_gid) - new_ticket.set_issuer(key=api.key, subject=api.hrn) - new_ticket.set_pubkey(object_gid.get_pubkey()) - new_ticket.set_attributes(data) - new_ticket.set_rspec(rspec) - #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn)) - new_ticket.encode() - new_ticket.sign() - - return new_ticket.save_to_string(save_parents=True) diff --git a/sfa/managers/registry_manager_slab.py b/sfa/managers/registry_manager_slab.py deleted file mode 100644 index 2e85f0d4..00000000 --- a/sfa/managers/registry_manager_slab.py +++ /dev/null @@ -1,488 +0,0 @@ -import types -import time -import sys - -from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \ - UnknownSfaType, ExistingRecord -from sfa.util.prefixTree import prefixTree -from sfa.storage.record import SfaRecord -from sfa.storage.table import SfaTable -from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn -from sfa.util.version import version_core - -from sfa.trust.gid import GID -from sfa.trust.credential import Credential -from sfa.trust.certificate import Certificate, Keypair, convert_public_key -from sfa.trust.gid import create_uuid - -#myapi=SfaAPI() -# The GENI GetVersion call -def GetVersion(api): - - # Bugfix TP 09/11/2011 - #peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems() - peers =dict ([ (peername,v.get_url()) for (peername,v) in api.registries.iteritems() - if peername != api.hrn]) - xrn=Xrn(api.hrn) - return version_core({'interface':'registry', - 'hrn':xrn.get_hrn(), - 'urn':xrn.get_urn(), - 'peers':peers}) - -def GetCredential(api, xrn, type, is_self=False): - # convert xrn to hrn - if type: - hrn = urn_to_hrn(xrn)[0] - else: - hrn, type = urn_to_hrn(xrn) - - # Is this a root or sub authority - auth_hrn = api.auth.get_authority(hrn) - print>> sys.stderr , " \r\n REGISTRY get_credential auth_hrn:" , auth_hrn,"hrn : ", hrn, " Type : ", type, "is self : " , is_self,"<<" - if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN: - auth_hrn = hrn - # get record info - auth_info = api.auth.get_auth_info(auth_hrn) - table = SfaTable() - print >> sys.stderr , " findObject ", type, hrn - records = table.findObjects({'type': type, 'hrn': hrn}) - print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s records %s " %(hrn, records) - if not records: - raise RecordNotFound(hrn) - record = records[0] - - # verify_cancreate_credential requires that the member lists - # (researchers, pis, etc) be filled in - #api.driver.fill_record_info(record, api.aggregates) - api.driver.fill_record_info(record) - record['enabled'] = True - print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s record['enabled'] %s is_self %s" %(hrn, record['enabled'], is_self) - if record['type']=='user': - if not record['enabled']: - 
print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s ACCOUNT Not enabled" - raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email'])) - - # get the callers gid - # if this is a self cred the record's gid is the caller's gid - if is_self: - caller_hrn = hrn - caller_gid = record.get_gid_object() - print>>sys.stderr, " \r\n REGISTRY IS SELF OK caller_hrn %s--- \r\n caller_gid %s---------" %(caller_hrn,caller_gid) - else: - print>> sys.stderr , " \r\n ++ ELSE " - caller_gid = api.auth.client_cred.get_gid_caller() - print>> sys.stderr , " \r\n ++ ELSE caller_gid %s record %s" %(caller_gid, record) - caller_hrn = caller_gid.get_hrn() - print>> sys.stderr , " \r\n ++ ELSE caller_hrn %s " %(caller_hrn) - - object_hrn = record.get_gid_object().get_hrn() - print>> sys.stderr , " \r\n ++ ELSE object_hrn %s " %(object_hrn) - - rights = api.auth.determine_user_rights(caller_hrn, record) - print>> sys.stderr , " \r\n ++ After rights record: %s \r\n ====RIGHTS %s " %(record , rights) - - # make sure caller has rights to this object - if rights.is_empty(): - raise PermissionError(caller_hrn + " has no rights to " + record['name']) - - object_gid = GID(string=record['gid']) - new_cred = Credential(subject = object_gid.get_subject()) - new_cred.set_gid_caller(caller_gid) - new_cred.set_gid_object(object_gid) - new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename()) - #new_cred.set_pubkey(object_gid.get_pubkey()) - new_cred.set_privileges(rights) - new_cred.get_privileges().delegate_all_privileges(True) - if 'expires' in record: - new_cred.set_expiration(int(record['expires'])) - auth_kind = "authority,ma,sa" - # Parent not necessary, verify with certs - #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind)) - new_cred.encode() - new_cred.sign() - - return new_cred.save_to_string(save_parents=True) - - -def Resolve(api, xrns, type=None, full=True): - - # load all known registry names into a prefix tree and attempt to find - # the longest matching prefix - print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========api ', api - print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========xrns ', xrns - if not isinstance(xrns, types.ListType): - if not type: - type = Xrn(xrns).get_type() - xrns = [xrns] - hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] - print >>sys.stderr , '\t\t =========hrns ', hrns - # create a dict where key is a registry hrn and its value is a - # hrns at that registry (determined by the known prefix tree). - xrn_dict = {} - print >>sys.stderr, '\r\n REGISTRY MANAGER : resolve xrns ' , xrns #api.__dict__.keys() - registries = api.registries - tree = prefixTree() - registry_hrns = registries.keys() - print >>sys.stderr, '\r\n \t\t REGISTRY MANAGER registry_hrns' , registry_hrns - tree.load(registry_hrns) - for xrn in xrns: - registry_hrn = tree.best_match(urn_to_hrn(xrn)[0]) - print >>sys.stderr, '\t\tREGISTRY MANAGER *****tree.best_match ', registry_hrn - if registry_hrn not in xrn_dict: - xrn_dict[registry_hrn] = [] - xrn_dict[registry_hrn].append(xrn) - print >>sys.stderr, '\t\tREGISTRY MANAGER *****xrn_dict[registry_hrn] ',xrn_dict[registry_hrn] - records = [] - for registry_hrn in xrn_dict: - # skip the hrn without a registry hrn - # XX should we let the user know the authority is unknown? 
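# Illustrative sketch, not part of the deleted file: Resolve above groups the requested
# hrns by the registry whose hrn is the longest matching prefix (prefixTree.load /
# best_match).  A simplified, dependency-free version of that grouping:
def group_by_best_match(hrns, registry_hrns):
    groups = {}
    for hrn in hrns:
        # longest registry hrn that is a dotted prefix of this hrn, else None
        matches = [r for r in registry_hrns if hrn == r or hrn.startswith(r + '.')]
        best = max(matches, key=len) if matches else None
        groups.setdefault(best, []).append(hrn)
    return groups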
- print >>sys.stderr, '\t\t registry_hrn in xrn_dict ', registry_hrn - if not registry_hrn: - continue - - # if the best match (longest matching hrn) is not the local registry, - # forward the request - xrns = xrn_dict[registry_hrn] - if registry_hrn != api.hrn: - credential = api.getCredential() - interface = api.registries[registry_hrn] - server = api.server_proxy(interface, credential) - peer_records = server.Resolve(xrns, credential) - print >>sys.stderr , '\t\t peer_records ', peer_records - records.extend([SfaRecord(dict=record).as_dict() for record in peer_records]) - - print >>sys.stderr,'\t\t hrns ' , hrns - # try resolving the remaining unfound records at the local registry - remaining_hrns = set(hrns).difference([record['hrn'] for record in records]) - # convert set to list - remaining_hrns = [hrn for hrn in remaining_hrns] - print >>sys.stderr, '\t\t remaining_hrns', remaining_hrns - table = SfaTable() - local_records = table.findObjects({'hrn': remaining_hrns}) - - print >>sys.stderr, '\t\t LOCAL REC !', local_records - for rec in local_records: - print >>sys.stderr, '\t\t resolve regmanager : rec ', rec - - if full: - print >>sys.stderr, '\r\n \r\n REGISTRY:_FULL', api - - api.driver.fill_record_info(local_records) - - # convert local record objects to dicts - records.extend([dict(record) for record in local_records]) - #print >>sys.stderr, "\r\n \t\t records extends %s" %(records) - if not records: - raise RecordNotFound(str(hrns)) - - if type: - records = filter(lambda rec: rec['type'] in [type], records) - - return records - -def List(api, xrn, origin_hrn=None): - hrn, type = urn_to_hrn(xrn) - # load all know registry names into a prefix tree and attempt to find - # the longest matching prefix - records = [] - registries = api.registries - registry_hrns = registries.keys() - tree = prefixTree() - tree.load(registry_hrns) - registry_hrn = tree.best_match(hrn) - - #if there was no match then this record belongs to an unknow registry - if not registry_hrn: - raise MissingAuthority(xrn) - # if the best match (longest matching hrn) is not the local registry, - # forward the request - records = [] - if registry_hrn != api.hrn: - credential = api.getCredential() - print>>sys.stderr, "Registries : ", registries - interface = api.registries[registry_hrn] - server = api.server_proxy(interface, credential) - record_list = server.List(xrn, credential) - records = [SfaRecord(dict=record).as_dict() for record in record_list] - - # if we still have not found the record yet, try the local registry - if not records: - if not api.auth.hierarchy.auth_exists(hrn): - raise MissingAuthority(hrn) - - table = SfaTable() - records = table.find({'authority': hrn}) - - return records - - -def Register(api, record): - - - #hrn, type = record['hrn'], record['type'] - hrn = str(record['hrn']).strip("['']") - type = str( record['type']).strip("['']") - urn = hrn_to_urn(hrn,type) - # validate the type - if type not in ['authority', 'slice', 'node', 'user']: - raise UnknownSfaType(type) - - # check if record already exists - table = SfaTable() - existing_records = table.find({'type': type, 'hrn': hrn}) - if existing_records: - raise ExistingRecord(hrn) - - record = SfaRecord(dict = record) - - print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAN.PY register SfaRecordrecord %s" %(record) - #record['authority'] = get_authority(record['hrn']) - record['authority'] = get_authority(hrn) - - #type_of_rec = record['type'] - #hrn = record['hrn'] - - #api.auth.verify_object_permission(hrn) - 
api.auth.verify_object_permission( record['hrn']) - auth_info = api.auth.get_auth_info(record['authority']) - - - - pub_key = None - # make sure record has a gid - if 'gid' not in record: - uuid = create_uuid() - pkey = Keypair(create=True) - if 'key' in record and record['key']: - if isinstance(record['key'], types.ListType): - pub_key = record['key'][0] - else: - pub_key = record['key'] - pkey = convert_public_key(pub_key) - - gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey) - gid = gid_object.save_to_string(save_parents=True) - record['gid'] = gid - record.set_gid(gid) - print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY record['gid'] %s" %(record['gid']) - print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register type %s"%(type) - - if type in ["authority"]: - # update the tree - if not api.auth.hierarchy.auth_exists(hrn): - api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority')) - - # get the GID from the newly created authority - gid = auth_info.get_gid_object() - record.set_gid(gid.save_to_string(save_parents=True)) - - #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record) - print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register : type in [authority ] sfa_fields_to_pl_fields FIELDS A CHANGER" - - # thierry: ideally we'd like to be able to write api.driver.GetSites - # in which case the code would become mostly the same as for pl - sites = api.driver.GetSites([pl_record['login_base']]) - if not sites: - # thierry - # Error (E0601, register): Using variable 'pl_record' before assignment - pointer = api.driver.AddSite( pl_record) - else: - pointer = sites[0]['site_id'] - - record.set_pointer(pointer) - record['pointer'] = pointer - - elif (type == "slice"): - acceptable_fields=['url', 'instantiation', 'name', 'description'] - pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record) - print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register slice pl_record %s"%(pl_record) - for key in pl_record.keys(): - if key not in acceptable_fields: - pl_record.pop(key) - slices = api.driver.GetSlices([pl_record['name']]) - if not slices: - pointer = api.driver.AddSlice(pl_record) - else: - pointer = slices[0]['slice_id'] - record.set_pointer(pointer) - record['pointer'] = pointer - - elif (type == "user"): - persons = api.driver.GetPersons([record['email']]) - if not persons: - print>>sys.stderr, " \r\n \r\n ----------- registry_manager_slab register NO PERSON ADD TO LDAP?" - - #if not persons: - #pointer = api.driver.AddPerson( dict(record)) - #else: - #pointer = persons[0]['person_id'] - - if 'enabled' in record and record['enabled']: - api.driver.UpdatePerson(pointer, {'enabled': record['enabled']}) - # add this persons to the site only if he is being added for the first - # time by sfa and doesont already exist in plc - if not persons or not persons[0]['site_ids']: - login_base = get_leaf(record['authority']) - - api.driver.AddPersonToSite(pointer, login_base) - - # What roles should this user have? 
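# Illustrative sketch, not part of the deleted file: when Register receives a record
# without a gid, it either converts the supplied public key or generates a fresh
# keypair before minting a gid.  The key-selection step, which only needs the record
# dict, looks like this in isolation:
def select_public_key(record):
    key = record.get('key')
    if not key:
        return None  # a new keypair is created instead
    return key[0] if isinstance(key, list) else key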
- api.driver.AddRoleToPerson('user', pointer) - # Add the user's key - if pub_key: - api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key}) - - #elif (type == "node"): - #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record) - #login_base = hrn_to_pl_login_base(record['authority']) - #nodes = api.driver.GetNodes([pl_record['hostname']]) - #if not nodes: - #pointer = api.driver.AddNode(login_base, pl_record) - #else: - #pointer = nodes[0]['node_id'] - - ##record['pointer'] = pointer - ##record.set_pointer(pointer) - #record_id = table.insert(record) - #record['record_id'] = record_id - - # update membership for researchers, pis, owners, operators - api.driver.update_membership(None, record) - - return record.get_gid_object().save_to_string(save_parents=True) - -def Update(api, record_dict): - new_record = SfaRecord(dict = record_dict) - type = new_record['type'] - hrn = new_record['hrn'] - urn = hrn_to_urn(hrn,type) - api.auth.verify_object_permission(hrn) - table = SfaTable() - # make sure the record exists - records = table.findObjects({'type': type, 'hrn': hrn}) - if not records: - raise RecordNotFound(hrn) - record = records[0] - record['last_updated'] = time.gmtime() - - # Update_membership needs the membership lists in the existing record - # filled in, so it can see if members were added or removed - api.driver.fill_record_info(record) - - # Use the pointer from the existing record, not the one that the user - # gave us. This prevents the user from inserting a forged pointer - pointer = record['pointer'] - # update the PLC information that was specified with the record - - if (type == "authority"): - api.driver.UpdateSite(pointer, new_record) - - elif type == "slice": - pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record) - if 'name' in pl_record: - pl_record.pop('name') - api.driver.UpdateSlice(pointer, pl_record) - - elif type == "user": - # SMBAKER: UpdatePerson only allows a limited set of fields to be - # updated. Ideally we should have a more generic way of doing - # this. I copied the field names from UpdatePerson.py... 
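# Illustrative sketch, not part of the deleted file: the user branch of Update below
# forwards only a whitelisted subset of fields to UpdatePerson.  The same filtering as
# a helper:
ALLOWED_PERSON_FIELDS = ['first_name', 'last_name', 'title', 'email',
                         'password', 'phone', 'url', 'bio', 'accepted_aup',
                         'enabled']

def filter_person_fields(record, allowed=ALLOWED_PERSON_FIELDS):
    return dict((k, v) for (k, v) in record.items() if k in allowed)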
- update_fields = {} - all_fields = new_record - for key in all_fields.keys(): - if key in ['first_name', 'last_name', 'title', 'email', - 'password', 'phone', 'url', 'bio', 'accepted_aup', - 'enabled']: - update_fields[key] = all_fields[key] - api.driver.UpdatePerson(pointer, update_fields) - - if 'key' in new_record and new_record['key']: - # must check this key against the previous one if it exists - persons = api.driver.GetPersons([pointer], ['key_ids']) - person = persons[0] - keys = person['key_ids'] - keys = api.driver.GetKeys(person['key_ids']) - key_exists = False - if isinstance(new_record['key'], types.ListType): - new_key = new_record['key'][0] - else: - new_key = new_record['key'] - - # Delete all stale keys - for key in keys: - if new_record['key'] != key['key']: - api.driver.DeleteKey(key['key_id']) - else: - key_exists = True - if not key_exists: - api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key}) - - # update the openssl key and gid - pkey = convert_public_key(new_key) - uuid = create_uuid() - gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey) - gid = gid_object.save_to_string(save_parents=True) - record['gid'] = gid - record = SfaRecord(dict=record) - table.update(record) - - elif type == "node": - api.driver.UpdateNode(pointer, new_record) - - else: - raise UnknownSfaType(type) - - # update membership for researchers, pis, owners, operators - api.driver.update_membership(record, new_record) - - return 1 - -# expecting an Xrn instance -def Remove(api, xrn, origin_hrn=None): - - table = SfaTable() - filter = {'hrn': xrn.get_hrn()} - hrn=xrn.get_hrn() - type=xrn.get_type() - if type and type not in ['all', '*']: - filter['type'] = type - - records = table.find(filter) - if not records: raise RecordNotFound(hrn) - record = records[0] - type = record['type'] - - credential = api.getCredential() - registries = api.registries - - # Try to remove the object from the PLCDB of federated agg. - # This is attempted before removing the object from the local agg's PLCDB and sfa table - if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']: - for registry in registries: - if registry not in [api.hrn]: - try: - result=registries[registry].remove_peer_object(credential, record, origin_hrn) - except: - pass - if type == "user": - persons = api.driver.GetPersons(record['pointer']) - # only delete this person if he has site ids. 
if he doesnt, it probably means - # he was just removed from a site, not actually deleted - if persons and persons[0]['site_ids']: - api.driver.DeletePerson(record['pointer']) - elif type == "slice": - if api.driver.GetSlices(record['pointer']): - api.driver.DeleteSlice(record['pointer']) - elif type == "node": - if api.driver.GetNodes(record['pointer']): - api.driver.DeleteNode(record['pointer']) - elif type == "authority": - if api.driver.GetSites(record['pointer']): - api.driver.DeleteSite(record['pointer']) - else: - raise UnknownSfaType(type) - - table.remove(record) - - return 1 - diff --git a/sfa/managers/slice_manager_slab.py b/sfa/managers/slice_manager_slab.py deleted file mode 100644 index e67d2b5d..00000000 --- a/sfa/managers/slice_manager_slab.py +++ /dev/null @@ -1,670 +0,0 @@ -# -import sys -import time,datetime -from StringIO import StringIO -from types import StringTypes -from copy import deepcopy -from copy import copy -from lxml import etree - -from sfa.util.sfalogging import logger -#from sfa.util.sfalogging import sfa_logger -#from sfa.util.rspecHelper import merge_rspecs -from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn -from sfa.util.plxrn import hrn_to_pl_slicename -#from sfa.util.rspec import * -#from sfa.util.specdict import * -from sfa.util.faults import * -from sfa.util.record import SfaRecord -#from sfa.rspecs.pg_rspec import PGRSpec -#from sfa.rspecs.sfa_rspec import SfaRSpec -from sfa.rspecs.rspec_converter import RSpecConverter -#from sfa.rspecs.rspec_parser import parse_rspec -#from sfa.rspecs.rspec_version import RSpecVersion -#from sfa.rspecs.sfa_rspec import sfa_rspec_version -#from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version -from sfa.client.client_helper import sfa_to_pg_users_arg -from sfa.rspecs.version_manager import VersionManager - -#from sfa.rspecs.rspec import RSpec -from sfa.util.policy import Policy -from sfa.util.prefixTree import prefixTree -#from sfa.util.sfaticket import * -from sfa.trust.credential import Credential -#from sfa.util.threadmanager import ThreadManager -#import sfa.util.xmlrpcprotocol as xmlrpcprotocol -#import sfa.plc.peers as peers -from sfa.util.version import version_core -from sfa.util.callids import Callids -#from sfa.senslab.api import * - - -#api=SfaAPI(interface='slicemgr') - -def _call_id_supported(api, server): - """ - Returns true if server support the optional call_id arg, false otherwise. 
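# Illustrative sketch, not part of the deleted file: _call_id_supported below parses the
# peer's code_tag ("major.minor-rev") and accepts anything newer than 1.0-20.  The
# flattened diff loses indentation, so the nesting here is an assumption:
def supports_call_id(code_tag):
    version, rev = code_tag.split('-')[0:2]
    major, minor = version.split('.')[0:2]
    if int(major) > 1:
        if int(minor) > 0 or int(rev) > 20:
            return True
    return False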
- """ - server_version = api.get_cached_server_version(server) - - if 'sfa' in server_version: - code_tag = server_version['code_tag'] - code_tag_parts = code_tag.split("-") - - version_parts = code_tag_parts[0].split(".") - major, minor = version_parts[0:2] - rev = code_tag_parts[1] - if int(major) > 1: - if int(minor) > 0 or int(rev) > 20: - return True - return False - -# we have specialized xmlrpclib.ServerProxy to remember the input url -# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances -def get_serverproxy_url (server): - try: - return server.get_url() - except: - logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals") - return server._ServerProxy__host + server._ServerProxy__handler - -def GetVersion(api): - # peers explicitly in aggregates.xml - peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems() - if peername != api.hrn]) - version_manager = VersionManager() - ad_rspec_versions = [] - request_rspec_versions = [] - for rspec_version in version_manager.versions: - if rspec_version.content_type in ['*', 'ad']: - ad_rspec_versions.append(rspec_version.to_dict()) - if rspec_version.content_type in ['*', 'request']: - request_rspec_versions.append(rspec_version.to_dict()) - default_rspec_version = version_manager.get_version("sfa 1").to_dict() - xrn=Xrn(api.hrn, 'authority+sa') - version_more = {'interface':'slicemgr', - 'hrn' : xrn.get_hrn(), - 'urn' : xrn.get_urn(), - 'peers': peers, - 'request_rspec_versions': request_rspec_versions, - 'ad_rspec_versions': ad_rspec_versions, - 'default_ad_rspec': default_rspec_version - } - sm_version=version_core(version_more) - # local aggregate if present needs to have localhost resolved - if api.hrn in api.aggregates: - local_am_url=get_serverproxy_url(api.aggregates[api.hrn]) - sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname']) - return sm_version - - -#def GetVersion(api): - ## peers explicitly in aggregates.xml - #peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems() - #if peername != api.hrn]) - #xrn=Xrn (api.hrn) - #request_rspec_versions = [dict(pg_rspec_request_version), dict(sfa_rspec_version)] - #ad_rspec_versions = [dict(pg_rspec_ad_version), dict(sfa_rspec_version)] - #version_more = {'interface':'slicemgr', - #'hrn' : xrn.get_hrn(), - #'urn' : xrn.get_urn(), - #'peers': peers, - #'request_rspec_versions': request_rspec_versions, - #'ad_rspec_versions': ad_rspec_versions, - #'default_ad_rspec': dict(sfa_rspec_version) - #} - #sm_version=version_core(version_more) - ## local aggregate if present needs to have localhost resolved - #if api.hrn in api.aggregates: - #local_am_url=get_serverproxy_url(api.aggregates[api.hrn]) - #sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname']) - #return sm_version - - -def drop_slicemgr_stats(api,rspec): - try: - stats_elements = rspec.xml.xpath('//statistics') - for node in stats_elements: - node.getparent().remove(node) - except Exception, e: - api.logger.warn("drop_slicemgr_stats failed: %s " % (str(e))) - - - - -def CreateSliver(api, xrn, creds, rspec_str, users, call_id): - - version_manager = VersionManager() - def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id): - - tStart = time.time() - try: - # Need to call GetVersion at an aggregate to determine the supported - # rspec type/format beofre calling CreateSliver at an Aggregate. 
- print>>sys.stderr, " \r\n SLICE MANAGERSLAB _CreateSliver server " - server_version = api.get_cached_server_version(server) - requested_users = users - if 'sfa' not in server_version and 'geni_api' in server_version: - # sfa aggregtes support both sfa and pg rspecs, no need to convert - # if aggregate supports sfa rspecs. otherwise convert to pg rspec - rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request')) - filter = {'component_manager_id': server_version['urn']} - rspec.filter(filter) - rspec = rspec.toxml() - requested_users = sfa_to_pg_users_arg(users) - args = [xrn, credential, rspec, requested_users] - if _call_id_supported(api, server): - args.append(call_id) - rspec = server.CreateSliver(*args) - return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"} - except: - logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url) - return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"} - - - if Callids().already_handled(call_id): return "" - - # Validate the RSpec against PlanetLab's schema --disabled for now - # The schema used here needs to aggregate the PL and VINI schemas - # schema = "/var/www/html/schemas/pl.rng" - rspec = RSpec(rspec_str) - schema = None - if schema: - rspec.validate(schema) - - print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api %s" %(api) - # if there is a section, the aggregates don't care about it, - # so delete it. - drop_slicemgr_stats(api,rspec) - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - - # get the callers hrn - hrn, type = urn_to_hrn(xrn) - valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - threads = ThreadManager() - print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api aggregates %s \t caller_hrn %s api.hrn %s" %(api.aggregates, caller_hrn, api.hrn) - for aggregate in api.aggregates: - # prevent infinite loop. 
Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - interface = api.aggregates[aggregate] - print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver aggregate %s interface %s" %(api.aggregates[aggregate],interface) - server = api.get_server(interface, credential) - if server is None: - print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver NOSERVERS " - # Just send entire RSpec to each aggregate - #threads.run(_CreateSliver, aggregate, xrn, [credential], rspec.toxml(), users, call_id) - threads.run(_CreateSliver, aggregate, server, xrn, [credential], rspec.toxml(), users, call_id) - results = threads.get_results() - manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest') - result_rspec = RSpec(version=manifest_version) - #rspec = SfaRSpec() - for result in results: - add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"]) - if result["status"]=="success": - try: - result_rspec.version.merge(result["rspec"]) - except: - api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec") - return result_rspec.toxml() - #rspec.merge(result) - #return rspec.toxml() - -def RenewSliver(api, xrn, creds, expiration_time, call_id): - if Callids().already_handled(call_id): return True - - (hrn, type) = urn_to_hrn(xrn) - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - # prevent infinite loop. Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - - server = api.aggregates[aggregate] - threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id) - # 'and' the results - return reduce (lambda x,y: x and y, threads.get_results() , True) - -def get_ticket(api, xrn, creds, rspec, users): - slice_hrn, type = urn_to_hrn(xrn) - # get the netspecs contained within the clients rspec - aggregate_rspecs = {} - tree= etree.parse(StringIO(rspec)) - elements = tree.findall('./network') - for element in elements: - aggregate_hrn = element.values()[0] - aggregate_rspecs[aggregate_hrn] = rspec - - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems(): - # prevent infinite loop. 
Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - server = None - if aggregate in api.aggregates: - server = api.aggregates[aggregate] - else: - net_urn = hrn_to_urn(aggregate, 'authority') - # we may have a peer that knows about this aggregate - for agg in api.aggregates: - target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn) - if not target_aggs or not 'hrn' in target_aggs[0]: - continue - # send the request to this address - url = target_aggs[0]['url'] - server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file) - # aggregate found, no need to keep looping - break - if server is None: - continue - threads.run(server.ParseTicket, xrn, credential, aggregate_rspec, users) - - results = threads.get_results() - - # gather information from each ticket - rspecs = [] - initscripts = [] - slivers = [] - object_gid = None - for result in results: - agg_ticket = SfaTicket(string=result) - attrs = agg_ticket.get_attributes() - if not object_gid: - object_gid = agg_ticket.get_gid_object() - rspecs.append(agg_ticket.get_rspec()) - initscripts.extend(attrs.get('initscripts', [])) - slivers.extend(attrs.get('slivers', [])) - - # merge info - attributes = {'initscripts': initscripts, - 'slivers': slivers} - merged_rspec = merge_rspecs(rspecs) - - # create a new ticket - ticket = SfaTicket(subject = slice_hrn) - ticket.set_gid_caller(api.auth.client_gid) - ticket.set_issuer(key=api.key, subject=api.hrn) - ticket.set_gid_object(object_gid) - ticket.set_pubkey(object_gid.get_pubkey()) - #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn)) - ticket.set_attributes(attributes) - ticket.set_rspec(merged_rspec) - ticket.encode() - ticket.sign() - return ticket.save_to_string(save_parents=True) - - -def DeleteSliver(api, xrn, creds, call_id): - if Callids().already_handled(call_id): return "" - (hrn, type) = urn_to_hrn(xrn) - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - # prevent infinite loop. Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - server = api.aggregates[aggregate] - threads.run(server.DeleteSliver, xrn, credential, call_id) - threads.get_results() - return 1 - -def start_slice(api, xrn, creds): - hrn, type = urn_to_hrn(xrn) - - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - # prevent infinite loop. 
Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - server = api.aggregates[aggregate] - threads.run(server.Start, xrn, credential) - threads.get_results() - return 1 - -def stop_slice(api, xrn, creds): - hrn, type = urn_to_hrn(xrn) - - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - # prevent infinite loop. Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - server = api.aggregates[aggregate] - threads.run(server.Stop, xrn, credential) - threads.get_results() - return 1 - -def reset_slice(api, xrn): - """ - Not implemented - """ - return 1 - -def shutdown(api, xrn, creds): - """ - Not implemented - """ - return 1 - -def status(api, xrn, creds): - """ - Not implemented - """ - return 1 - -# Thierry : caching at the slicemgr level makes sense to some extent -#caching=True -caching=False -def ListSlices(api, creds, call_id): - - if Callids().already_handled(call_id): return [] - - # look in cache first - if caching and api.cache: - slices = api.cache.get('slices') - if slices: - return slices - - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - # fetch from aggregates - for aggregate in api.aggregates: - # prevent infinite loop. 
Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - server = api.aggregates[aggregate] - threads.run(server.ListSlices, credential, call_id) - - # combime results - results = threads.get_results() - slices = [] - for result in results: - slices.extend(result) - - # cache the result - if caching and api.cache: - api.cache.add('slices', slices) - - return slices - -def add_slicemgr_stat(rspec, callname, aggname, elapsed, status): - try: - stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname) - if stats_tags: - stats_tag = stats_tags[0] - else: - stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname) - - etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status)) - except Exception, e: - api.logger.warn("add_slicemgr_stat failed on %s: %s" %(aggname, str(e))) - - - - -def ListResources(api, creds, options, call_id): - version_manager = VersionManager() - def _ListResources(aggregate, server, credential, opts, call_id): - - my_opts = copy(opts) - args = [credential, my_opts] - tStart = time.time() - try: - if _call_id_supported(api, server): - args.append(call_id) - version = api.get_cached_server_version(server) - # force ProtoGENI aggregates to give us a v2 RSpec - if 'sfa' not in version.keys(): - my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict() - rspec = server.ListResources(*args) - return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"} - except Exception, e: - api.logger.log_exc("ListResources failed at %s" %(server.url)) - return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"} - - if Callids().already_handled(call_id): return "" - - # get slice's hrn from options - xrn = options.get('geni_slice_urn', '') - (hrn, type) = urn_to_hrn(xrn) - if 'geni_compressed' in options: - del(options['geni_compressed']) - - # get the rspec's return format from options - rspec_version = version_manager.get_version(options.get('rspec_version')) - version_string = "rspec_%s" % (rspec_version.to_string()) - - # look in cache first - if caching and api.cache and not xrn: - rspec = api.cache.get(version_string) - if rspec: - return rspec - - # get the callers hrn - valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0] - caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - - # attempt to use delegated credential first - cred = api.getDelegatedCredential(creds) - if not cred: - cred = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - # prevent infinite loop. 
Dont send request back to caller - # unless the caller is the aggregate's SM - if caller_hrn == aggregate and aggregate != api.hrn: - continue - - # get the rspec from the aggregate - interface = api.aggregates[aggregate] - server = api.get_server(interface, cred) - threads.run(_ListResources, aggregate, server, [cred], options, call_id) - - - results = threads.get_results() - rspec_version = version_manager.get_version(options.get('rspec_version')) - if xrn: - result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest') - else: - result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad') - rspec = RSpec(version=result_version) - for result in results: - add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"]) - if result["status"]=="success": - try: - rspec.version.merge(result["rspec"]) - except: - api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec") - - # cache the result - if caching and api.cache and not xrn: - api.cache.add(version_string, rspec.toxml()) - - print >>sys.stderr, "\r\n slice_manager \r\n" , rspec - return rspec.toxml() - -#def ListResources(api, creds, options, call_id): - - #if Callids().already_handled(call_id): return "" - - ## get slice's hrn from options - #xrn = options.get('geni_slice_urn', '') - #(hrn, type) = urn_to_hrn(xrn) - #print >>sys.stderr, " SM_ListResources xrn " , xrn - ##print >>sys.stderr, " SM ListResources api.__dict__ " , api.__dict__.keys() - ##print >>sys.stderr, " SM ListResources dir(api)" , dir(api) - #print >>sys.stderr, " \r\n avant RspecVersion \r\n \r\n" - ## get the rspec's return format from options - #rspec_version = RSpecVersion(options.get('rspec_version')) - #print >>sys.stderr, " \r\n \r\n ListResources RSpecVersion ", rspec_version - #version_string = "rspec_%s" % (rspec_version.get_version_name()) - - ##panos adding the info option to the caching key (can be improved) - #if options.get('info'): - #version_string = version_string + "_"+options.get('info') - - #print>>sys.stderr,"version string = ",version_string - - ## look in cache first - #if caching and api.cache and not xrn: - #print>>sys.stderr," \r\n caching %s and api.cache %s and not xrn %s"%(caching , api.cache,xrn) - #rspec = api.cache.get(version_string) - #if rspec: - #return rspec - - ## get the callers hrn - #print >>sys.stderr, " SM ListResources get the callers hrn " - #valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0] - #caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() - #print >>sys.stderr, " \r\n SM ListResources get the callers caller_hrn hrn %s "%(caller_hrn) - ## attempt to use delegated credential first - #credential = api.getDelegatedCredential(creds) - #print >>sys.stderr, " \r\n SM ListResources get the callers credential %s "%(credential) - #if not credential: - #credential = api.getCredential() - #threads = ThreadManager() - #print >>sys.stderr, " \r\n SM ListResources get the callers api.aggregates %s "%(api.aggregates) - #for aggregate in api.aggregates: - ## prevent infinite loop. 
Dont send request back to caller - ## unless the caller is the aggregate's SM - #if caller_hrn == aggregate and aggregate != api.hrn: - #continue - ## get the rspec from the aggregate - #server = api.aggregates[aggregate] - #print >>sys.stderr, " Slice Mgr ListResources, server" ,server - #my_opts = copy(options) - #my_opts['geni_compressed'] = False - #threads.run(server.ListResources, credential, my_opts, call_id) - #print >>sys.stderr, "\r\n !!!!!!!!!!!!!!!! \r\n" - #results = threads.get_results() - ##results.append(open('/root/protogeni.rspec', 'r').read()) - #rspec_version = RSpecVersion(my_opts.get('rspec_version')) - #if rspec_version['type'].lower() == 'protogeni': - #rspec = PGRSpec() - #else: - #rspec = SfaRSpec() - - #for result in results: - #print >>sys.stderr, "\r\n slice_manager result" , result - #try: - #print >>sys.stderr, "avant merge" , rspec - #rspec.merge(result) - #print >>sys.stderr, "AFTERMERGE" , rspec - #except: - #raise - #api.logger.info("SM.ListResources: Failed to merge aggregate rspec") - - ## cache the result - #if caching and api.cache and not xrn: - #api.cache.add(version_string, rspec.toxml()) - - #print >>sys.stderr, "\r\n slice_manager \r\n" , rspec - #return rspec.toxml() - -# first draft at a merging SliverStatus -def SliverStatus(api, slice_xrn, creds, call_id): - if Callids().already_handled(call_id): return {} - # attempt to use delegated credential first - credential = api.getDelegatedCredential(creds) - if not credential: - credential = api.getCredential() - threads = ThreadManager() - for aggregate in api.aggregates: - server = api.aggregates[aggregate] - threads.run (server.SliverStatus, slice_xrn, credential, call_id) - results = threads.get_results() - - # get rid of any void result - e.g. when call_id was hit where by convention we return {} - results = [ result for result in results if result and result['geni_resources']] - - # do not try to combine if there's no result - if not results : return {} - - # otherwise let's merge stuff - overall = {} - - # mmh, it is expected that all results carry the same urn - overall['geni_urn'] = results[0]['geni_urn'] - - # consolidate geni_status - simple model using max on a total order - states = [ 'ready', 'configuring', 'failed', 'unknown' ] - # hash name to index - shash = dict ( zip ( states, range(len(states)) ) ) - def combine_status (x,y): - return shash [ max (shash(x),shash(y)) ] - overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' ) - - # {'ready':0,'configuring':1,'failed':2,'unknown':3} - # append all geni_resources - overall['geni_resources'] = \ - reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , []) - - return overall - -def main(): - r = RSpec() - r.parseFile(sys.argv[1]) - rspec = r.toDict() - CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice') - -if __name__ == "__main__": - main() - diff --git a/sfa/senslab/OARrestapi.py b/sfa/senslab/OARrestapi.py index 9e08418d..4d32d8d0 100644 --- a/sfa/senslab/OARrestapi.py +++ b/sfa/senslab/OARrestapi.py @@ -5,7 +5,7 @@ import json import datetime from time import gmtime, strftime from sfa.senslab.parsing import * -from sfa.senslab.SenslabImportUsers import * +#from sfa.senslab.SenslabImportUsers import * import urllib import urllib2 from sfa.util.config import Config diff --git a/sfa/senslab/SenslabImport.py b/sfa/senslab/SenslabImport.py deleted file mode 100644 index 716e484c..00000000 --- a/sfa/senslab/SenslabImport.py +++ 
/dev/null @@ -1,274 +0,0 @@ - -# -# The import tool assumes that the existing PLC hierarchy should all be part -# of "planetlab.us" (see the root_auth and level1_auth variables below). -# -# Public keys are extracted from the users' SSH keys automatically and used to -# create GIDs. This is relatively experimental as a custom tool had to be -# written to perform conversion from SSH to OpenSSL format. It only supports -# RSA keys at this time, not DSA keys. -## - -import getopt -import sys -import tempfile -from sfa.util.sfalogging import _SfaLogger -#from sfa.util.sfalogging import sfa_logger_goes_to_import,sfa_logger - -from sfa.util.record import * -from sfa.util.table import SfaTable -from sfa.util.xrn import get_authority, hrn_to_urn -from sfa.util.plxrn import email_to_hrn -from sfa.util.config import Config -from sfa.trust.certificate import convert_public_key, Keypair -from sfa.trust.trustedroots import * -from sfa.trust.hierarchy import * -from sfa.trust.gid import create_uuid - - - -def _un_unicode(str): - if isinstance(str, unicode): - return str.encode("ascii", "ignore") - else: - return str - -def _cleanup_string(str): - # pgsql has a fit with strings that have high ascii in them, so filter it - # out when generating the hrns. - tmp = "" - for c in str: - if ord(c) < 128: - tmp = tmp + c - str = tmp - - str = _un_unicode(str) - str = str.replace(" ", "_") - str = str.replace(".", "_") - str = str.replace("(", "_") - str = str.replace("'", "_") - str = str.replace(")", "_") - str = str.replace('"', "_") - return str - -class SenslabImport: - - def __init__(self): - self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog') - - #sfa_logger_goes_to_import() - #self.logger = sfa_logger() - self.AuthHierarchy = Hierarchy() - self.config = Config() - self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config)) - print>>sys.stderr, "\r\n ========= \t\t SenslabImport TrustedRoots\r\n" , self.TrustedRoots - self.plc_auth = self.config.get_plc_auth() - print>>sys.stderr, "\r\n ========= \t\t SenslabImport self.plc_auth %s \r\n" %(self.plc_auth ) - self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH - - def create_sm_client_record(self): - """ - Create a user record for the Slicemanager service. 
- """ - hrn = self.config.SFA_INTERFACE_HRN + '.slicemanager' - urn = hrn_to_urn(hrn, 'user') - if not self.AuthHierarchy.auth_exists(urn): - self.logger.info("Import: creating Slice Manager user") - self.AuthHierarchy.create_auth(urn) - - auth_info = self.AuthHierarchy.get_auth_info(hrn) - table = SfaTable() - sm_user_record = table.find({'type': 'user', 'hrn': hrn}) - if not sm_user_record: - record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="user", pointer=-1) - record['authority'] = get_authority(record['hrn']) - table.insert(record) - - def create_top_level_auth_records(self, hrn): - """ - Create top level records (includes root and sub authorities (local/remote) - """ - print>>sys.stderr, "\r\n =========SenslabImport create_top_level_auth_records\r\n" - urn = hrn_to_urn(hrn, 'authority') - # make sure parent exists - parent_hrn = get_authority(hrn) - if not parent_hrn: - parent_hrn = hrn - if not parent_hrn == hrn: - self.create_top_level_auth_records(parent_hrn) - - # create the authority if it doesnt already exist - if not self.AuthHierarchy.auth_exists(urn): - self.logger.info("Import: creating top level authorities") - self.AuthHierarchy.create_auth(urn) - - # create the db record if it doesnt already exist - auth_info = self.AuthHierarchy.get_auth_info(hrn) - table = SfaTable() - auth_record = table.find({'type': 'authority', 'hrn': hrn}) - - if not auth_record: - auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1) - auth_record['authority'] = get_authority(auth_record['hrn']) - self.logger.info("Import: inserting authority record for %s"%hrn) - table.insert(auth_record) - print>>sys.stderr, "\r\n ========= \t\t SenslabImport NO AUTH RECORD \r\n" ,auth_record['authority'] - - - def create_interface_records(self): - """ - Create a record for each SFA interface - """ - # just create certs for all sfa interfaces even if they - # arent enabled - interface_hrn = self.config.SFA_INTERFACE_HRN - interfaces = ['authority+sa', 'authority+am', 'authority+sm'] - table = SfaTable() - auth_info = self.AuthHierarchy.get_auth_info(interface_hrn) - pkey = auth_info.get_pkey_object() - for interface in interfaces: - interface_record = table.find({'type': interface, 'hrn': interface_hrn}) - if not interface_record: - self.logger.info("Import: interface %s %s " % (interface_hrn, interface)) - urn = hrn_to_urn(interface_hrn, interface) - gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1) - record['authority'] = get_authority(interface_hrn) - print>>sys.stderr,"\r\n ==========create_interface_records", record['authority'] - table.insert(record) - - def import_person(self, parent_hrn, person, keys): - """ - Register a user record - """ - hrn = email_to_hrn(parent_hrn, person['email']) - - print >>sys.stderr , "\r\n_____00______SenslabImport : person", person - # ASN.1 will have problems with hrn's longer than 64 characters - if len(hrn) > 64: - hrn = hrn[:64] - print >>sys.stderr , "\r\n_____0______SenslabImport : parent_hrn", parent_hrn - self.logger.info("Import: person %s"%hrn) - key_ids = [] - # choper les cles ssh des users , sont ils dans OAR - if 'key_ids' in person and person['key_ids']: - key_ids = person["key_ids"] - # get the user's private key from the SSH keys they have uploaded - # to planetlab - - print >>sys.stderr , "\r\n_____1______SenslabImport : self.plc_auth %s \r\n \t keys %s key[0] %s" %(self.plc_auth,keys, keys[0]) - key = 
keys[0]['key'] - pkey = convert_public_key(key) - print >>sys.stderr , "\r\n_____2______SenslabImport : key %s pkey %s"% (key,pkey.as_pem()) - if not pkey: - pkey = Keypair(create=True) - else: - # the user has no keys - self.logger.warning("Import: person %s does not have a PL public key"%hrn) - # if a key is unavailable, then we still need to put something in the - # user's GID. So make one up. - pkey = Keypair(create=True) - print >>sys.stderr , "\r\n___ELSE________SenslabImport pkey : %s"%(pkey.key) - # create the gid - urn = hrn_to_urn(hrn, 'user') - print >>sys.stderr , "\r\n \t\t : urn ", urn - person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - table = SfaTable() - person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id']) - person_record['authority'] = get_authority(person_record['hrn']) - existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']}) - if not existing_records: - table.insert(person_record) - else: - self.logger.info("Import: %s exists, updating " % hrn) - existing_record = existing_records[0] - person_record['record_id'] = existing_record['record_id'] - table.update(person_record) - - def import_slice(self, parent_hrn, slice): - #slicename = slice['name'].split("_",1)[-1] - - slicename = _cleanup_string(slice['name']) - - if not slicename: - self.logger.error("Import: failed to parse slice name %s" %slice['name']) - return - - hrn = parent_hrn + "." + slicename - self.logger.info("Import: slice %s"%hrn) - - pkey = Keypair(create=True) - urn = hrn_to_urn(hrn, 'slice') - slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id']) - slice_record['authority'] = get_authority(slice_record['hrn']) - table = SfaTable() - existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']}) - if not existing_records: - table.insert(slice_record) - else: - self.logger.info("Import: %s exists, updating " % hrn) - existing_record = existing_records[0] - slice_record['record_id'] = existing_record['record_id'] - table.update(slice_record) - - def import_node(self, hrn, node): - self.logger.info("Import: node %s" % hrn) - # ASN.1 will have problems with hrn's longer than 64 characters - if len(hrn) > 64: - hrn = hrn[:64] - - table = SfaTable() - node_record = table.find({'type': 'node', 'hrn': hrn}) - pkey = Keypair(create=True) - urn = hrn_to_urn(hrn, 'node') - node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id']) - node_record['authority'] = get_authority(node_record['hrn']) - existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']}) - if not existing_records: - table.insert(node_record) - else: - self.logger.info("Import: %s exists, updating " % hrn) - existing_record = existing_records[0] - node_record['record_id'] = existing_record['record_id'] - table.update(node_record) - - - def import_site(self, parent_hrn, site): - plc_auth = self.plc_auth - sitename = site['login_base'] - sitename = _cleanup_string(sitename) - hrn = parent_hrn + "." 
+ sitename - - urn = hrn_to_urn(hrn, 'authority') - self.logger.info("Import: site %s"%hrn) - - # create the authority - if not self.AuthHierarchy.auth_exists(urn): - self.AuthHierarchy.create_auth(urn) - - auth_info = self.AuthHierarchy.get_auth_info(urn) - - table = SfaTable() - auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id']) - auth_record['authority'] = get_authority(auth_record['hrn']) - existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']}) - if not existing_records: - table.insert(auth_record) - else: - self.logger.info("Import: %s exists, updating " % hrn) - existing_record = existing_records[0] - auth_record['record_id'] = existing_record['record_id'] - table.update(auth_record) - - return hrn - - - def delete_record(self, hrn, type): - # delete the record - table = SfaTable() - record_list = table.find({'type': type, 'hrn': hrn}) - for record in record_list: - self.logger.info("Import: removing record %s %s" % (type, hrn)) - table.remove(record) diff --git a/sfa/senslab/SenslabImportUsers.py b/sfa/senslab/SenslabImportUsers.py deleted file mode 100644 index 8109f343..00000000 --- a/sfa/senslab/SenslabImportUsers.py +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/python - -# import modules used here -- sys is a very standard one -import sys -import httplib -import json -import datetime -import time -from sfa.senslab.parsing import * - - - - -class SenslabImportUsers: - - - def __init__(self): - self.person_list = [] - self.keys_list = [] - self.slices_list= [] - #self.resources_fulldict['keys'] = [] - #self.InitPersons() - #self.InitKeys() - #self.InitSlices() - - - #def InitSlices(self): - #slices_per_site = {} - #dflt_slice = { 'instantiation': None, 'description': "Senslab Slice Test", 'node_ids': [], 'url': "http://localhost.localdomain/", 'max_nodes': 256, 'site_id': 3,'peer_slice_id': None, 'slice_tag_ids': [], 'peer_id': None, 'hrn' :None} - #for person in self.person_list: - #if 'user' or 'pi' in person['roles']: - #def_slice = {} - ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice person %s \r\n \rn " %(person['person_id']) - #def_slice['person_ids'] = [] - #def_slice['person_ids'].append(person['person_id']) - #def_slice['slice_id'] = person['person_id'] - #def_slice['creator_person_id'] = person['person_id'] - #extime = datetime.datetime.utcnow() - #def_slice['created'] = int(time.mktime(extime.timetuple())) - #extime = extime + datetime.timedelta(days=365) - #def_slice['expires'] = int(time.mktime(extime.timetuple())) - ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice expires %s \r\n \r\n "%(def_slice['expires']) - #def_slice['name'] = person['email'].replace('@','_',1) - ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice %s \r\n \r\n " %(def_slice['name']) - #def_slice.update(dflt_slice) - #self.slices_list.append(def_slice) - - ##print>>sys.stderr, "InitSlices SliceLIST", self.slices_list - - #def InitPersons(self): - #persons_per_site = {} - #person_id = 7 - #persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'a_rioot@senslab.fr', 'key_ids':[1], 'roles': ['pi'], 'role_ids':[20],'first_name':'A','last_name':'rioot'} - #person_id = 8 - #persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'lost@senslab.fr','key_ids':[1],'roles': ['pi'], 'role_ids':[20],'first_name':'L','last_name':'lost'} - #person_id = 9 - #persons_per_site[person_id] = {'person_id': 
person_id,'site_ids': [3],'email': 'user@senslab.fr','key_ids':[1],'roles': ['user'], 'role_ids':[1],'first_name':'U','last_name':'senslab'} - #for person_id in persons_per_site.keys(): - #person = persons_per_site[person_id] - #if person['person_id'] not in self.person_list: - #self.person_list.append(person) - ##print>>sys.stderr, "InitPersons PERSON DICLIST", self.person_list - - - #def InitKeys(self): - ##print>>sys.stderr, " InitKeys HEYYYYYYY\r\n" - - #self.keys_list = [{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 7, 'key_id':1, 'peer_id':None }, - #{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 8, 'key_id':1, 'peer_id':None }, - #{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 9, 'key_id':1, 'peer_id':None }] - - - - - #def GetPersons(self, person_filter=None, return_fields=None): - ##print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s list: %s" %(person_filter,return_fields, self.person_list) - #if not self.person_list : - #print>>sys.stderr, " \r\n ========>GetPersons NO PERSON LIST DAMMIT<========== \r\n" - - #if not (person_filter or return_fields): - #return self.person_list - - #return_person_list= [] - #return_person_list = parse_filter(self.person_list,person_filter ,'persons', return_fields) - #return return_person_list - - - def GetPIs(self,site_id): - return_person_list= [] - for person in self.person_list : - if site_id in person['site_ids'] and 'pi' in person['roles'] : - return_person_list.append(person['person_id']) - #print>>sys.stderr, " \r\n GetPIs return_person_list %s :" %(return_person_list) - return return_person_list - - - def GetKeys(self,key_filter=None, return_fields=None): - return_key_list= [] - print>>sys.stderr, " \r\n GetKeys" - - if not (key_filter or return_fields): - return self.keys_list - return_key_list = parse_filter(self.keys_list,key_filter ,'keys', return_fields) - return return_key_list - - #return_key_list= [] - #print>>sys.stderr, " \r\n GetKeys" - - #if not (key_filter or return_fields): - #return self.keys_list - - #elif key_filter or return_fields: - #for key in self.keys_list: - #tmp_key = {} - #if key_filter: - #for k_filter in key_filter: - #if key['key_id'] == k_filter : - #if return_fields: - #for field in return_fields: - #if field in key.keys(): - #tmp_key[field] = key[field] - #else: - 
#tmp_key = key - - #print>>sys.stderr, " \r\n tmp_key",tmp_key - #return_key_list.append(tmp_key) - #print>>sys.stderr," \r\n End GetKeys with filter ", return_key_list - #return return_key_list - - - - - def AddSlice(self, slice_fields): - print>>sys.stderr, " \r\n \r\nAddSlice " - - - def AddPersonToSlice(self,person_id_or_email, slice_id_or_name): - print>>sys.stderr, " \r\n \r\n AddPersonToSlice" - - def DeletePersonFromSlice(self,person_id_or_email, slice_id_or_name): - print>>sys.stderr, " \r\n \r\n DeletePersonFromSlice " diff --git a/sfa/senslab/slabdriver.py b/sfa/senslab/slabdriver.py index 5d7a1522..d7c68780 100644 --- a/sfa/senslab/slabdriver.py +++ b/sfa/senslab/slabdriver.py @@ -27,7 +27,7 @@ from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicenam ## thierry : please avoid wildcard imports :) from sfa.senslab.OARrestapi import OARrestapi from sfa.senslab.LDAPapi import LDAPapi -from sfa.senslab.SenslabImportUsers import SenslabImportUsers +#from sfa.senslab.SenslabImportUsers import SenslabImportUsers from sfa.senslab.parsing import parse_filter from sfa.senslab.slabpostgres import SlabDB from sfa.senslab.slabaggregate import SlabAggregate @@ -64,7 +64,7 @@ class SlabDriver(Driver): #self.oar = OARapi() self.oar = OARrestapi() self.ldap = LDAPapi() - self.users = SenslabImportUsers() + #self.users = SenslabImportUsers() self.time_format = "%Y-%m-%d %H:%M:%S" self.db = SlabDB() #self.logger=sfa_logger()
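For reference, the geni_status consolidation in the removed SliverStatus draft above ranks states on a total order ('ready' < 'configuring' < 'failed' < 'unknown') and keeps the worst status seen across aggregates. As written there, combine_status calls the dict (shash(x)) and then indexes the dict by an integer rank, so it would fail at runtime. A minimal self-contained sketch of the intended logic, with illustrative names only, is:

    # Rank states from best to worst; the combined status is the worst one seen.
    from functools import reduce   # builtin on Python 2, imported for Python 3

    STATES = ['ready', 'configuring', 'failed', 'unknown']
    RANK = dict(zip(STATES, range(len(STATES))))   # state name -> rank

    def combine_status(x, y):
        # map both names to ranks, keep the worse one, then map back to a name
        return STATES[max(RANK[x], RANK[y])]

    def overall_status(results):
        return reduce(combine_status,
                      [r['geni_status'] for r in results], 'ready')

    # overall_status([{'geni_status': 'ready'}, {'geni_status': 'failed'}])
    # -> 'failed'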
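Similarly, the removed SenslabImport.import_person shows the usual SFA pattern for building a user GID: convert the first uploaded SSH public key to OpenSSL format, and fall back to a freshly generated keypair when no usable key exists. A short sketch of that fallback, reusing the convert_public_key and Keypair helpers already imported by the removed file (the function name and the shape of the keys argument are illustrative assumptions):

    from sfa.trust.certificate import convert_public_key, Keypair

    def pkey_for_gid(keys):
        # keys: list of dicts whose 'key' field holds an SSH public key string
        if keys:
            pkey = convert_public_key(keys[0]['key'])   # SSH -> OpenSSL format
            if pkey:
                return pkey
        # no usable key: generate one so the GID can still be created
        return Keypair(create=True)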