'sfa/managers',
'sfa/importer',
'sfa/plc',
+ 'sfa/senslab',
'sfa/rspecs',
'sfa/rspecs/elements',
'sfa/rspecs/elements/versions',
'keys': record['keys'],
'email': record['email'], # needed for MyPLC
'person_id': record['person_id'], # needed for MyPLC
- 'first_name': record['first_name'], # needed for MyPLC
- 'last_name': record['last_name'], # needed for MyPLC
+ #'first_name': record['first_name'], # needed for MyPLC
+ #'last_name': record['last_name'], # needed for MyPLC
'slice_record': slice_record, # needed for legacy refresh peer
'key_ids': record['key_ids'] # needed for legacy refresh peer
}
--- /dev/null
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+import sfa.senslab.slabdriver
+import sfa.managers.registry_manager_slab
+import sfa.managers.slice_manager_slab
+import sfa.managers.aggregate_manager_slab
+
+class slab (Generic):
+
+ # use the standard api class
+ def api_class (self):
+ return sfa.server.sfaapi.SfaApi
+
+ # the manager classes for the server-side services
+ def registry_manager_class (self) :
+ return sfa.managers.registry_manager_slab
+ def slicemgr_manager_class (self) :
+ return sfa.managers.slice_manager_slab
+ def aggregate_manager_class (self) :
+ return sfa.managers.aggregate_manager_slab
+
+ # driver class for server-side services, talk to the whole testbed
+ def driver_class (self):
+ return sfa.senslab.slabdriver.SlabDriver
+
+ # slab does not have a component manager yet
+ # manager class
+ def component_manager_class (self):
+ return None
+ # driver_class
+ def component_driver_class (self):
+ return None
+
+
--- /dev/null
+#!/usr/bin/python
+
+import datetime
+import time
+import traceback
+import sys
+import re
+from types import StringTypes
+
+from sfa.util.faults import *
+from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename, hostname_to_urn
+#from sfa.util.rspec import *
+#from sfa.util.specdict import *
+from sfa.util.record import SfaRecord
+from sfa.util.policy import Policy
+from sfa.util.record import *
+#from sfa.util.sfaticket import SfaTicket
+
+#from sfa.senslab.slices import *
+
+from sfa.trust.credential import Credential
+import sfa.plc.peers as peers
+from sfa.plc.network import *
+from sfa.senslab.OARrestapi import *
+#from sfa.senslab.api import SfaAPI
+#from sfa.plc.aggregate import Aggregate
+#from sfa.plc.slices import *
+from sfa.util.version import version_core
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
+from sfa.senslab.OARrspec import *
+#from sfa.plc.aggregate import Aggregate
+
+def GetVersion(api):
+    """GENI GetVersion for the senslab aggregate: advertise the rspec
+    versions this AM accepts (request) and produces (ad)."""
+    print>>sys.stderr, "\r\n AGGREGATE GET_VERSION "
+    #xrn=Xrn(api.hrn)
+    #supported_rspecs = [dict(pg_rspec_request_version), dict(sfa_rspec_version)]
+    #ad_rspec_versions = [dict(pg_rspec_ad_version), dict(sfa_rspec_version)]
+    version_manager = VersionManager()
+    ad_rspec_versions = []
+    request_rspec_versions = []
+    # sort every known rspec version into the ad and/or request lists
+    for rspec_version in version_manager.versions:
+        if rspec_version.content_type in ['*', 'ad']:
+            ad_rspec_versions.append(rspec_version.to_dict())
+        if rspec_version.content_type in ['*', 'request']:
+            request_rspec_versions.append(rspec_version.to_dict())
+    # "sfa 1" is the default advertisement format
+    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+    xrn=Xrn(api.hrn)
+    version_more = {'interface':'aggregate',
+                    'testbed':'senslab',
+                    'hrn':xrn.get_hrn(),
+                    'request_rspec_versions': request_rspec_versions,
+                    'ad_rspec_versions': ad_rspec_versions,
+                    'default_ad_rspec': default_rspec_version
+                    }
+    print>>sys.stderr, "\r\n AGGREGATE GET_VERSION : %s \r\n \r\n" %(version_core(version_more))
+    return version_core(version_more)
+
+def __get_registry_objects(slice_xrn, creds, users):
+    """
+    Build the site / slice / user records that an SFA registry would
+    return for *slice_xrn* if we could contact it directly.
+
+    Returns None when *users* is empty, otherwise a dict with keys
+    'site', 'slice_record' and 'users' (users indexed by email).
+    """
+    hrn, type = urn_to_hrn(slice_xrn)
+
+    hrn_auth = get_authority(hrn)
+
+    # Build up objects that an SFA registry would return if SFA
+    # could contact the slice's registry directly
+    reg_objects = None
+
+    if users:
+        # dont allow special characters in the site login base
+        #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
+        #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
+        slicename = hrn_to_pl_slicename(hrn)
+        login_base = slicename.split('_')[0]
+        reg_objects = {}
+        site = {}
+        site['site_id'] = 0
+        site['name'] = 'geni.%s' % login_base
+        site['enabled'] = True
+        site['max_slices'] = 100
+
+        # Note:
+        # Is it okay if this login base is the same as one already at this myplc site?
+        # Do we need uniqueness? Should use hrn_auth instead of just the leaf perhaps?
+        site['login_base'] = login_base
+        site['abbreviated_name'] = login_base
+        site['max_slivers'] = 1000
+        reg_objects['site'] = site
+
+        slice = {}
+
+        #extime = Credential(string=creds[0]).get_expiration()
+        ## If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+        #if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+        # expiration is hard-wired to 60 days from now
+        extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+        slice['expires'] = int(time.mktime(extime.timetuple()))
+        slice['hrn'] = hrn
+        slice['name'] = hrn_to_pl_slicename(hrn)
+        slice['url'] = hrn
+        slice['description'] = hrn
+        slice['pointer'] = 0
+        reg_objects['slice_record'] = slice
+
+        reg_objects['users'] = {}
+        for user in users:
+            user['key_ids'] = []
+            hrn, _ = urn_to_hrn(user['urn'])
+            # synthesize placeholder identity fields from the user's hrn
+            user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
+            user['first_name'] = hrn
+            user['last_name'] = hrn
+            reg_objects['users'][user['email']] = user
+
+    return reg_objects
+
+def __get_hostnames(nodes):
+ hostnames = []
+ for node in nodes:
+ hostnames.append(node.hostname)
+ return hostnames
+
+def SliverStatus(api, slice_xrn, creds, call_id):
+ if Callids().already_handled(call_id): return {}
+
+ (hrn, type) = urn_to_hrn(slice_xrn)
+ # find out where this slice is currently running
+ api.logger.info(hrn)
+ slicename = hrn_to_pl_slicename(hrn)
+
+ slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids','person_ids','name','expires'])
+ if len(slices) == 0:
+ raise Exception("Slice %s not found (used %s as slicename internally)" % slice_xrn, slicename)
+ slice = slices[0]
+
+ # report about the local nodes only
+ nodes = api.plshell.GetNodes(api.plauth, {'node_id':slice['node_ids'],'peer_id':None},
+ ['hostname', 'site_id', 'boot_state', 'last_contact'])
+ site_ids = [node['site_id'] for node in nodes]
+ sites = api.plshell.GetSites(api.plauth, site_ids, ['site_id', 'login_base'])
+ sites_dict = dict ( [ (site['site_id'],site['login_base'] ) for site in sites ] )
+
+ result = {}
+ top_level_status = 'unknown'
+ if nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = Xrn(slice_xrn, 'slice').get_urn()
+ result['pl_login'] = slice['name']
+ result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+
+ resources = []
+ for node in nodes:
+ res = {}
+ res['pl_hostname'] = node['hostname']
+ res['pl_boot_state'] = node['boot_state']
+ res['pl_last_contact'] = node['last_contact']
+ if node['last_contact'] is not None:
+ res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+ res['geni_urn'] = hostname_to_urn(api.hrn, sites_dict[node['site_id']], node['hostname'])
+ if node['boot_state'] == 'boot':
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_staus = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ # XX remove me
+ #api.logger.info(result)
+ # XX remove me
+ return result
+
+def CreateSliver(api, slice_xrn, creds, rspec_string, users, call_id):
+    """
+    Create the sliver[s] (slice) at this aggregate.
+    Verify HRN and initialize the slice record in PLC if necessary.
+
+    Returns the rspec for the created slice.
+    NOTE(review): 'Slices' is not defined in this module -- the
+    'from sfa.plc.slices import *' import is commented out at the top
+    of the file, so this call will raise NameError unless another
+    wildcard import provides it; TODO confirm.
+    """
+    print>>sys.stderr, " \r\n AGGREGATE CreateSliver-----------------> "
+    if Callids().already_handled(call_id): return ""
+
+    #reg_objects = __get_registry_objects(slice_xrn, creds, users)
+    #aggregate = Aggregate(api)
+    aggregate = OARrspec(api)
+    print>>sys.stderr, " \r\n AGGREGATE CreateSliver DAFUQ IS THIS ?-----------------> aggregate " , aggregate
+    slices = Slices(api)
+    print>>sys.stderr, " \r\n AGGREGATE CreateSliver DAFUQ IS THAT ?-----------------> Slices " , slices
+    (hrn, type) = urn_to_hrn(slice_xrn)
+
+    peer = slices.get_peer(hrn)
+    sfa_peer = slices.get_sfa_peer(hrn)
+    slice_record=None
+    if users:
+        slice_record = users[0].get('slice_record', {})
+
+    #registry = api.registries[api.hrn]
+    #credential = api.getCredential()
+    #(site_id, remote_site_id) = slices.verify_site(registry, credential, hrn,
+    #peer, sfa_peer, reg_objects)
+    # parse rspec
+    rspec = RSpec(rspec_string)
+    requested_attributes = rspec.version.get_slice_attributes()
+
+    # ensure site record exists
+    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+    # ensure slice record exists
+    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+    # ensure person records exists
+    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+    # ensure slice attributes exists
+    slices.verify_slice_attributes(slice, requested_attributes)
+
+    # add/remove slice from nodes
+    requested_slivers = [str(host) for host in rspec.version.get_nodes_with_slivers()]
+    slices.verify_slice_nodes(slice, requested_slivers, peer)
+
+
+
+    #nodes = api.oar.GetNodes(slice['node_ids'], ['hostname'])
+    #current_slivers = [node['hostname'] for node in nodes]
+    #rspec = parse_rspec(rspec_string)
+    #requested_slivers = [str(host) for host in rspec.get_nodes_with_slivers()]
+    ## remove nodes not in rspec
+    #deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+
+    ## add nodes from rspec
+    #added_nodes = list(set(requested_slivers).difference(current_slivers))
+
+    #try:
+    #if peer:
+    #api.plshell.UnBindObjectFromPeer(api.plauth, 'slice', slice['slice_id'], peer)
+
+    #api.plshell.AddSliceToNodes(api.plauth, slice['name'], added_nodes)
+    #api.plshell.DeleteSliceFromNodes(api.plauth, slice['name'], deleted_nodes)
+
+    ## TODO: update slice tags
+    ##network.updateSliceTags()
+
+    #finally:
+    #if peer:
+    #api.plshell.BindObjectToPeer(api.plauth, 'slice', slice.id, peer,
+    #slice.peer_id)
+    # hanlde MyPLC peer association.
+    # only used by plc and ple.
+    #slices.handle_peer(site, slice, persons, peer)
+
+    return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
+
+
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+ if Callids().already_handled(call_id): return True
+ (hrn, type) = urn_to_hrn(xrn)
+ slicename = hrn_to_pl_slicename(hrn)
+ slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(hrn)
+ slice = slices[0]
+ requested_time = utcparse(expiration_time)
+ record = {'expires': int(time.mktime(requested_time.timetuple()))}
+ try:
+ api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
+ return True
+ except:
+ return False
+
+def start_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+ slicename = hrn_to_pl_slicename(hrn)
+ slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(hrn)
+ slice_id = slices[0]['slice_id']
+ slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+ # just remove the tag if it exists
+ if slice_tags:
+ api.plshell.DeleteSliceTag(api.plauth, slice_tags[0]['slice_tag_id'])
+
+ return 1
+
+def stop_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+ slicename = hrn_to_pl_slicename(hrn)
+ slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(hrn)
+ slice_id = slices[0]['slice_id']
+ slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'})
+ if not slice_tags:
+ api.plshell.AddSliceTag(api.plauth, slice_id, 'enabled', '0')
+ elif slice_tags[0]['value'] != "0":
+ tag_id = attributes[0]['slice_tag_id']
+ api.plshell.UpdateSliceTag(api.plauth, tag_id, '0')
+ return 1
+
+def reset_slice(api, xrn):
+    """No-op: resetting a sliver is not implemented at this interface."""
+    # XX not implemented at this interface
+    return 1
+
+def DeleteSliver(api, xrn, creds, call_id):
+    """Remove the slice named by *xrn* from all of its nodes.
+
+    Idempotent: returns 1 even when the slice does not exist."""
+    if Callids().already_handled(call_id): return ""
+    (hrn, type) = urn_to_hrn(xrn)
+    slicename = hrn_to_pl_slicename(hrn)
+    slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
+    if not slices:
+        return 1
+    slice = slices[0]
+
+    # determine if this is a peer slice
+    peer = peers.get_peer(api, hrn)
+    try:
+        if peer:
+            # detach from the peer so plc lets us modify the slice
+            api.plshell.UnBindObjectFromPeer(api.plauth, 'slice', slice['slice_id'], peer)
+        api.plshell.DeleteSliceFromNodes(api.plauth, slicename, slice['node_ids'])
+    finally:
+        # always re-attach to the peer, even if the delete failed
+        if peer:
+            api.plshell.BindObjectToPeer(api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+    return 1
+
+# xxx Thierry : caching at the aggregate level sounds wrong...
+#caching=True
+caching=False
+def ListSlices(api, creds, call_id):
+    """Return the urns of every local (non-peer) slice known to plc.
+    Results are cached when the module-level 'caching' flag is set."""
+    if Callids().already_handled(call_id): return []
+    # look in cache first
+    if caching and api.cache:
+        slices = api.cache.get('slices')
+        if slices:
+            return slices
+
+    # get data from db
+    slices = api.plshell.GetSlices(api.plauth, {'peer_id': None}, ['name'])
+    slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
+    slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+    # cache the result
+    if caching and api.cache:
+        api.cache.add('slices', slice_urns)
+
+    return slice_urns
+
+ #ajouter caching cf pl manager
+def ListResources(api, creds, options,call_id):
+
+ print >>sys.stderr, 'RESOURCES AGGREGATE'
+ OARImporter = OARapi()
+
+ if Callids().already_handled(call_id): return ""
+ # get slice's hrn from options
+ xrn = options.get('geni_slice_urn', '')
+ (hrn, type) = urn_to_hrn(xrn)
+
+ version_manager = VersionManager()
+ # get the rspec's return format from options
+ rspec_version = version_manager.get_version(options.get('rspec_version'))
+ version_string = "rspec_%s" % (rspec_version.to_string())
+
+ #panos adding the info option to the caching key (can be improved)
+ if options.get('info'):
+ version_string = version_string + "_"+options.get('info', 'default')
+
+ print >>sys.stderr, "[aggregate] version string = %s "%(version_string)
+
+ # look in cache first
+ if caching and api.cache and not xrn:
+ rspec = api.cache.get(version_string)
+ if rspec:
+ api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
+ return rspec
+
+ #panos: passing user-defined options
+ #print "manager options = ",options
+ OAR_rspec = OARrspec(api,options)
+ #aggregate = Aggregate(api, options)
+ rspec = OAR_rspec.get_rspec(slice_xrn=xrn, version=rspec_version)
+
+ # cache the result
+ if caching and api.cache and not xrn:
+ api.cache.add(version_string, rspec)
+
+ #if rspec_version['type'].lower() == 'protogeni':
+ #spec = PGRSpec()
+ ##panos pass user options to SfaRSpec
+ #elif rspec_version['type'].lower() == 'sfa':
+ #rspec = SfaRSpec("",{},options)
+ #else:
+ #rspec = SfaRSpec("",{},options)
+
+
+ rspec = OAR_rspec.get_rspec(slice_xrn=xrn, version=rspec_version)
+ print >>sys.stderr, '\r\n OARImporter.GetNodes()', OARImporter.GetNodes()
+
+ print >>sys.stderr, ' \r\n **************RSPEC' , rspec
+
+
+ return rspec
+
+
+def get_ticket(api, xrn, creds, rspec, users):
+    """Build, sign and serialize an SfaTicket for slice *xrn*.
+
+    NOTE(review): this still follows the legacy plc code path --
+    'Slices' and 'SfaTicket' are not imported in this module (their
+    imports are commented out at the top), and verify_site/verify_slice
+    are called with the old (registry, credential, ...) signature,
+    unlike CreateSliver above.  Needs a senslab port; confirm before use.
+    """
+    reg_objects = __get_registry_objects(xrn, creds, users)
+
+    slice_hrn, type = urn_to_hrn(xrn)
+    slices = Slices(api)
+    peer = slices.get_peer(slice_hrn)
+    sfa_peer = slices.get_sfa_peer(slice_hrn)
+
+    # get the slice record
+    registry = api.registries[api.hrn]
+    credential = api.getCredential()
+    records = registry.Resolve(xrn, credential)
+
+    # similar to CreateSliver, we must verify that the required records exist
+    # at this aggregate before we can issue a ticket
+    site_id, remote_site_id = slices.verify_site(registry, credential, slice_hrn,
+                                                 peer, sfa_peer, reg_objects)
+    slice = slices.verify_slice(registry, credential, slice_hrn, site_id,
+                                remote_site_id, peer, sfa_peer, reg_objects)
+
+    # make sure we get a local slice record
+    record = None
+    for tmp_record in records:
+        if tmp_record['type'] == 'slice' and \
+           not tmp_record['peer_authority']:
+            record = SliceRecord(dict=tmp_record)
+    if not record:
+        raise RecordNotFound(slice_hrn)
+
+    # get sliver info
+    slivers = Slices(api).get_slivers(slice_hrn)
+    if not slivers:
+        raise SliverDoesNotExist(slice_hrn)
+
+    # get initscripts
+    initscripts = []
+    data = {
+        'timestamp': int(time.time()),
+        'initscripts': initscripts,
+        'slivers': slivers
+    }
+
+    # create the ticket
+    object_gid = record.get_gid_object()
+    new_ticket = SfaTicket(subject = object_gid.get_subject())
+    new_ticket.set_gid_caller(api.auth.client_gid)
+    new_ticket.set_gid_object(object_gid)
+    new_ticket.set_issuer(key=api.key, subject=api.hrn)
+    new_ticket.set_pubkey(object_gid.get_pubkey())
+    new_ticket.set_attributes(data)
+    new_ticket.set_rspec(rspec)
+    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+    new_ticket.encode()
+    new_ticket.sign()
+
+    return new_ticket.save_to_string(save_parents=True)
+
+
+
+def main():
+ api = SfaAPI()
+ """
+ rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
+ #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
+ #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
+ print rspec
+ """
+ f = open(sys.argv[1])
+ xml = f.read()
+ f.close()
+ CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+import types
+import time
+import sys
+
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+ UnknownSfaType, ExistingRecord
+from sfa.util.prefixTree import prefixTree
+from sfa.util.record import SfaRecord
+from sfa.senslab.table_slab import SfaTable
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn
+from sfa.util.version import version_core
+
+from sfa.trust.gid import GID
+from sfa.trust.credential import Credential
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+
+# The GENI GetVersion call
+def GetVersion(api):
+ # Bugfix TP 09/11/2011
+ #peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems()
+ peers =dict ([ (peername,v.get_url()) for (peername,v) in api.registries.iteritems()
+ if peername != api.hrn])
+ xrn=Xrn(api.hrn)
+ return version_core({'interface':'registry',
+ 'hrn':xrn.get_hrn(),
+ 'urn':xrn.get_urn(),
+ 'peers':peers})
+
+def get_credential(api, xrn, type, is_self=False):
+    """Return a signed credential (string) for the object named by *xrn*.
+
+    When *is_self* is true the credential is issued to the object itself
+    (caller gid == the record's own gid); otherwise the caller gid comes
+    from the client certificate and its rights on the object are checked
+    first.  Raises RecordNotFound / PermissionError accordingly.
+    """
+    # convert xrn to hrn
+    if type:
+        hrn = urn_to_hrn(xrn)[0]
+    else:
+        hrn, type = urn_to_hrn(xrn)
+
+    # Is this a root or sub authority
+    auth_hrn = api.auth.get_authority(hrn)
+    print>> sys.stderr , " \r\n REGISTRY get_credential auth_hrn:" , auth_hrn,"hrn : ", hrn, " Type : ", type, "is self : " , is_self,"<<"
+    if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
+        auth_hrn = hrn
+    # get record info
+    auth_info = api.auth.get_auth_info(auth_hrn)
+    table = SfaTable()
+    print >> sys.stderr , " findObject ", type, hrn
+    records = table.findObjects({'type': type, 'hrn': hrn})
+    print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s records %s " %(hrn, records)
+    if not records:
+        raise RecordNotFound(hrn)
+    record = records[0]
+
+    # verify_cancreate_credential requires that the member lists
+    # (researchers, pis, etc) be filled in
+    api.driver.fill_record_info(record, api.aggregates)
+    # NOTE(review): forcing enabled=True here makes the 'not enabled'
+    # check just below unreachable -- presumably a senslab workaround;
+    # confirm before removing either line
+    record['enabled'] = True
+    print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s record['enabled'] %s is_self %s" %(hrn, record['enabled'], is_self)
+    if record['type']=='user':
+        if not record['enabled']:
+            print>> sys.stderr , " \r\n ++ REGISTRY get_credential hrn %s ACCOUNT Not enabled"
+            raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+
+    # get the callers gid
+    # if this is a self cred the record's gid is the caller's gid
+    if is_self:
+        caller_hrn = hrn
+        caller_gid = record.get_gid_object()
+        print>>sys.stderr, " \r\n REGISTRY IS SELF OK caller_hrn %s--- \r\n caller_gid %s---------" %(caller_hrn,caller_gid)
+    else:
+        print>> sys.stderr , " \r\n ++ ELSE "
+        caller_gid = api.auth.client_cred.get_gid_caller()
+        print>> sys.stderr , " \r\n ++ ELSE caller_gid %s record %s" %(caller_gid, record)
+        caller_hrn = caller_gid.get_hrn()
+        print>> sys.stderr , " \r\n ++ ELSE caller_hrn %s " %(caller_hrn)
+
+        object_hrn = record.get_gid_object().get_hrn()
+        print>> sys.stderr , " \r\n ++ ELSE object_hrn %s " %(object_hrn)
+
+    # rights the caller holds on this object
+    rights = api.auth.determine_user_rights(caller_hrn, record)
+    print>> sys.stderr , " \r\n ++ After rights record: %s \r\n ====RIGHTS %s " %(record , rights)
+
+    # make sure caller has rights to this object
+    if rights.is_empty():
+        raise PermissionError(caller_hrn + " has no rights to " + record['name'])
+
+    object_gid = GID(string=record['gid'])
+    new_cred = Credential(subject = object_gid.get_subject())
+    new_cred.set_gid_caller(caller_gid)
+    new_cred.set_gid_object(object_gid)
+    new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
+    #new_cred.set_pubkey(object_gid.get_pubkey())
+    new_cred.set_privileges(rights)
+    # issued credentials are fully delegatable
+    new_cred.get_privileges().delegate_all_privileges(True)
+    if 'expires' in record:
+        new_cred.set_expiration(int(record['expires']))
+    auth_kind = "authority,ma,sa"
+    # Parent not necessary, verify with certs
+    #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
+    new_cred.encode()
+    new_cred.sign()
+
+    return new_cred.save_to_string(save_parents=True)
+
+
+def resolve(api, xrns, type=None, full=True):
+    """Resolve one or several xrns into registry records (list of dicts).
+
+    Each xrn is forwarded to the registry whose hrn is the longest
+    prefix match; the remaining ones are looked up in the local table.
+    When *full* is set the driver fills in testbed-specific info.
+    Raises RecordNotFound when nothing matches at all.
+    """
+    # load all known registry names into a prefix tree and attempt to find
+    # the longest matching prefix
+    print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========xrns ', xrns
+    if not isinstance(xrns, types.ListType):
+        if not type:
+            type = Xrn(xrns).get_type()
+        xrns = [xrns]
+    hrns = [urn_to_hrn(xrn)[0] for xrn in xrns]
+    print >>sys.stderr , '\t\t =========hrns ', hrns
+    # create a dict where key is a registry hrn and its value is a
+    # hrns at that registry (determined by the known prefix tree).
+    xrn_dict = {}
+    print >>sys.stderr, '\r\n REGISTRY MANAGER : resolve xrns ' , xrns #api.__dict__.keys()
+    registries = api.registries
+    tree = prefixTree()
+    registry_hrns = registries.keys()
+    print >>sys.stderr, '\r\n \t\t REGISTRY MANAGER registry_hrns' , registry_hrns
+    tree.load(registry_hrns)
+    for xrn in xrns:
+        registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
+        print >>sys.stderr, '\t\tREGISTRY MANAGER *****tree.best_match ', registry_hrn
+        if registry_hrn not in xrn_dict:
+            xrn_dict[registry_hrn] = []
+        xrn_dict[registry_hrn].append(xrn)
+        print >>sys.stderr, '\t\tREGISTRY MANAGER *****xrn_dict[registry_hrn] ',xrn_dict[registry_hrn]
+    records = []
+    for registry_hrn in xrn_dict:
+        # skip the hrn without a registry hrn
+        # XX should we let the user know the authority is unknown?
+        print >>sys.stderr, '\t\t registry_hrn in xrn_dict ', registry_hrn
+        if not registry_hrn:
+            continue
+
+        # if the best match (longest matching hrn) is not the local registry,
+        # forward the request
+        xrns = xrn_dict[registry_hrn]
+        if registry_hrn != api.hrn:
+            credential = api.getCredential()
+            interface = api.registries[registry_hrn]
+            server = api.server_proxy(interface, credential)
+            peer_records = server.Resolve(xrns, credential)
+            print >>sys.stderr , '\t\t peer_records ', peer_records
+            records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+
+    print >>sys.stderr,'\t\t hrns ' , hrns
+    # try resolving the remaining unfound records at the local registry
+    remaining_hrns = set(hrns).difference([record['hrn'] for record in records])
+    # convert set to list
+    remaining_hrns = [hrn for hrn in remaining_hrns]
+    print >>sys.stderr, '\t\t remaining_hrns', remaining_hrns
+    table = SfaTable()
+    local_records = table.findObjects({'hrn': remaining_hrns})
+
+    print >>sys.stderr, '\t\t LOCAL REC !', local_records
+    for rec in local_records:
+        print >>sys.stderr, '\t\t resolve regmanager : rec ', rec
+
+    if full:
+        print >>sys.stderr, '\r\n \r\n REGISTRY:_FULL', api
+        api.driver.fill_record_info(local_records)
+
+    # convert local record objects to dicts
+    records.extend([dict(record) for record in local_records])
+    print >>sys.stderr, "\r\n \t\t records extends %s" %(records)
+    if not records:
+        raise RecordNotFound(str(hrns))
+
+    if type:
+        records = filter(lambda rec: rec['type'] in [type], records)
+
+    return records
+
+def list(api, xrn, origin_hrn=None):
+    """List the records under authority *xrn*, forwarding to the peer
+    registry whose hrn best matches when it is not ours.
+    (NB: shadows the 'list' builtin -- name kept for API compatibility.)"""
+    hrn, type = urn_to_hrn(xrn)
+    # load all know registry names into a prefix tree and attempt to find
+    # the longest matching prefix
+    records = []
+    registries = api.registries
+    registry_hrns = registries.keys()
+    tree = prefixTree()
+    tree.load(registry_hrns)
+    registry_hrn = tree.best_match(hrn)
+
+    #if there was no match then this record belongs to an unknow registry
+    if not registry_hrn:
+        raise MissingAuthority(xrn)
+    # if the best match (longest matching hrn) is not the local registry,
+    # forward the request
+    records = []
+    if registry_hrn != api.hrn:
+        credential = api.getCredential()
+        print>>sys.stderr, "Registries : ", registries
+        interface = api.registries[registry_hrn]
+        server = api.server_proxy(interface, credential)
+        record_list = server.List(xrn, credential)
+        records = [SfaRecord(dict=record).as_dict() for record in record_list]
+
+    # if we still have not found the record yet, try the local registry
+    if not records:
+        if not api.auth.hierarchy.auth_exists(hrn):
+            raise MissingAuthority(hrn)
+
+        table = SfaTable()
+        records = table.find({'authority': hrn})
+
+    return records
+
+
+def register(api, record):
+
+ #hrn, type = record['hrn'], record['type']
+ hrn = str(record['hrn']).strip("['']")
+ type = str( record['type']).strip("['']")
+ urn = hrn_to_urn(hrn,type)
+ # validate the type
+ if type not in ['authority', 'slice', 'node', 'user']:
+ raise UnknownSfaType(type)
+
+ # check if record already exists
+ table = SfaTable()
+ existing_records = table.find({'type': type, 'hrn': hrn})
+ if existing_records:
+ raise ExistingRecord(hrn)
+
+ record = SfaRecord(dict = record)
+
+ print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAN.PY register SfaRecordrecord %s" %(record)
+ #record['authority'] = get_authority(record['hrn'])
+ record['authority'] = get_authority(hrn)
+
+ #type_of_rec = record['type']
+ #hrn = record['hrn']
+
+ #api.auth.verify_object_permission(hrn)
+ api.auth.verify_object_permission( record['hrn'])
+ auth_info = api.auth.get_auth_info(record['authority'])
+ pub_key = None
+ # make sure record has a gid
+ if 'gid' not in record:
+ uuid = create_uuid()
+ pkey = Keypair(create=True)
+ if 'key' in record and record['key']:
+ if isinstance(record['key'], types.ListType):
+ pub_key = record['key'][0]
+ else:
+ pub_key = record['key']
+ pkey = convert_public_key(pub_key)
+
+ gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+ gid = gid_object.save_to_string(save_parents=True)
+ record['gid'] = gid
+ record.set_gid(gid)
+ print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY record['gid'] %s" %(record['gid'])
+ print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register type %s"%(type)
+
+ if type in ["authority"]:
+ # update the tree
+ if not api.auth.hierarchy.auth_exists(hrn):
+ api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
+
+ # get the GID from the newly created authority
+ gid = auth_info.get_gid_object()
+ record.set_gid(gid.save_to_string(save_parents=True))
+
+ #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+ print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register : type in [authority ] sfa_fields_to_pl_fields FIELDS A CHANGER"
+
+ # thierry: ideally we'd like to be able to write api.driver.GetSites
+ # in which case the code would become mostly the same as for pl
+ sites = api.driver.GetSites([pl_record['login_base']])
+ if not sites:
+ # thierry
+ # Error (E0601, register): Using variable 'pl_record' before assignment
+ pointer = api.driver.AddSite( pl_record)
+ else:
+ pointer = sites[0]['site_id']
+
+ record.set_pointer(pointer)
+ record['pointer'] = pointer
+
+ elif (type == "slice"):
+ acceptable_fields=['url', 'instantiation', 'name', 'description']
+ pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+ print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY register slice pl_record %s"%(pl_record)
+ for key in pl_record.keys():
+ if key not in acceptable_fields:
+ pl_record.pop(key)
+ slices = api.driver.GetSlices([pl_record['name']])
+ if not slices:
+ pointer = api.driver.AddSlice(pl_record)
+ else:
+ pointer = slices[0]['slice_id']
+ record.set_pointer(pointer)
+ record['pointer'] = pointer
+
+ elif (type == "user"):
+ persons = api.driver.GetPersons([record['email']])
+ if not persons:
+ print>>sys.stderr, " \r\n \r\n ----------- registry_manager_slab register NO PERSON ADD TO LDAP?"
+
+ #if not persons:
+ #pointer = api.driver.AddPerson( dict(record))
+ #else:
+ #pointer = persons[0]['person_id']
+
+ if 'enabled' in record and record['enabled']:
+ api.driver.UpdatePerson(pointer, {'enabled': record['enabled']})
+ # add this persons to the site only if he is being added for the first
+ # time by sfa and doesont already exist in plc
+ if not persons or not persons[0]['site_ids']:
+ login_base = get_leaf(record['authority'])
+ api.driver.AddPersonToSite(pointer, login_base)
+
+ # What roles should this user have?
+ api.driver.AddRoleToPerson('user', pointer)
+ # Add the user's key
+ if pub_key:
+ api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
+
+ #elif (type == "node"):
+ #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+ #login_base = hrn_to_pl_login_base(record['authority'])
+ #nodes = api.driver.GetNodes([pl_record['hostname']])
+ #if not nodes:
+ #pointer = api.driver.AddNode(login_base, pl_record)
+ #else:
+ #pointer = nodes[0]['node_id']
+
+ ##record['pointer'] = pointer
+ ##record.set_pointer(pointer)
+ #record_id = table.insert(record)
+ #record['record_id'] = record_id
+
+ # update membership for researchers, pis, owners, operators
+ api.driver.update_membership(None, record)
+
+ return record.get_gid_object().save_to_string(save_parents=True)
+
+def update(api, record_dict):
+    """Update an existing registry record and the matching testbed-side
+    object; returns 1.
+
+    Raises RecordNotFound when the record does not exist and
+    UnknownSfaType for unsupported types."""
+    new_record = SfaRecord(dict = record_dict)
+    type = new_record['type']
+    hrn = new_record['hrn']
+    urn = hrn_to_urn(hrn,type)
+    api.auth.verify_object_permission(hrn)
+    table = SfaTable()
+    # make sure the record exists
+    records = table.findObjects({'type': type, 'hrn': hrn})
+    if not records:
+        raise RecordNotFound(hrn)
+    record = records[0]
+    record['last_updated'] = time.gmtime()
+
+    # Update_membership needs the membership lists in the existing record
+    # filled in, so it can see if members were added or removed
+    api.driver.fill_record_info(record)
+
+    # Use the pointer from the existing record, not the one that the user
+    # gave us. This prevents the user from inserting a forged pointer
+    pointer = record['pointer']
+    # update the PLC information that was specified with the record
+
+    if (type == "authority"):
+        api.driver.UpdateSite(pointer, new_record)
+
+    elif type == "slice":
+        pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record)
+        # 'name' is immutable on the testbed side, never pass it along
+        if 'name' in pl_record:
+            pl_record.pop('name')
+        api.driver.UpdateSlice(pointer, pl_record)
+
+    elif type == "user":
+        # SMBAKER: UpdatePerson only allows a limited set of fields to be
+        # updated. Ideally we should have a more generic way of doing
+        # this. I copied the field names from UpdatePerson.py...
+        update_fields = {}
+        all_fields = new_record
+        for key in all_fields.keys():
+            if key in ['first_name', 'last_name', 'title', 'email',
+                       'password', 'phone', 'url', 'bio', 'accepted_aup',
+                       'enabled']:
+                update_fields[key] = all_fields[key]
+        api.driver.UpdatePerson(pointer, update_fields)
+
+        if 'key' in new_record and new_record['key']:
+            # must check this key against the previous one if it exists
+            persons = api.driver.GetPersons([pointer], ['key_ids'])
+            person = persons[0]
+            keys = person['key_ids']
+            keys = api.driver.GetKeys(person['key_ids'])
+            key_exists = False
+            if isinstance(new_record['key'], types.ListType):
+                new_key = new_record['key'][0]
+            else:
+                new_key = new_record['key']
+
+            # Delete all stale keys
+            for key in keys:
+                if new_record['key'] != key['key']:
+                    api.driver.DeleteKey(key['key_id'])
+                else:
+                    key_exists = True
+            if not key_exists:
+                api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+
+            # update the openssl key and gid
+            pkey = convert_public_key(new_key)
+            uuid = create_uuid()
+            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+            gid = gid_object.save_to_string(save_parents=True)
+            record['gid'] = gid
+            record = SfaRecord(dict=record)
+            table.update(record)
+
+    elif type == "node":
+        api.driver.UpdateNode(pointer, new_record)
+
+    else:
+        raise UnknownSfaType(type)
+
+    # update membership for researchers, pis, owners, operators
+    api.driver.update_membership(record, new_record)
+
+    return 1
+
+# expecting an Xrn instance
+def remove(api, xrn, origin_hrn=None):
+    """Delete the record named by *xrn* from the registry and the testbed.
+
+    First asks every federated registry to drop its copy of the object
+    (best effort), then deletes the backing object through api.driver,
+    and finally removes the row from the local SFA table.
+    Raises RecordNotFound when nothing matches; returns 1 on success.
+    """
+    table = SfaTable()
+    filter = {'hrn': xrn.get_hrn()}
+    hrn=xrn.get_hrn()
+    type=xrn.get_type()
+    # a wildcard type means "whatever type the stored record has"
+    if type and type not in ['all', '*']:
+        filter['type'] = type
+
+    records = table.find(filter)
+    if not records: raise RecordNotFound(hrn)
+    record = records[0]
+    type = record['type']
+
+    credential = api.getCredential()
+    registries = api.registries
+
+    # Try to remove the object from the PLCDB of federated agg.
+    # This is attempted before removing the object from the local agg's PLCDB and sfa table
+    if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']:
+        for registry in registries:
+            if registry not in [api.hrn]:
+                try:
+                    result=registries[registry].remove_peer_object(credential, record, origin_hrn)
+                except:
+                    # best effort: an unreachable peer must not block
+                    # the local deletion below
+                    pass
+    if type == "user":
+        persons = api.driver.GetPersons(record['pointer'])
+        # only delete this person if he has site ids. if he doesnt, it probably means
+        # he was just removed from a site, not actually deleted
+        if persons and persons[0]['site_ids']:
+            api.driver.DeletePerson(record['pointer'])
+    elif type == "slice":
+        if api.driver.GetSlices(record['pointer']):
+            api.driver.DeleteSlice(record['pointer'])
+    elif type == "node":
+        if api.driver.GetNodes(record['pointer']):
+            api.driver.DeleteNode(record['pointer'])
+    elif type == "authority":
+        if api.driver.GetSites(record['pointer']):
+            api.driver.DeleteSite(record['pointer'])
+    else:
+        raise UnknownSfaType(type)
+
+    table.remove(record)
+
+    return 1
+
+def remove_peer_object(api, record, origin_hrn=None):
+    """No-op: peer-object removal is not supported by this registry."""
+    pass
+
+def register_peer_object(api, record, origin_hrn=None):
+    """No-op: peer-object registration is not supported by this registry."""
+    pass
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- RELAX NG schema for Senslab RSpecs: an RSpec is either an
+     advertisement (a "network" of sites and nodes, optionally carrying
+     leases and slivers) or a "request" (sliver specifications only). -->
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+  <start>
+    <ref name="RSpec"/>
+  </start>
+  <define name="RSpec">
+    <element name="RSpec">
+      <attribute name="type">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <choice>
+        <ref name="network"/>
+        <ref name="request"/>
+      </choice>
+    </element>
+  </define>
+  <define name="network">
+    <element name="network">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="site"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <!-- sliver settings inherited by every sliver that omits them -->
+  <define name="sliver_defaults">
+    <element name="sliver_defaults">
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="site">
+    <element name="site">
+      <attribute name="id">
+        <data type="ID"/>
+      </attribute>
+      <element name="name">
+        <text/>
+      </element>
+      <zeroOrMore>
+        <ref name="node"/>
+      </zeroOrMore>
+    </element>
+  </define>
+  <define name="node">
+    <element name="node">
+      <attribute name="node_id">
+        <data type="ID"/>
+      </attribute>
+      <element name="hostname">
+        <text/>
+      </element>
+      <attribute name="reservable">
+        <data type="boolean"/>
+      </attribute>
+      <element name="ip_address">
+        <text/>
+      </element>
+      <optional>
+        <element name="urn">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <ref name="leases"/>
+      </optional>
+      <optional>
+        <ref name="sliver"/>
+      </optional>
+    </element>
+  </define>
+  <define name="request">
+    <element name="request">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="sliver"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver">
+    <element name="sliver">
+      <optional>
+        <attribute name="nodeid">
+          <data type="ID"/>
+        </attribute>
+      </optional>
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <!-- the per-sliver payload: all three children optional, any order -->
+  <define name="sliver_elements">
+    <interleave>
+      <optional>
+        <element name="capabilities">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="delegations">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="program">
+          <text/>
+        </element>
+      </optional>
+    </interleave>
+  </define>
+  <!-- bug fix: this define was malformed (stray </attribute>, unclosed
+       <group>/<element>/<define>) and RELAX NG forbids a repeated group
+       of bare attributes; each (slot, slice) pair is therefore wrapped
+       in a <lease> child element.
+       NOTE(review): <lease> element name assumed - confirm against the
+       rspec producer/parser. -->
+  <define name="leases">
+    <element name="leases">
+      <zeroOrMore>
+        <element name="lease">
+          <attribute name="slot">
+            <data type="dateTime"/>
+          </attribute>
+          <attribute name="slice">
+            <data type="NMTOKEN"/>
+          </attribute>
+        </element>
+      </zeroOrMore>
+    </element>
+  </define>
+</grammar>
--- /dev/null
+#
+import sys
+import time,datetime
+from StringIO import StringIO
+from types import StringTypes
+from copy import deepcopy
+from copy import copy
+from lxml import etree
+
+from sfa.util.sfalogging import logger
+#from sfa.util.sfalogging import sfa_logger
+#from sfa.util.rspecHelper import merge_rspecs
+from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
+from sfa.util.plxrn import hrn_to_pl_slicename
+#from sfa.util.rspec import *
+#from sfa.util.specdict import *
+from sfa.util.faults import *
+from sfa.util.record import SfaRecord
+#from sfa.rspecs.pg_rspec import PGRSpec
+#from sfa.rspecs.sfa_rspec import SfaRSpec
+from sfa.rspecs.rspec_converter import RSpecConverter
+#from sfa.rspecs.rspec_parser import parse_rspec
+#from sfa.rspecs.rspec_version import RSpecVersion
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
+#from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version
+from sfa.client.client_helper import sfa_to_pg_users_arg
+from sfa.rspecs.version_manager import VersionManager
+
+#from sfa.rspecs.rspec import RSpec
+from sfa.util.policy import Policy
+from sfa.util.prefixTree import prefixTree
+#from sfa.util.sfaticket import *
+from sfa.trust.credential import Credential
+#from sfa.util.threadmanager import ThreadManager
+#import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+#import sfa.plc.peers as peers
+from sfa.util.version import version_core
+from sfa.util.callids import Callids
+#from sfa.senslab.api import *
+
+
+#api=SfaAPI(interface='slicemgr')
+
+def _call_id_supported(api, server):
+ """
+ Returns true if server support the optional call_id arg, false otherwise.
+ """
+ server_version = api.get_cached_server_version(server)
+
+ if 'sfa' in server_version:
+ code_tag = server_version['code_tag']
+ code_tag_parts = code_tag.split("-")
+
+ version_parts = code_tag_parts[0].split(".")
+ major, minor = version_parts[0:2]
+ rev = code_tag_parts[1]
+ if int(major) > 1:
+ if int(minor) > 0 or int(rev) > 20:
+ return True
+ return False
+
+# we have specialized xmlrpclib.ServerProxy to remember the input url
+# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+def get_serverproxy_url (server):
+ try:
+ return server.get_url()
+ except:
+ logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+ return server._ServerProxy__host + server._ServerProxy__handler
+
+def GetVersion(api):
+    """Build the slice manager's GetVersion structure: interface name,
+    hrn/urn, the peer aggregates from aggregates.xml, and the supported
+    ad/request rspec versions."""
+    # peers explicitly in aggregates.xml
+    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+                   if peername != api.hrn])
+    version_manager = VersionManager()
+    ad_rspec_versions = []
+    request_rspec_versions = []
+    # split the known rspec versions by the content type they apply to
+    for rspec_version in version_manager.versions:
+        if rspec_version.content_type in ['*', 'ad']:
+            ad_rspec_versions.append(rspec_version.to_dict())
+        if rspec_version.content_type in ['*', 'request']:
+            request_rspec_versions.append(rspec_version.to_dict())
+    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+    xrn=Xrn(api.hrn, 'authority+sa')
+    version_more = {'interface':'slicemgr',
+                    'hrn' : xrn.get_hrn(),
+                    'urn' : xrn.get_urn(),
+                    'peers': peers,
+                    'request_rspec_versions': request_rspec_versions,
+                    'ad_rspec_versions': ad_rspec_versions,
+                    'default_ad_rspec': default_rspec_version
+                    }
+    sm_version=version_core(version_more)
+    # local aggregate if present needs to have localhost resolved
+    if api.hrn in api.aggregates:
+        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+    return sm_version
+
+
+#def GetVersion(api):
+ ## peers explicitly in aggregates.xml
+ #peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+ #if peername != api.hrn])
+ #xrn=Xrn (api.hrn)
+ #request_rspec_versions = [dict(pg_rspec_request_version), dict(sfa_rspec_version)]
+ #ad_rspec_versions = [dict(pg_rspec_ad_version), dict(sfa_rspec_version)]
+ #version_more = {'interface':'slicemgr',
+ #'hrn' : xrn.get_hrn(),
+ #'urn' : xrn.get_urn(),
+ #'peers': peers,
+ #'request_rspec_versions': request_rspec_versions,
+ #'ad_rspec_versions': ad_rspec_versions,
+ #'default_ad_rspec': dict(sfa_rspec_version)
+ #}
+ #sm_version=version_core(version_more)
+ ## local aggregate if present needs to have localhost resolved
+ #if api.hrn in api.aggregates:
+ #local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+ #sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+ #return sm_version
+
+
+def drop_slicemgr_stats(api,rspec):
+ try:
+ stats_elements = rspec.xml.xpath('//statistics')
+ for node in stats_elements:
+ node.getparent().remove(node)
+ except Exception, e:
+ api.logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
+
+
+
+
+def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
+    """Fan CreateSliver out to every known aggregate and merge the
+    returned manifests into a single rspec.
+
+    NOTE(review): RSpec and ThreadManager are referenced here but their
+    imports are commented out at the top of this file - confirm they are
+    re-imported before this code is enabled.
+    """
+    version_manager = VersionManager()
+    def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
+        # per-aggregate worker, run in a thread; returns a result dict
+        # tagged with elapsed time and success/exception status
+        tStart = time.time()
+        try:
+            # Need to call GetVersion at an aggregate to determine the supported
+            # rspec type/format beofre calling CreateSliver at an Aggregate.
+            print>>sys.stderr, " \r\n SLICE MANAGERSLAB _CreateSliver server "
+            server_version = api.get_cached_server_version(server)
+            requested_users = users
+            if 'sfa' not in server_version and 'geni_api' in server_version:
+                # sfa aggregtes support both sfa and pg rspecs, no need to convert
+                # if aggregate supports sfa rspecs. otherwise convert to pg rspec
+                rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
+                filter = {'component_manager_id': server_version['urn']}
+                rspec.filter(filter)
+                rspec = rspec.toxml()
+                requested_users = sfa_to_pg_users_arg(users)
+            args = [xrn, credential, rspec, requested_users]
+            # older peers do not accept the optional call_id argument
+            if _call_id_supported(api, server):
+                args.append(call_id)
+            rspec = server.CreateSliver(*args)
+            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+        except:
+            logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url)
+            return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+
+
+    # idempotence guard: a call_id seen before is answered with ""
+    if Callids().already_handled(call_id): return ""
+
+    # Validate the RSpec against PlanetLab's schema --disabled for now
+    # The schema used here needs to aggregate the PL and VINI schemas
+    # schema = "/var/www/html/schemas/pl.rng"
+    rspec = RSpec(rspec_str)
+    schema = None
+    if schema:
+        rspec.validate(schema)
+
+    print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api %s" %(api)
+    # if there is a <statistics> section, the aggregates don't care about it,
+    # so delete it.
+    drop_slicemgr_stats(api,rspec)
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+
+    # get the callers hrn
+    hrn, type = urn_to_hrn(xrn)
+    valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    threads = ThreadManager()
+    print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api aggregates %s \t caller_hrn %s api.hrn %s" %(api.aggregates, caller_hrn, api.hrn)
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        interface = api.aggregates[aggregate]
+        print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver aggregate %s interface %s" %(api.aggregates[aggregate],interface)
+        server = api.get_server(interface, credential)
+        if server is None:
+            print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver NOSERVERS "
+        # NOTE(review): a None server is only logged above, yet is still
+        # handed to threads.run below - confirm this is intentional
+        # Just send entire RSpec to each aggregate
+        #threads.run(_CreateSliver, aggregate, xrn, [credential], rspec.toxml(), users, call_id)
+        threads.run(_CreateSliver, aggregate, server, xrn, [credential], rspec.toxml(), users, call_id)
+    results = threads.get_results()
+    manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
+    result_rspec = RSpec(version=manifest_version)
+    #rspec = SfaRSpec()
+    for result in results:
+        add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"])
+        if result["status"]=="success":
+            try:
+                result_rspec.version.merge(result["rspec"])
+            except:
+                api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec")
+    return result_rspec.toxml()
+    #rspec.merge(result)
+    #return rspec.toxml()
+
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+ if Callids().already_handled(call_id): return True
+
+ (hrn, type) = urn_to_hrn(xrn)
+ # get the callers hrn
+ valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
+ caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ # prevent infinite loop. Dont send request back to caller
+ # unless the caller is the aggregate's SM
+ if caller_hrn == aggregate and aggregate != api.hrn:
+ continue
+
+ server = api.aggregates[aggregate]
+ threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
+ # 'and' the results
+ return reduce (lambda x,y: x and y, threads.get_results() , True)
+
+def get_ticket(api, xrn, creds, rspec, users):
+    """Collect tickets from every aggregate named in the rspec's
+    <network> sections and merge them into one signed SfaTicket.
+
+    NOTE(review): SfaTicket, merge_rspecs and xmlrpcprotocol are used
+    below but are not imported at the top of this file (the imports are
+    commented out) - confirm before enabling this path.
+    """
+    slice_hrn, type = urn_to_hrn(xrn)
+    # get the netspecs contained within the clients rspec
+    aggregate_rspecs = {}
+    tree= etree.parse(StringIO(rspec))
+    elements = tree.findall('./network')
+    for element in elements:
+        # the first attribute of <network> names the aggregate
+        aggregate_hrn = element.values()[0]
+        aggregate_rspecs[aggregate_hrn] = rspec
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = None
+        if aggregate in api.aggregates:
+            server = api.aggregates[aggregate]
+        else:
+            net_urn = hrn_to_urn(aggregate, 'authority')
+            # we may have a peer that knows about this aggregate
+            for agg in api.aggregates:
+                target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn)
+                if not target_aggs or not 'hrn' in target_aggs[0]:
+                    continue
+                # send the request to this address
+                url = target_aggs[0]['url']
+                server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file)
+                # aggregate found, no need to keep looping
+                break
+        if server is None:
+            continue
+        threads.run(server.ParseTicket, xrn, credential, aggregate_rspec, users)
+
+    results = threads.get_results()
+
+    # gather information from each ticket
+    rspecs = []
+    initscripts = []
+    slivers = []
+    object_gid = None
+    for result in results:
+        agg_ticket = SfaTicket(string=result)
+        attrs = agg_ticket.get_attributes()
+        # keep the first object gid seen; all tickets describe one slice
+        if not object_gid:
+            object_gid = agg_ticket.get_gid_object()
+        rspecs.append(agg_ticket.get_rspec())
+        initscripts.extend(attrs.get('initscripts', []))
+        slivers.extend(attrs.get('slivers', []))
+
+    # merge info
+    attributes = {'initscripts': initscripts,
+                  'slivers': slivers}
+    merged_rspec = merge_rspecs(rspecs)
+
+    # create a new ticket signed by this slice manager
+    ticket = SfaTicket(subject = slice_hrn)
+    ticket.set_gid_caller(api.auth.client_gid)
+    ticket.set_issuer(key=api.key, subject=api.hrn)
+    ticket.set_gid_object(object_gid)
+    ticket.set_pubkey(object_gid.get_pubkey())
+    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+    ticket.set_attributes(attributes)
+    ticket.set_rspec(merged_rspec)
+    ticket.encode()
+    ticket.sign()
+    return ticket.save_to_string(save_parents=True)
+
+
+def DeleteSliver(api, xrn, creds, call_id):
+ if Callids().already_handled(call_id): return ""
+ (hrn, type) = urn_to_hrn(xrn)
+ # get the callers hrn
+ valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
+ caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ # prevent infinite loop. Dont send request back to caller
+ # unless the caller is the aggregate's SM
+ if caller_hrn == aggregate and aggregate != api.hrn:
+ continue
+ server = api.aggregates[aggregate]
+ threads.run(server.DeleteSliver, xrn, credential, call_id)
+ threads.get_results()
+ return 1
+
+def start_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+
+ # get the callers hrn
+ valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
+ caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ # prevent infinite loop. Dont send request back to caller
+ # unless the caller is the aggregate's SM
+ if caller_hrn == aggregate and aggregate != api.hrn:
+ continue
+ server = api.aggregates[aggregate]
+ threads.run(server.Start, xrn, credential)
+ threads.get_results()
+ return 1
+
+def stop_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+
+ # get the callers hrn
+ valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
+ caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ # prevent infinite loop. Dont send request back to caller
+ # unless the caller is the aggregate's SM
+ if caller_hrn == aggregate and aggregate != api.hrn:
+ continue
+ server = api.aggregates[aggregate]
+ threads.run(server.Stop, xrn, credential)
+ threads.get_results()
+ return 1
+
+def reset_slice(api, xrn):
+ """
+ Not implemented
+ """
+ return 1
+
+def shutdown(api, xrn, creds):
+ """
+ Not implemented
+ """
+ return 1
+
+def status(api, xrn, creds):
+ """
+ Not implemented
+ """
+ return 1
+
+# Thierry : caching at the slicemgr level makes sense to some extent
+#caching=True
+# caching is currently disabled: every query goes to the aggregates
+caching=False
+def ListSlices(api, creds, call_id):
+    """Return the union of the slice lists of every known aggregate,
+    optionally served from / stored into the slicemgr cache."""
+    if Callids().already_handled(call_id): return []
+
+    # look in cache first
+    if caching and api.cache:
+        slices = api.cache.get('slices')
+        if slices:
+            return slices
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    # fetch from aggregates
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = api.aggregates[aggregate]
+        threads.run(server.ListSlices, credential, call_id)
+
+    # combine results
+    results = threads.get_results()
+    slices = []
+    for result in results:
+        slices.extend(result)
+
+    # cache the result
+    if caching and api.cache:
+        api.cache.add('slices', slices)
+
+    return slices
+
+def add_slicemgr_stat(rspec, callname, aggname, elapsed, status):
+ try:
+ stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
+ if stats_tags:
+ stats_tag = stats_tags[0]
+ else:
+ stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname)
+
+ etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status))
+ except Exception, e:
+ api.logger.warn("add_slicemgr_stat failed on %s: %s" %(aggname, str(e)))
+
+
+
+
+def ListResources(api, creds, options, call_id):
+    """Fan ListResources out to every known aggregate and merge the
+    per-aggregate rspecs into one ad (no slice urn) or manifest
+    (slice urn supplied), with optional caching of the ad.
+
+    NOTE(review): RSpec and ThreadManager are referenced here but their
+    imports are commented out at the top of this file - confirm they are
+    re-imported before this code is enabled.
+    """
+    version_manager = VersionManager()
+    def _ListResources(aggregate, server, credential, opts, call_id):
+        # per-aggregate worker, run in a thread; returns a result dict
+        # tagged with elapsed time and success/exception status
+        my_opts = copy(opts)
+        args = [credential, my_opts]
+        tStart = time.time()
+        try:
+            # older peers do not accept the optional call_id argument
+            if _call_id_supported(api, server):
+                args.append(call_id)
+            version = api.get_cached_server_version(server)
+            # force ProtoGENI aggregates to give us a v2 RSpec
+            if 'sfa' not in version.keys():
+                my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+            rspec = server.ListResources(*args)
+            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+        except Exception, e:
+            api.logger.log_exc("ListResources failed at %s" %(server.url))
+            return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+
+    if Callids().already_handled(call_id): return ""
+
+    # get slice's hrn from options
+    xrn = options.get('geni_slice_urn', '')
+    (hrn, type) = urn_to_hrn(xrn)
+    if 'geni_compressed' in options:
+        del(options['geni_compressed'])
+
+    # get the rspec's return format from options
+    rspec_version = version_manager.get_version(options.get('rspec_version'))
+    version_string = "rspec_%s" % (rspec_version.to_string())
+
+    # look in cache first (only ads - never slice-specific manifests)
+    if caching and api.cache and not xrn:
+        rspec = api.cache.get(version_string)
+        if rspec:
+            return rspec
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    cred = api.getDelegatedCredential(creds)
+    if not cred:
+        cred = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+
+        # get the rspec from the aggregate
+        interface = api.aggregates[aggregate]
+        server = api.get_server(interface, cred)
+        threads.run(_ListResources, aggregate, server, [cred], options, call_id)
+
+
+    results = threads.get_results()
+    rspec_version = version_manager.get_version(options.get('rspec_version'))
+    # a slice urn means the caller wants a manifest, otherwise an ad
+    if xrn:
+        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
+    else:
+        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
+    rspec = RSpec(version=result_version)
+    for result in results:
+        add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"])
+        if result["status"]=="success":
+            try:
+                rspec.version.merge(result["rspec"])
+            except:
+                api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
+
+    # cache the result
+    if caching and api.cache and not xrn:
+        api.cache.add(version_string, rspec.toxml())
+
+    print >>sys.stderr, "\r\n slice_manager \r\n" , rspec
+    return rspec.toxml()
+
+#def ListResources(api, creds, options, call_id):
+
+ #if Callids().already_handled(call_id): return ""
+
+ ## get slice's hrn from options
+ #xrn = options.get('geni_slice_urn', '')
+ #(hrn, type) = urn_to_hrn(xrn)
+ #print >>sys.stderr, " SM_ListResources xrn " , xrn
+ ##print >>sys.stderr, " SM ListResources api.__dict__ " , api.__dict__.keys()
+ ##print >>sys.stderr, " SM ListResources dir(api)" , dir(api)
+ #print >>sys.stderr, " \r\n avant RspecVersion \r\n \r\n"
+ ## get the rspec's return format from options
+ #rspec_version = RSpecVersion(options.get('rspec_version'))
+ #print >>sys.stderr, " \r\n \r\n ListResources RSpecVersion ", rspec_version
+ #version_string = "rspec_%s" % (rspec_version.get_version_name())
+
+ ##panos adding the info option to the caching key (can be improved)
+ #if options.get('info'):
+ #version_string = version_string + "_"+options.get('info')
+
+ #print>>sys.stderr,"version string = ",version_string
+
+ ## look in cache first
+ #if caching and api.cache and not xrn:
+ #print>>sys.stderr," \r\n caching %s and api.cache %s and not xrn %s"%(caching , api.cache,xrn)
+ #rspec = api.cache.get(version_string)
+ #if rspec:
+ #return rspec
+
+ ## get the callers hrn
+ #print >>sys.stderr, " SM ListResources get the callers hrn "
+ #valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+ #caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+ #print >>sys.stderr, " \r\n SM ListResources get the callers caller_hrn hrn %s "%(caller_hrn)
+ ## attempt to use delegated credential first
+ #credential = api.getDelegatedCredential(creds)
+ #print >>sys.stderr, " \r\n SM ListResources get the callers credential %s "%(credential)
+ #if not credential:
+ #credential = api.getCredential()
+ #threads = ThreadManager()
+ #print >>sys.stderr, " \r\n SM ListResources get the callers api.aggregates %s "%(api.aggregates)
+ #for aggregate in api.aggregates:
+ ## prevent infinite loop. Dont send request back to caller
+ ## unless the caller is the aggregate's SM
+ #if caller_hrn == aggregate and aggregate != api.hrn:
+ #continue
+ ## get the rspec from the aggregate
+ #server = api.aggregates[aggregate]
+ #print >>sys.stderr, " Slice Mgr ListResources, server" ,server
+ #my_opts = copy(options)
+ #my_opts['geni_compressed'] = False
+ #threads.run(server.ListResources, credential, my_opts, call_id)
+ #print >>sys.stderr, "\r\n !!!!!!!!!!!!!!!! \r\n"
+ #results = threads.get_results()
+ ##results.append(open('/root/protogeni.rspec', 'r').read())
+ #rspec_version = RSpecVersion(my_opts.get('rspec_version'))
+ #if rspec_version['type'].lower() == 'protogeni':
+ #rspec = PGRSpec()
+ #else:
+ #rspec = SfaRSpec()
+
+ #for result in results:
+ #print >>sys.stderr, "\r\n slice_manager result" , result
+ #try:
+ #print >>sys.stderr, "avant merge" , rspec
+ #rspec.merge(result)
+ #print >>sys.stderr, "AFTERMERGE" , rspec
+ #except:
+ #raise
+ #api.logger.info("SM.ListResources: Failed to merge aggregate rspec")
+
+ ## cache the result
+ #if caching and api.cache and not xrn:
+ #api.cache.add(version_string, rspec.toxml())
+
+ #print >>sys.stderr, "\r\n slice_manager \r\n" , rspec
+ #return rspec.toxml()
+
+# first draft at a merging SliverStatus
+def SliverStatus(api, slice_xrn, creds, call_id):
+ if Callids().already_handled(call_id): return {}
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ server = api.aggregates[aggregate]
+ threads.run (server.SliverStatus, slice_xrn, credential, call_id)
+ results = threads.get_results()
+
+ # get rid of any void result - e.g. when call_id was hit where by convention we return {}
+ results = [ result for result in results if result and result['geni_resources']]
+
+ # do not try to combine if there's no result
+ if not results : return {}
+
+ # otherwise let's merge stuff
+ overall = {}
+
+ # mmh, it is expected that all results carry the same urn
+ overall['geni_urn'] = results[0]['geni_urn']
+
+ # consolidate geni_status - simple model using max on a total order
+ states = [ 'ready', 'configuring', 'failed', 'unknown' ]
+ # hash name to index
+ shash = dict ( zip ( states, range(len(states)) ) )
+ def combine_status (x,y):
+ return shash [ max (shash(x),shash(y)) ]
+ overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )
+
+ # {'ready':0,'configuring':1,'failed':2,'unknown':3}
+ # append all geni_resources
+ overall['geni_resources'] = \
+ reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+
+ return overall
+
+def main():
+    """Ad-hoc manual test: create a sliver from an rspec file given on
+    the command line.
+
+    NOTE(review): RSpec is not imported in this file (the import is
+    commented out), so this entry point raises NameError as written.
+    """
+    r = RSpec()
+    r.parseFile(sys.argv[1])
+    rspec = r.toDict()
+    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
+
+if __name__ == "__main__":
+ main()
+
--- /dev/null
+from sfa.rspecs.sfa_rspec import sfa_rspec_version
+from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version
+
+# rspec versions this aggregate can advertise
+ad_rspec_versions = [
+    pg_rspec_ad_version,
+    sfa_rspec_version
+    ]
+
+# the same formats are accepted in requests
+request_rspec_versions = ad_rspec_versions
+
+# format used when the caller does not ask for a specific one
+default_rspec_version = { 'type': 'SFA', 'version': '1' }
+
+# single structure handed out by GetVersion
+supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
+                    'request_rspec_versions': request_rspec_versions,
+                    'default_ad_rspec': default_rspec_version}
+
--- /dev/null
+
+
+
+import ldap
+from sfa.util.config import *
+from sfa.trust.gid import *
+from sfa.trust.hierarchy import *
+from sfa.trust.auth import *
+from sfa.trust.certificate import *
+
+class LDAPapi :
+    """Thin registry adapter over the Senslab LDAP user base: maps LDAP
+    entries (ou=people) to SFA user records under the configured root
+    authority."""
+    def __init__(self, record_filter = None):
+        # NOTE(review): LDAP server address is hard-coded - consider
+        # moving it to the SFA configuration
+        self.ldapserv=ldap.open("192.168.0.251")
+        self.senslabauth=Hierarchy()
+        config=Config()
+        self.authname=config.SFA_REGISTRY_ROOT_AUTH
+        authinfo=self.senslabauth.get_auth_info(self.authname)
+
+        self.auth=Auth()
+        gid=authinfo.get_gid_object()
+
+    def ldapFind(self, record_filter = None, columns=None):
+        """Return SFA-style user record dicts matching *record_filter*.
+
+        The filter is either {'authority': <hrn>} (only our own root
+        authority is answered, with all entries) or {'hrn': <hrn or
+        list of hrns>}.  Anything else yields [].
+        """
+        results = []
+
+        if 'authority' in record_filter:
+            # ask for authority
+            if record_filter['authority']==self.authname:
+                # which is SFA_REGISTRY_ROOT_AUTH
+                # request all records which are under our authority, ie all ldap entries
+                ldapfilter="cn=*"
+            else:
+                #which is NOT SFA_REGISTRY_ROOT_AUTH
+                return []
+        else :
+            if not 'hrn' in record_filter:
+                # NOTE(review): 'sys' is not imported in this module -
+                # this print raises NameError if ever reached
+                print >>sys.stderr,"find : don't know how to handle filter ",record_filter
+                return []
+            else:
+                hrns=[]
+                h=record_filter['hrn']
+                if isinstance(h,list):
+                    hrns=h
+                else :
+                    hrns.append(h)
+
+                # build an LDAP OR-filter over the uid part of each hrn;
+                # NOTE(review): if no hrn matches our authority this
+                # produces the invalid filter "(|)" - confirm handling
+                ldapfilter="(|"
+                for hrn in hrns:
+                    splited_hrn=hrn.split(".")
+                    if splited_hrn[0] != self.authname :
+                        print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", splited_hrn[0]
+                    else :
+                        login=splited_hrn[1]
+                        ldapfilter+="(uid="
+                        ldapfilter+=login
+                        ldapfilter+=")"
+                ldapfilter+=")"
+
+
+        rindex=self.ldapserv.search("ou=people,dc=senslab,dc=info",ldap.SCOPE_SUBTREE,ldapfilter, ['mail','givenName', 'sn', 'uid','sshPublicKey'])
+        # result(rindex, 1) waits for and fetches all entries of the search
+        ldapresponse=self.ldapserv.result(rindex,1)
+        for ldapentry in ldapresponse[1]:
+            hrn=self.authname+"."+ldapentry[1]['uid'][0]
+            uuid=create_uuid()
+
+            RSA_KEY_STRING=ldapentry[1]['sshPublicKey'][0]
+
+            pkey=convert_public_key(RSA_KEY_STRING)
+
+            # mint a fresh gid for the user from their ssh public key
+            gid=self.senslabauth.create_gid("urn:publicid:IDN+"+self.authname+"+user+"+ldapentry[1]['uid'][0], uuid, pkey, CA=False)
+
+            parent_hrn = get_authority(hrn)
+            parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
+
+            results.append( {
+                'type': 'user',
+#                'email': ldapentry[1]['mail'][0],
+#                'first_name': ldapentry[1]['givenName'][0],
+#                'last_name': ldapentry[1]['sn'][0],
+#                'phone': 'none',
+                'gid': gid.save_to_string(),
+                'serial': 'none',
+                'authority': self.authname,
+                'peer_authority': '',
+                'pointer' : '',
+                'hrn': hrn,
+                'date_created' : 'none',
+                'last_updated': 'none'
+                } )
+        return results
--- /dev/null
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+from sfa.senslab.parsing import *
+from sfa.senslab.SenslabImportUsers import *
+
+#OARIP='10.127.255.254'
+OARIP='192.168.0.109'
+
+
+OARrequests_list = ["GET_version", "GET_timezone", "GET_jobs", "GET_jobs_table", "GET_jobs_details",
+"GET_resources_full", "GET_resources"]
+
+OARrequests_uri_list = ['/oarapi/version.json','/oarapi/timezone.json', '/oarapi/jobs.json',
+'/oarapi/jobs/details.json', '/oarapi/resources/full.json', '/oarapi/resources.json']
+
+OARrequests_get_uri_dict = { 'GET_version': '/oarapi/version.json',
+ 'GET_timezone':'/oarapi/timezone.json' ,
+ 'GET_jobs': '/oarapi/jobs.json',
+ 'GET_jobs_table': '/oarapi/jobs/table.json',
+ 'GET_jobs_details': '/oarapi/jobs/details.json',
+ 'GET_resources_full': '/oarapi/resources/full.json',
+ 'GET_resources':'/oarapi/resources.json',
+}
+
+POSTformat = { #'yaml': {'content':"text/yaml", 'object':yaml}
+'json' : {'content':"application/json",'object':json},
+#'http': {'content':"applicaton/x-www-form-urlencoded",'object': html},
+}
+
+class OARrestapi:
+ def __init__(self):
+ self.oarserver= {}
+ self.oarserver['ip'] = OARIP
+ self.oarserver['port'] = 80
+ self.oarserver['uri'] = None
+ self.oarserver['postformat'] = None
+
+ def GETRequestToOARRestAPI(self, request ):
+ self.oarserver['uri'] = OARrequests_get_uri_dict[request]
+ try :
+ conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+ conn.request("GET",self.oarserver['uri'] )
+ resp = ( conn.getresponse()).read()
+ conn.close()
+ except:
+ raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
+ try:
+ js = json.loads(resp)
+ return js
+
+ except ValueError:
+ raise ServerError("Failed to parse Server Response:" + js)
+
+
+
+ def POSTRequestToOARRestAPI(self, uri,format, data):
+ self.oarserver['uri'] = uri
+ if format in POSTformat:
+ try :
+ conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+ conn.putrequest("POST",self.oarserver['uri'] )
+ self.oarserver['postformat'] = POSTformat[format]
+ conn.putheader('content-type', self.oarserver['postformat']['content'])
+ conn.putheader('content-length', str(len(data)))
+ conn.endheaders()
+ conn.send(data)
+ resp = ( conn.getresponse()).read()
+ conn.close()
+
+ except:
+ raise ServerError("POST_OAR_SRVR : error")
+
+ try:
+ answer = self.oarserver['postformat']['object'].loads(resp)
+ return answer
+
+ except ValueError:
+ raise ServerError("Failed to parse Server Response:" + answer)
+ else:
+ print>>sys.stderr, "\r\n POSTRequestToOARRestAPI : ERROR_POST_FORMAT"
+
+
+class OARGETParser:
+    """Parses JSON replies from the OAR REST API into PLC-style node, site
+    and job dictionaries.
+
+    Parsing is table-driven: OARrequests_uri_dict maps a request name to
+    its parse method, and resources_fulljson_dict maps each OAR resource
+    property to the handler that records it on the node being built.
+    """
+
+    #Insert a new node into the dictnode dictionary
+    def AddNodeId(self,dictnode,value):
+        #Inserts new key. The value associated is a tuple list.
+        node_id = int(value)
+        dictnode[node_id] = [('node_id',node_id) ]
+        return node_id
+
+    def AddNodeNetworkAddr(self,tuplelist,value):
+        # hostname is derived from the OAR network address
+        #tuplelist.append(('hostname',str(value)))
+        tuplelist.append(('hostname',str(value)+'.demolab.fr'))
+        # NOTE(review): site_id hard-coded to 3 (demolab) — TODO confirm
+        tuplelist.append(('site_id',3))
+
+    def AddNodeSite(self,tuplelist,value):
+        tuplelist.append(('site_login_base',str(value)))
+
+    def AddNodeRadio(self,tuplelist,value):
+        tuplelist.append(('radio',str(value)))
+
+    def AddMobility(self,tuplelist,value):
+        # returning 0 tells ParseNodes this node's property list is complete,
+        # so 'mobile' must be the last property OAR sends for a resource
+        if value :
+            tuplelist.append(('mobile',int(value)))
+        return 0
+
+    def AddPosX(self,tuplelist,value):
+        tuplelist.append(('posx',value))
+
+    def AddPosY(self,tuplelist,value):
+        tuplelist.append(('posy',value))
+
+    def ParseVersion(self) :
+        # fill version_json_dict, coping with both old ('api'/'apilib'/'oar')
+        # and new ('api_version'/...) key names exposed by different OAR releases
+        print self.raw_json
+        #print >>sys.stderr, self.raw_json
+        if 'oar_version' in self.raw_json :
+            self.version_json_dict.update(api_version=self.raw_json['api_version'] ,
+                        apilib_version=self.raw_json['apilib_version'],
+                        api_timezone=self.raw_json['api_timezone'],
+                        api_timestamp=self.raw_json['api_timestamp'],
+                        oar_version=self.raw_json['oar_version'] )
+        else :
+            self.version_json_dict.update(api_version=self.raw_json['api'] ,
+                        apilib_version=self.raw_json['apilib'],
+                        api_timezone=self.raw_json['api_timezone'],
+                        api_timestamp=self.raw_json['api_timestamp'],
+                        oar_version=self.raw_json['oar'] )
+
+        print self.version_json_dict['apilib_version']
+
+    def ParseTimezone(self) :
+        # placeholder: the timezone reply is not exploited yet
+        print " ParseTimezone"
+
+    def ParseJobs(self) :
+        # placeholder: job list parsing is not implemented yet
+        self.jobs_list = []
+        print " ParseJobs "
+
+    def ParseJobsTable(self) :
+        # placeholder
+        print "ParseJobsTable"
+
+    def ParseJobsDetails (self):
+        # placeholder
+        print "ParseJobsDetails"
+
+    def ParseResources(self) :
+        print>>sys.stderr, " \r\n \t\t\t ParseResources__________________________ "
+        #resources are listed inside the 'items' list from the json
+        self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+
+    def ParseResourcesFull(self ) :
+        print>>sys.stderr, " \r\n \t\t\t ParseResourcesFull_____________________________ "
+        #print self.raw_json[1]
+        #resources are listed inside the 'items' list from the json
+        # apilib 0.2.10 returns the resource list directly, without 'items'
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+            self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+
+    #Parse nodes properties from OAR
+    #Put them into a dictionary with key = node id and value is a dictionary
+    #of the node properties and properties'values.
+    def ParseNodes(self):
+        # node_id is None between node records; the per-property handlers in
+        # resources_fulljson_dict return 0 to mark the end of a record
+        node_id = None
+        for dictline in self.raw_json:
+            for k in dictline.keys():
+                if k in self.resources_fulljson_dict:
+                    # dictionary is empty and/or a new node has to be inserted
+                    if node_id is None :
+                        node_id = self.resources_fulljson_dict[k](self,self.node_dictlist, dictline[k])
+                    else:
+                        ret = self.resources_fulljson_dict[k](self,self.node_dictlist[node_id], dictline[k])
+                        #If last property has been inserted in the property tuple list, reset node_id
+                        if ret == 0:
+                            #Turn the property tuple list (=dict value) into a dictionary
+                            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+                            node_id = None
+                else:
+                    # property not in the dispatch table: ignore it
+                    pass
+
+    #Returns the list of dictionaries describing the sites' attributes
+    def ParseSites(self):
+        nodes_per_site = {}
+        # Create a list of nodes per site_id
+        for node_id in self.node_dictlist.keys():
+            node = self.node_dictlist[node_id]
+            if node['site_id'] not in nodes_per_site.keys():
+                nodes_per_site[node['site_id']] = []
+                nodes_per_site[node['site_id']].append(node['node_id'])
+            else:
+                if node['node_id'] not in nodes_per_site[node['site_id']]:
+                    nodes_per_site[node['site_id']].append(node['node_id'])
+        #Create a site dictionary with key is site_login_base (name of the site)
+        # and value is a dictionary of properties, including the list of the node_ids
+        for node_id in self.node_dictlist.keys():
+            node = self.node_dictlist[node_id]
+            # NOTE(review): the membership test uses site_id but site_dict is
+            # keyed by site_login_base, so it never matches and the entry is
+            # rebuilt for every node — harmless with identical data, but it
+            # should probably test node['site_login_base'] instead
+            if node['site_id'] not in self.site_dict.keys():
+                self.site_dict[node['site_login_base']] = [('site_id',node['site_id']),\
+                ('login_base', node['site_login_base']),\
+                ('node_ids',nodes_per_site[node['site_id']]),\
+                ('latitude',"48.83726"),\
+                ('longitude',"- 2.10336"),('name',"demolab"),\
+                ('pcu_ids', []), ('max_slices', None), ('ext_consortium_id', None),\
+                ('max_slivers', None), ('is_public', True), ('peer_site_id', None),\
+                ('abbreviated_name', "demolab"), ('address_ids', []),\
+                ('url', "http,//www.sdemolab.fr"), ('person_ids', []),\
+                ('site_tag_ids', []), ('enabled', True), ('slice_ids', []),\
+                ('date_created', None), ('peer_id', None),]
+                self.site_dict[node['site_login_base']] = dict(self.site_dict[node['site_login_base']])
+
+        #print>>sys.stderr, "\r\n \r\n =============\t\t ParseSites site dict %s \r\n"%(self.site_dict)
+
+    def GetNodesFromOARParse(self):
+        # node_id -> node property dict, as built by ParseNodes
+        #print>>sys.stderr, " \r\n =========GetNodesFromOARParse: node_dictlist %s "%(self.node_dictlist)
+        return self.node_dictlist
+
+    def GetSitesFromOARParse(self):
+        # site_login_base -> site property dict, as built by ParseSites
+        return self.site_dict
+
+    def GetJobsFromOARParse(self):
+        return self.jobs_list
+
+    # dispatch table: request name -> uri + parse method (bound at class level)
+    OARrequests_uri_dict = {
+        'GET_version': {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
+        'GET_timezone':{'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
+        'GET_jobs': {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
+        'GET_jobs_table': {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
+        'GET_jobs_details': {'uri':'/oarapi/jobs/details.json','parse_func': ParseJobsDetails},
+        'GET_resources_full': {'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFull},
+        'GET_resources':{'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
+    }
+    # dispatch table: OAR resource property -> handler method
+    resources_fulljson_dict= {
+        'resource_id' : AddNodeId,
+        'network_address' : AddNodeNetworkAddr,
+        'site': AddNodeSite,
+        'radio': AddNodeRadio,
+        'mobile': AddMobility,
+        'posx': AddPosX,
+        'posy': AddPosY,
+    }
+
+    def __init__(self, srv ):
+        """Bind to an OARrestapi instance *srv* and fetch the server version
+        (needed by ParseResourcesFull to pick the right reply layout)."""
+        self.version_json_dict= { 'api_version' : None , 'apilib_version' :None, 'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
+        self.timezone_json_dict = { 'timezone': None, 'api_timestamp': None, }
+        self.jobs_json_dict = { 'total' : None, 'links' : [] , 'offset':None , 'items' : [] , }
+        self.jobs_table_json_dict = self.jobs_json_dict
+        self.jobs_details_json_dict = self.jobs_json_dict
+        self.server = srv
+        self.node_dictlist = {}
+        self.site_dict = {}
+        self.SendRequest("GET_version")
+
+    def SendRequest(self,request):
+        """Issue *request* against the OAR server and run its parse method."""
+        if request in OARrequests_get_uri_dict:
+            self.raw_json = self.server.GETRequestToOARRestAPI(request)
+            self.OARrequests_uri_dict[request]['parse_func'](self)
+        else:
+            print>>sys.stderr, "\r\n OARGetParse __init__ : ERROR_REQUEST " ,request
+
+class OARapi:
+    """PLC-like accessors (GetNodes/GetSites/GetJobs) backed by the OAR
+    REST API; each call re-fetches and re-parses the server's inventory."""
+
+    def __init__(self):
+        # constructing the parser performs a GET_version round-trip
+        self.server = OARrestapi()
+        self.parser = OARGETParser(self.server)
+
+    def GetNodes(self,node_filter= None, return_fields=None):
+        """Return node dicts, optionally narrowed by *node_filter* and
+        projected onto *return_fields* via parse_filter."""
+        #print>>sys.stderr, " \r\n GetNodes node_filter %s return_fields %s" %(node_filter,return_fields)
+        self.parser.SendRequest("GET_resources_full")
+        node_dict = self.parser.GetNodesFromOARParse()
+        return_node_list = []
+        #print>>sys.stderr, " \r\n GetNodes node_dict %s" %(node_dict)
+        if not (node_filter or return_fields):
+            return_node_list = node_dict.values()
+            return return_node_list
+
+        return_node_list= parse_filter(node_dict.values(),node_filter ,'node', return_fields)
+        return return_node_list
+
+    def GetSites(self, site_filter= None, return_fields=None):
+        """Return site dicts, enriched in place with the PI person_ids and
+        slice_ids taken from SenslabImportUsers.
+
+        NOTE(review): only the first site in the parsed dict is enriched —
+        fine while there is a single (demolab) site; revisit for more sites.
+        """
+        print>>sys.stderr, " \r\n GetSites+++++++++++++++++"
+        self.parser.SendRequest("GET_resources_full")
+        site_dict = self.parser.GetSitesFromOARParse()
+        return_site_list = []
+        site = site_dict.values()[0]
+        Users = SenslabImportUsers()
+
+        #print>>sys.stderr, " \r\n GetSites sites_dict %s site_filter %s \r\n \r\n \r\n \r\n------site %s" %(site_dict,site_filter,site )
+        admins_dict ={'person_ids': Users.GetPIs(site['site_id'])}
+        site.update(admins_dict)
+
+        slice_list = Users.GetSlices()
+        for sl in slice_list:
+            #print>>sys.stderr, " \r\n GetSites sl %s" %(sl)
+            if sl['site_id'] == site['site_id']:
+                site['slice_ids'].append(sl['slice_id'])
+        #print>>sys.stderr, " \r\n GetSites -site['site_id'] %s --slice_list %s" %(site['site_id'],slice_list )
+
+        #print>>sys.stderr, " \r\n GetSites -site['site_id'] %s --admins_dict %s---site %s" %(site['site_id'],admins_dict,site )
+        if not (site_filter or return_fields):
+            return_site_list = site_dict.values()
+            return return_site_list
+
+        return_site_list = parse_filter(site_dict.values(),site_filter ,'site', return_fields)
+        return return_site_list
+
+    def GetJobs(self):
+        """Fetch and return the (currently unparsed, empty) job list."""
+        print>>sys.stderr, " \r\n GetJobs"
+        self.parser.SendRequest("GET_jobs")
+        return self.parser.GetJobsFromOARParse()
+
--- /dev/null
+
+#!/usr/bin/python
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+
+
+from sfa.util.xrn import *
+from sfa.util.plxrn import *
+#from sfa.rspecs.sfa_rspec import SfaRSpec
+from sfa.rspecs.rspec import RSpec
+#from sfa.rspecs.pg_rspec import PGRSpec
+#from sfa.rspecs.rspec_version import RSpecVersion
+from sfa.rspecs.version_manager import VersionManager
+from sfa.senslab.OARrestapi import *
+
+class OARrspec:
+    """Builds SFA RSpec documents from the node/site inventory exposed by
+    OARapi (modelled on plc/aggregate.py)."""
+
+    # NOTE(review): these mutable caches are class-level attributes, shared
+    # by every OARrspec instance in the process — confirm this is intended
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+
+    prepared=False
+    #panos new user options variable
+    user_options = {}
+
+    def __init__(self ,api, user_options={}):
+        # NOTE(review): mutable default argument for user_options — callers
+        # relying on the default share one dict; consider user_options=None
+        self.OARImporter = OARapi()
+        self.user_options = user_options
+        self.api = api
+        print >>sys.stderr,"\r\n \r\n \t\t_____________INIT OARRSPEC__ api : %s" %(api)
+
+    def prepare_sites(self, force=False):
+        """Cache sites by site_id (refetched only when empty or *force*)."""
+        print >>sys.stderr,'\r\n \r\n ++++++++++++++\t\t prepare_sites'
+        if not self.sites or force:
+            for site in self.OARImporter.GetSites():
+                print >>sys.stderr,'prepare_sites : site ', site
+                self.sites[site['site_id']] = site
+
+    def prepare_nodes(self, force=False):
+        """Cache nodes by node_id (refetched only when empty or *force*)."""
+        if not self.nodes or force:
+            for node in self.OARImporter.GetNodes():
+                self.nodes[node['node_id']] = node
+                print >>sys.stderr,'prepare_nodes:node', node
+
+    #def prepare_interfaces(self, force=False):
+        #if not self.interfaces or force:
+            #for interface in self.api.plshell.GetInterfaces(self.api.plauth):
+                #self.interfaces[interface['interface_id']] = interface
+
+    #def prepare_node_tags(self, force=False):
+        #if not self.node_tags or force:
+            #for node_tag in self.api.plshell.GetNodeTags(self.api.plauth):
+                #self.node_tags[node_tag['node_tag_id']] = node_tag
+
+    def prepare_links(self, force=False):
+        # links are not modelled yet
+        if not self.links or force:
+            pass
+
+    def prepare(self, force=False):
+        """Populate the caches and decorate each node with its site and the
+        senslab network/site/node URNs expected by the rspec builders."""
+        if not self.prepared or force:
+            self.prepare_sites(force)
+            self.prepare_nodes(force)
+            #self.prepare_links(force)
+            #self.prepare_interfaces(force)
+            #self.prepare_node_tags(force)
+            # add site/interface info to nodes
+            for node_id in self.nodes:
+                node = self.nodes[node_id]
+                site = self.sites[node['site_id']]
+                #interfaces = [self.interfaces[interface_id] for interface_id in node['interface_ids']]
+                #tags = [self.node_tags[tag_id] for tag_id in node['node_tag_ids']]
+                node['network'] = "senslab"
+                node['network_urn'] = hrn_to_urn(node['network'], 'authority+am')
+                node['urn'] = hostname_to_urn(node['network'], site['login_base'], node['hostname'])
+                node['site_urn'] = hrn_to_urn(PlXrn.site_hrn(node['network'], site['login_base']), 'authority+sa')
+                node['site'] = site
+                #node['interfaces'] = interfaces
+                #node['tags'] = tags
+                #print >>sys.stderr, "\r\n OAR prepare ", node
+
+        self.prepared = True
+
+#from plc/aggregate.py
+    def get_rspec(self, slice_xrn=None, version = None):
+        """Return an XML RSpec ('ad' when no slice_xrn, 'manifest' otherwise)
+        listing all cached nodes, in the requested rspec *version*."""
+        print>>sys.stderr, " \r\n OARrspec \t\t get_spec **************\r\n"
+        self.prepare()
+
+        rspec = None
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        #rspec_version = RSpecVersion(version)
+        #print >>sys.stderr, '\r\n \t\t rspec_version type',version_manager['type']
+
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+
+        rspec = RSpec(version=rspec_version, user_options=self.user_options)
+        rspec.version.add_nodes(self.nodes.values())
+        print >>sys.stderr, 'after add_nodes'
+
+        #rspec.add_links(self.links.values())
+
+        #if slice_xrn:
+            ## get slice details
+            #slice_hrn, _ = urn_to_hrn(slice_xrn)
+            #slice_name = hrn_to_pl_slicename(slice_hrn)
+            #slices = self.api.plshell.GetSlices(self.api.plauth, slice_name)
+            #if slices:
+                #slice = slices[0]
+                #slivers = []
+                #tags = self.api.plshell.GetSliceTags(self.api.plauth, slice['slice_tag_ids'])
+                #for node_id in slice['node_ids']:
+                    #sliver = {}
+                    #sliver['hostname'] = self.nodes[node_id]['hostname']
+                    #sliver['tags'] = []
+                    #slivers.append(sliver)
+                    #for tag in tags:
+                        ## if tag isn't bound to a node then it applies to all slivers
+                        #if not tag['node_id']:
+                            #sliver['tags'].append(tag)
+                        #else:
+                            #tag_host = self.nodes[tag['node_id']]['hostname']
+                            #if tag_host == sliver['hostname']:
+                                #sliver.tags.append(tag)
+                #rspec.add_slivers(slivers)
+        return rspec.toxml()
--- /dev/null
+
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+import getopt
+import sys
+import tempfile
+from sfa.util.sfalogging import _SfaLogger
+#from sfa.util.sfalogging import sfa_logger_goes_to_import,sfa_logger
+
+from sfa.util.record import *
+from sfa.util.table import SfaTable
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroots import *
+from sfa.trust.hierarchy import *
+from sfa.trust.gid import create_uuid
+
+
+
+def _un_unicode(str):
+ if isinstance(str, unicode):
+ return str.encode("ascii", "ignore")
+ else:
+ return str
+
+def _cleanup_string(str):
+ # pgsql has a fit with strings that have high ascii in them, so filter it
+ # out when generating the hrns.
+ tmp = ""
+ for c in str:
+ if ord(c) < 128:
+ tmp = tmp + c
+ str = tmp
+
+ str = _un_unicode(str)
+ str = str.replace(" ", "_")
+ str = str.replace(".", "_")
+ str = str.replace("(", "_")
+ str = str.replace("'", "_")
+ str = str.replace(")", "_")
+ str = str.replace('"', "_")
+ return str
+
+class SenslabImport:
+    """Creates senslab authorities, users, slices, nodes and sites in the
+    SFA registry table, minting GIDs as needed (modelled on the PLC
+    importer)."""
+
+    def __init__(self):
+        self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
+
+        #sfa_logger_goes_to_import()
+        #self.logger = sfa_logger()
+        self.AuthHierarchy = Hierarchy()
+        self.config = Config()
+        self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config))
+        print>>sys.stderr, "\r\n ========= \t\t SenslabImport TrustedRoots\r\n" , self.TrustedRoots
+        self.plc_auth = self.config.get_plc_auth()
+        print>>sys.stderr, "\r\n ========= \t\t SenslabImport self.plc_auth %s \r\n" %(self.plc_auth )
+        self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
+
+    def create_sm_client_record(self):
+        """
+        Create a user record for the Slicemanager service.
+        """
+        hrn = self.config.SFA_INTERFACE_HRN + '.slicemanager'
+        urn = hrn_to_urn(hrn, 'user')
+        if not self.AuthHierarchy.auth_exists(urn):
+            self.logger.info("Import: creating Slice Manager user")
+            self.AuthHierarchy.create_auth(urn)
+
+        auth_info = self.AuthHierarchy.get_auth_info(hrn)
+        table = SfaTable()
+        sm_user_record = table.find({'type': 'user', 'hrn': hrn})
+        if not sm_user_record:
+            record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="user", pointer=-1)
+            record['authority'] = get_authority(record['hrn'])
+            table.insert(record)
+
+    def create_top_level_auth_records(self, hrn):
+        """
+        Create top level records (includes root and sub authorities (local/remote)
+        """
+        print>>sys.stderr, "\r\n =========SenslabImport create_top_level_auth_records\r\n"
+        urn = hrn_to_urn(hrn, 'authority')
+        # make sure parent exists; recurses up to the root authority
+        parent_hrn = get_authority(hrn)
+        if not parent_hrn:
+            parent_hrn = hrn
+        if not parent_hrn == hrn:
+            self.create_top_level_auth_records(parent_hrn)
+
+        # create the authority if it doesnt already exist
+        if not self.AuthHierarchy.auth_exists(urn):
+            self.logger.info("Import: creating top level authorities")
+            self.AuthHierarchy.create_auth(urn)
+
+        # create the db record if it doesnt already exist
+        auth_info = self.AuthHierarchy.get_auth_info(hrn)
+        table = SfaTable()
+        auth_record = table.find({'type': 'authority', 'hrn': hrn})
+
+        if not auth_record:
+            auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
+            auth_record['authority'] = get_authority(auth_record['hrn'])
+            self.logger.info("Import: inserting authority record for %s"%hrn)
+            table.insert(auth_record)
+            print>>sys.stderr, "\r\n ========= \t\t SenslabImport NO AUTH RECORD \r\n" ,auth_record['authority']
+
+    def create_interface_records(self):
+        """
+        Create a record for each SFA interface
+        """
+        # just create certs for all sfa interfaces even if they
+        # arent enabled
+        interface_hrn = self.config.SFA_INTERFACE_HRN
+        interfaces = ['authority+sa', 'authority+am', 'authority+sm']
+        table = SfaTable()
+        auth_info = self.AuthHierarchy.get_auth_info(interface_hrn)
+        pkey = auth_info.get_pkey_object()
+        for interface in interfaces:
+            interface_record = table.find({'type': interface, 'hrn': interface_hrn})
+            if not interface_record:
+                self.logger.info("Import: interface %s %s " % (interface_hrn, interface))
+                urn = hrn_to_urn(interface_hrn, interface)
+                gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+                record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)
+                record['authority'] = get_authority(interface_hrn)
+                print>>sys.stderr,"\r\n ==========create_interface_records", record['authority']
+                table.insert(record)
+
+    def import_person(self, parent_hrn, person, keys):
+        """
+        Register a user record; *person* is a person dict, *keys* the list
+        of the user's key dicts (the first key's 'key' entry is used for
+        the GID, falling back to a fresh keypair when absent).
+        """
+        hrn = email_to_hrn(parent_hrn, person['email'])
+
+        print >>sys.stderr , "\r\n_____00______SenslabImport : person", person
+        # ASN.1 will have problems with hrn's longer than 64 characters
+        if len(hrn) > 64:
+            hrn = hrn[:64]
+        print >>sys.stderr , "\r\n_____0______SenslabImport : parent_hrn", parent_hrn
+        self.logger.info("Import: person %s"%hrn)
+        key_ids = []
+        # grab the users' ssh keys -- are they in OAR?
+        if 'key_ids' in person and person['key_ids']:
+            key_ids = person["key_ids"]
+            # get the user's private key from the SSH keys they have uploaded
+            # to planetlab
+            # NOTE(review): keys[0] raises IndexError if key_ids is set but
+            # *keys* is empty — confirm callers keep the two in sync
+            print >>sys.stderr , "\r\n_____1______SenslabImport : self.plc_auth %s \r\n \t keys %s key[0] %s" %(self.plc_auth,keys, keys[0])
+            key = keys[0]['key']
+            pkey = convert_public_key(key)
+            print >>sys.stderr , "\r\n_____2______SenslabImport : key %s pkey %s"% (key,pkey.as_pem())
+            if not pkey:
+                pkey = Keypair(create=True)
+        else:
+            # the user has no keys
+            self.logger.warning("Import: person %s does not have a PL public key"%hrn)
+            # if a key is unavailable, then we still need to put something in the
+            # user's GID. So make one up.
+            pkey = Keypair(create=True)
+            print >>sys.stderr , "\r\n___ELSE________SenslabImport pkey : %s"%(pkey.key)
+        # create the gid
+        urn = hrn_to_urn(hrn, 'user')
+        print >>sys.stderr , "\r\n \t\t : urn ", urn
+        person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        table = SfaTable()
+        person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id'])
+        person_record['authority'] = get_authority(person_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']})
+        if not existing_records:
+            table.insert(person_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            person_record['record_id'] = existing_record['record_id']
+            table.update(person_record)
+
+    def import_slice(self, parent_hrn, slice):
+        """Register (or update) a slice record under *parent_hrn*."""
+        #slicename = slice['name'].split("_",1)[-1]
+
+        slicename = _cleanup_string(slice['name'])
+
+        if not slicename:
+            self.logger.error("Import: failed to parse slice name %s" %slice['name'])
+            return
+
+        hrn = parent_hrn + "." + slicename
+        self.logger.info("Import: slice %s"%hrn)
+
+        # slices get a throwaway keypair just to carry a GID
+        pkey = Keypair(create=True)
+        urn = hrn_to_urn(hrn, 'slice')
+        slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'])
+        slice_record['authority'] = get_authority(slice_record['hrn'])
+        table = SfaTable()
+        existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']})
+        if not existing_records:
+            table.insert(slice_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            slice_record['record_id'] = existing_record['record_id']
+            table.update(slice_record)
+
+    def import_node(self, hrn, node):
+        """Register (or update) a node record with the given *hrn*."""
+        self.logger.info("Import: node %s" % hrn)
+        # ASN.1 will have problems with hrn's longer than 64 characters
+        if len(hrn) > 64:
+            hrn = hrn[:64]
+
+        table = SfaTable()
+        # NOTE(review): this lookup result is immediately overwritten below
+        # and never used — confirm it can be dropped
+        node_record = table.find({'type': 'node', 'hrn': hrn})
+        pkey = Keypair(create=True)
+        urn = hrn_to_urn(hrn, 'node')
+        node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
+        node_record['authority'] = get_authority(node_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
+        if not existing_records:
+            table.insert(node_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            node_record['record_id'] = existing_record['record_id']
+            table.update(node_record)
+
+    def import_site(self, parent_hrn, site):
+        """Register (or update) a site as an authority under *parent_hrn*;
+        returns the site's hrn."""
+        plc_auth = self.plc_auth
+        sitename = site['login_base']
+        sitename = _cleanup_string(sitename)
+        hrn = parent_hrn + "." + sitename
+
+        urn = hrn_to_urn(hrn, 'authority')
+        self.logger.info("Import: site %s"%hrn)
+
+        # create the authority
+        if not self.AuthHierarchy.auth_exists(urn):
+            self.AuthHierarchy.create_auth(urn)
+
+        # NOTE(review): other methods call get_auth_info with the hrn, not the
+        # urn — confirm which form Hierarchy.get_auth_info expects
+        auth_info = self.AuthHierarchy.get_auth_info(urn)
+
+        table = SfaTable()
+        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id'])
+        auth_record['authority'] = get_authority(auth_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']})
+        if not existing_records:
+            table.insert(auth_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            auth_record['record_id'] = existing_record['record_id']
+            table.update(auth_record)
+
+        return hrn
+
+    def delete_record(self, hrn, type):
+        """Remove every record matching (*type*, *hrn*) from the registry."""
+        # delete the record
+        table = SfaTable()
+        record_list = table.find({'type': type, 'hrn': hrn})
+        for record in record_list:
+            self.logger.info("Import: removing record %s %s" % (type, hrn))
+            table.remove(record)
--- /dev/null
+#!/usr/bin/python
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+import datetime
+import time
+from sfa.senslab.parsing import *
+
+
+
+
+class SenslabImportUsers:
+
+
+ def __init__(self):
+ self.person_list = []
+ self.keys_list = []
+ self.slices_list= []
+ #self.resources_fulldict['keys'] = []
+ self.InitPersons()
+ self.InitKeys()
+ self.InitSlices()
+
+
+ def InitSlices(self):
+ slices_per_site = {}
+ dflt_slice = { 'instantiation': None, 'description': "Senslab Slice Test", 'node_ids': [], 'url': "http://localhost.localdomain/", 'max_nodes': 256, 'site_id': 3,'peer_slice_id': None, 'slice_tag_ids': [], 'peer_id': None, 'hrn' :None}
+ for person in self.person_list:
+ if 'user' or 'pi' in person['roles']:
+ def_slice = {}
+ #print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice person %s \r\n \rn " %(person['person_id'])
+ def_slice['person_ids'] = []
+ def_slice['person_ids'].append(person['person_id'])
+ def_slice['slice_id'] = person['person_id']
+ def_slice['creator_person_id'] = person['person_id']
+ extime = datetime.datetime.utcnow()
+ def_slice['created'] = int(time.mktime(extime.timetuple()))
+ extime = extime + datetime.timedelta(days=365)
+ def_slice['expires'] = int(time.mktime(extime.timetuple()))
+ #print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice expires %s \r\n \r\n "%(def_slice['expires'])
+ def_slice['name'] = person['email'].replace('@','_',1)
+ #print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice %s \r\n \r\n " %(def_slice['name'])
+ def_slice.update(dflt_slice)
+ self.slices_list.append(def_slice)
+
+ print>>sys.stderr, "InitSlices SliceLIST", self.slices_list
+
+ def InitPersons(self):
+ persons_per_site = {}
+ person_id = 7
+ persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'a_rioot@senslab.fr', 'key_ids':[1], 'roles': ['pi'], 'role_ids':[20],'first_name':'A','last_name':'rioot'}
+ person_id = 8
+ persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'lost@senslab.fr','key_ids':[1],'roles': ['pi'], 'role_ids':[20],'first_name':'L','last_name':'lost'}
+ person_id = 9
+ persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'user@senslab.fr','key_ids':[1],'roles': ['user'], 'role_ids':[1],'first_name':'U','last_name':'senslab'}
+ for person_id in persons_per_site.keys():
+ person = persons_per_site[person_id]
+ if person['person_id'] not in self.person_list:
+ self.person_list.append(person)
+ #print>>sys.stderr, "InitPersons PERSON DICLIST", self.person_list
+
+
+ def InitKeys(self):
+ #print>>sys.stderr, " InitKeys HEYYYYYYY\r\n"
+
+ self.keys_list = [{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 7, 'key_id':1, 'peer_id':None },
+ {'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 8, 'key_id':1, 'peer_id':None },
+ {'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 9, 'key_id':1, 'peer_id':None }]
+
+
+
+
+ def GetPersons(self, person_filter=None, return_fields=None):
+ #print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s list: %s" %(person_filter,return_fields, self.person_list)
+ if not self.person_list :
+ print>>sys.stderr, " \r\n ========>GetPersons NO PERSON LIST DAMMIT<========== \r\n"
+
+ if not (person_filter or return_fields):
+ return self.person_list
+
+ return_person_list= []
+ return_person_list = parse_filter(self.person_list,person_filter ,'persons', return_fields)
+ return return_person_list
+
+
+ def GetPIs(self,site_id):
+ return_person_list= []
+ for person in self.person_list :
+ if site_id in person['site_ids'] and 'pi' in person['roles'] :
+ return_person_list.append(person['person_id'])
+ #print>>sys.stderr, " \r\n GetPIs return_person_list %s :" %(return_person_list)
+ return return_person_list
+
+
    def GetKeys(self, key_filter=None, return_fields=None):
        """Return key records, mimicking PLCAPI GetKeys.

        With neither *key_filter* nor *return_fields* the whole cached key
        list is returned; otherwise parse_filter narrows it.
        """
        return_key_list= []
        print>>sys.stderr, " \r\n GetKeys"

        if not (key_filter or return_fields):
            return self.keys_list
        return_key_list = parse_filter(self.keys_list, key_filter, 'keys', return_fields)
        return return_key_list

        # NOTE(review): everything below is unreachable dead code kept from
        # a previous hand-rolled filtering implementation -- candidate for
        # removal.
        #return_key_list= []
        #print>>sys.stderr, " \r\n GetKeys"

        #if not (key_filter or return_fields):
            #return self.keys_list

        #elif key_filter or return_fields:
            #for key in self.keys_list:
                #tmp_key = {}
                #if key_filter:
                    #for k_filter in key_filter:
                        #if key['key_id'] == k_filter :
                            #if return_fields:
                                #for field in return_fields:
                                    #if field in key.keys():
                                        #tmp_key[field] = key[field]
                            #else:
                                #tmp_key = key

                            #print>>sys.stderr, " \r\n tmp_key",tmp_key
                            #return_key_list.append(tmp_key)
        #print>>sys.stderr," \r\n End GetKeys with filter ", return_key_list
        #return return_key_list
+
    def GetSlices(self, slice_filter=None, return_fields=None):
        """Return slice records, mimicking PLCAPI GetSlices.

        With neither *slice_filter* nor *return_fields* the whole cached
        slice list is returned; otherwise parse_filter narrows it.
        """
        return_slice_list= []
        print>>sys.stderr, "\r\n\r\n\t =======================GetSlices "
        if not (slice_filter or return_fields):
            return self.slices_list
        return_slice_list= parse_filter(self.slices_list, slice_filter, 'slice', return_fields)
        return return_slice_list
+
+
    def AddSlice(self, slice_fields):
        """Stub -- slice creation is not implemented yet; only logs the call."""
        print>>sys.stderr, " \r\n \r\nAddSlice "
+
+
    def AddPersonToSlice(self, person_id_or_email, slice_id_or_name):
        """Stub -- adding a person to a slice is not implemented yet; only logs the call."""
        print>>sys.stderr, " \r\n \r\n AddPersonToSlice"
+
    def DeletePersonFromSlice(self, person_id_or_email, slice_id_or_name):
        """Stub -- removing a person from a slice is not implemented yet; only logs the call."""
        print>>sys.stderr, " \r\n \r\n DeletePersonFromSlice "
--- /dev/null
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+
+
def strip_dictionnary (dict_to_strip):
    """Return a copy of *dict_to_strip* whose keys have had leading and
    trailing '|' characters stripped; values are kept unchanged.
    """
    return dict((str(key).strip('|'), value)
                for key, value in dict_to_strip.items())
+
+
def filter_return_fields( dict_to_filter, return_fields):
    """Project *dict_to_filter* onto the keys listed in *return_fields*.

    Keys absent from *dict_to_filter* are silently skipped.
    """
    return dict((field, dict_to_filter[field])
                for field in return_fields
                if field in dict_to_filter)
+
+
+
def parse_filter(list_to_filter, param_filter, type_of_list, return_fields=None) :
    """Filter a list of record dicts, PLCAPI-filter style.

    :param list_to_filter: list of record dicts to filter.
    :param param_filter: either a list of ids/names (matched against the
        canonical int/str key field of *type_of_list*) or a dict whose
        key/value pairs must all match the record (keys may be decorated
        with '|' which is stripped before comparison).
    :param type_of_list: one of 'persons', 'keys', 'site', 'node', 'slice';
        selects which record field the int/str filters compare against.
    :param return_fields: optional list of fields to project matches onto.
    :returns: list of matching (possibly projected) records; [] on an
        unknown *type_of_list*.
    """
    list_type = { 'persons': {'str': 'email','int':'person_id'},
                  'keys':{'int':'key_id'},
                  'site':{'str':'login_base','int':'site_id'},
                  'node':{'str':'hostname','int':'node_id'},
                  'slice':{'str':'name','int':'slice_id'}}

    if type_of_list not in list_type:
        sys.stderr.write(" \r\n type_of_list Error parse_filter %s \n" % (type_of_list,))
        return []

    key_fields = list_type[type_of_list]
    return_filtered_list = []

    for item in list_to_filter:
        tmp_item = {}

        if type(param_filter) is list:
            for p_filter in param_filter:
                # pick the record field matching the filter's type; some
                # list types (e.g. 'keys') define no string key, which
                # used to raise KeyError -- now such filters simply never
                # match.
                if type(p_filter) is int:
                    field = key_fields.get('int')
                elif type(p_filter) is str:
                    field = key_fields.get('str')
                else:
                    field = None
                if field is None or item[field] != p_filter:
                    continue
                if return_fields:
                    tmp_item = dict((f, item[f]) for f in return_fields if f in item)
                else:
                    tmp_item = item
                return_filtered_list.append(tmp_item)

        elif type(param_filter) is dict:
            # strip the '|' decoration from the filter keys, then compare
            # the filter against the record restricted to those keys
            # (py3-portable replacement for the old cmp()-based check).
            stripped_filterdict = dict((str(k).strip('|'), v)
                                       for k, v in param_filter.items())
            relevant = dict((k, v) for k, v in item.items()
                            if k in stripped_filterdict)
            if relevant == stripped_filterdict:
                if return_fields:
                    tmp_item = dict((f, item[f]) for f in return_fields if f in item)
                else:
                    tmp_item = item
                return_filtered_list.append(tmp_item)

    return return_filtered_list
\ No newline at end of file
--- /dev/null
+#!/usr/bin/python
+#
+##
# Import PLC records into the SFA database. It is intended that this tool be
+# run once to create SFA records that reflect the current state of the
+# planetlab database.
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
import getopt
import os
import sys
import tempfile
+
+
+from sfa.util.record import *
+from sfa.util.table import SfaTable
+from sfa.util.xrn import get_leaf, get_authority
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroots import *
+from sfa.trust.hierarchy import *
+from sfa.util.xrn import Xrn
+from sfa.trust.gid import create_uuid
+
+
+from sfa.senslab.SenslabImportUsers import *
+from sfa.senslab.OARrestapi import *
+
+from sfa.senslab.SenslabImport import SenslabImport
+
+
+
+
+
# Connection settings for the OAR REST API queried by this importer.
oarserver = {}
oarserver['ip'] = '10.127.255.254'
oarserver['port'] = 80
oarserver['uri'] = '/oarapi/resources/full.json'
+
+
def process_options():
    """Parse command-line options.

    No options are currently defined; the loop is scaffolding so options
    can be added later.  Parsed values are discarded.
    """
    (options, args) = getopt.getopt(sys.argv[1:], '', [])
    for opt in options:
        name = opt[0]
        val = opt[1]
+
+
def load_keys(filename):
    """Load the persisted person-keys dict from *filename*.

    The file is a python snippet assigning a 'keys' variable (written by
    save_keys).  Returns {} when the file is missing, unreadable or does
    not define 'keys' -- loading is deliberately best-effort.
    """
    keys = {}
    tmp_dict = {}
    try:
        # exec(...) with an explicit namespace is the py2/py3-portable
        # spelling of execfile(); 'with' guarantees the file is closed.
        with open(filename) as keys_file:
            exec(keys_file.read(), tmp_dict)
        if 'keys' in tmp_dict:
            keys = tmp_dict['keys']
        return keys
    except Exception:
        # best-effort: a missing/corrupt cache just means "no known keys",
        # but unlike the old bare 'except:' this no longer swallows
        # KeyboardInterrupt/SystemExit.
        return keys
+
def save_keys(filename, keys):
    """Persist *keys* to *filename* as a python snippet re-readable by
    load_keys (a single 'keys = ...' assignment)."""
    # 'with' closes the file even if the write raises (the old open/close
    # pair leaked the handle on failure).
    with open(filename, 'w') as f:
        f.write("keys = %s" % str(keys))
+
+def main():
+
+ process_options()
+ config = Config()
+ if not config.SFA_REGISTRY_ENABLED:
+ sys.exit(0)
+ root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ interface_hrn = config.SFA_INTERFACE_HRN
+ print interface_hrn, root_auth
+ keys_filename = config.config_path + os.sep + 'person_keys.py'
+
+ SenslabImporter = SenslabImport()
+ SenslabUsers = SenslabImportUsers()
+
+ OARImporter = OARapi()
+ #print '\r\n =====OAR Importer list===== '
+ #for node in OARImporter.OARserver.GetNodes().keys():
+ #print node, OARImporter.OARserver.GetNodes[node]
+
+
+ #if config.SFA_API_DEBUG: SenslabImporter.logger.setLevelDebug()
+ #shell = sfaImporter.shell
+ #plc_auth = sfaImporter.plc_auth
+ #print plc_auth
+
+ # initialize registry db table
+ table = SfaTable()
+ if not table.exists():
+ table.create()
+
+ # create root authority
+ SenslabImporter.create_top_level_auth_records(root_auth)
+ if not root_auth == interface_hrn:
+ SenslabImporter.create_top_level_auth_records(interface_hrn)
+
+ # create s user record for the slice manager
+ SenslabImporter.create_sm_client_record()
+
+ # create interface records ADDED 12 JUILLET 2011
+ SenslabImporter.logger.info("Import: creating interface records")
+ SenslabImporter.create_interface_records()
+
+ # add local root authority's cert to trusted list ADDED 12 JUILLET 2011
+ SenslabImporter.logger.info("Import: adding " + interface_hrn + " to trusted list")
+ authority = SenslabImporter.AuthHierarchy.get_auth_info(interface_hrn)
+ SenslabImporter.TrustedRoots.add_gid(authority.get_gid_object())
+
+
+ print "\r\n \r\n create dict of all existing sfa records"
+ # create dict of all existing sfa records
+ existing_records = {}
+ existing_hrns = []
+ key_ids = []
+ person_keys = {}
+ results = table.find()
+ for result in results:
+ existing_records[(result['hrn'], result['type'])] = result
+ existing_hrns.append(result['hrn'])
+
+
+
+
+ #Get Senslab nodes
+ nodes_dict = OARImporter.GetNodes()
+ print "\r\n NODES8DICT ",nodes_dict
+
+ persons_list = SenslabUsers.GetPersons()
+ print "\r\n PERSONS_LIST ",persons_list
+
+ keys_list = SenslabUsers.GetKeys()
+ print "\r\n KEYSS_LIST ",keys_list
+
+ slices_list = SenslabUsers.GetSlices()
+ print "\r\n SLICES_LIST ",slices_list
+
+ # Get all Senslab sites
+ sites_dict = OARImporter.GetSites()
+ print "\r\n sSITES_DICT" , sites_dict
+
+ # start importing
+ for site in sites_dict:
+ site_hrn = interface_hrn + "." + site['login_base']
+ #sfa_logger().info("Importing site: %s" % site_hrn)
+ print "HRN %s %s site existing in hrn ? %s" %( site['login_base'],site_hrn, site_hrn in existing_hrns)
+ # import if hrn is not in list of existing hrns or if the hrn exists
+ # but its not a site record
+ if site_hrn not in existing_hrns or \
+ (site_hrn, 'authority') not in existing_records:
+ print "SITE HRN UNKNOWN" , site, site_hrn
+ site_hrn = SenslabImporter.import_site(interface_hrn, site)
+
+ print "\r\n \r\n ===========IMPORT NODE_RECORDS ==========\r\n site %s \r\n \t nodes_dict %s" %(site,nodes_dict)
+ # import node records
+ for node_id in site['node_ids']:
+ #for[node['node_id'] for node in nodes_dict]:
+ #print '\r\n \t **NODE_ID %s node %s '%( node_id, node)
+ #continue
+ for node in nodes_dict:
+ if node_id is node['node_id']:
+ #node = nodes_dict[node_id]
+ print '\r\n \t NODE_ID %s node %s '%( node_id, node)
+ hrn = hostname_to_hrn(interface_hrn, site['login_base'], node['hostname'])
+ break
+
+ if hrn not in existing_hrns or \
+ (hrn, 'node') not in existing_records:
+ print "\t\t NODE HRN NOT in existing records!" ,hrn
+ SenslabImporter.import_node(hrn, node)
+
+ # import persons
+ for person in persons_list:
+ hrn = email_to_hrn(site_hrn, person['email'])
+ print >>sys.stderr, "\r\n\r\n^^^^^^^^^^^^^PERSON hrn %s person %s site hrn %s" %(hrn,person,site_hrn)
+ SenslabImporter.import_person( site_hrn, person,keys_list)
+
+# import slices
+ for slice_id in site['slice_ids']:
+ print >>sys.stderr, "\r\n\r\n \t ^^^^^^^\\\\\\\\\\\\\\\^^^^^^ slice_id %s " %(slice_id)
+ for sl in slices_list:
+ if slice_id is sl['slice_id']:
+ #hrn = slicename_to_hrn(interface_hrn, sl['name'])
+ hrn = email_to_hrn(site_hrn, sl['name'])
+ print >>sys.stderr, "\r\n\r\n^^^^^^^^^^^^^SLICE ID hrn %s site_hrn %s" %(hrn,site_hrn)
+ if hrn not in existing_hrns or \
+ (hrn, 'slice') not in existing_records:
+ SenslabImporter.import_slice(site_hrn, sl)
+
+
+ # remove stale records
+ system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ for (record_hrn, type) in existing_records.keys():
+ if record_hrn in system_records:
+ continue
+
+ record = existing_records[(record_hrn, type)]
+ if record['peer_authority']:
+ continue
+ ## remove stale records
+ #for (record_hrn, type) in existing_records.keys():
+ #record = existing_records[(record_hrn, type)]
+ #print" \r\n ****record hrn %s \t\t TYPE %s " %(record_hrn,type)
+ ## if this is the interface name dont do anything
+ #if record_hrn == interface_hrn or \
+ #record_hrn == root_auth or \
+ #record['peer_authority']:
+ #continue
+
+
+ found = False
+
+ if type == 'authority':
+ for site in sites_dict:
+ print "\t type : authority : ", site
+ site_hrn = interface_hrn + "." + site['login_base']
+ if site_hrn == record_hrn and site['site_id'] == record['pointer']:
+ found = True
+ print "\t \t Found :", found
+ break
+
+ elif type == 'node':
+ login_base = get_leaf(get_authority(record_hrn))
+
+ nodename = Xrn.unescape(get_leaf(record_hrn))
+ print "type: node : login_base %s nodename %s" %(login_base, nodename)
+ if login_base in sites_dict:
+ site = sites_dict[login_base]
+ print "\t type node : login base %s site %s" %(login_base, site)
+ for node in nodes_dict.values():
+ tmp_nodename = node['hostname']
+ if tmp_nodename == nodename and \
+ node['site_id'] == site['site_id'] and \
+ node['node_id'] == record['pointer']:
+ found = True
+ print "\t Nodename: %s site id %s node id %s record %s" %( nodename, node['site_id'], node['node_id'],record['pointer'])
+ break
+ else:
+ continue
+
+ if not found:
+ record_object = existing_records[(record_hrn, type)]
+ print "\t\t NOT FOUND ! "
+ SenslabImporter.delete_record(record_hrn, type)
+
+ # save pub keys
+ SenslabImporter.logger.info('Import: saving current pub keys')
+ save_keys(keys_filename, person_keys)
+
+
+
+if __name__ == "__main__":
+ main()
--- /dev/null
+#!/bin/bash
+#
+# sfa starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description: starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
start() {
    # Launch each enabled SFA service as a daemon.  The flag passed to
    # sfa-server.py selects the role: -r registry, -a aggregate,
    # -s slice manager.

    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
    fi

    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
    fi

    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
    fi

    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
    fi

    # NOTE(review): RETVAL only reflects the status of the last command
    # run above, so an earlier failed service still creates the lock
    # file -- confirm whether that is intended.
    RETVAL=$?
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py

}
+
stop() {
    # Kill the running sfa-server.py daemons and clear the subsys lock
    # on success.
    action $"Shutting down SFA" killproc sfa-server.py
    RETVAL=$?

    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
}
+
+
# Dispatch on the requested init action.
case "$1" in
    start) start ;;
    stop) stop ;;
    # Bug fix: the old line was 'reload) reload force ;;' but no reload()
    # function exists anywhere in this script, so it failed with
    # "command not found".  Fall back to a full restart, the closest
    # supported behaviour.
    reload) stop; start ;;
    restart) stop; start ;;
    condrestart)
        # restart only if the service was already running
        if [ -f /var/lock/subsys/sfa-server.py ]; then
            stop
            start
        fi
        ;;
    status)
        status sfa-server.py
        RETVAL=$?
        ;;
    *)
        echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
        exit 1
        ;;
esac

exit $RETVAL
+
--- /dev/null
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+import getopt
+import sys
+import tempfile
+
+from sfa.util.sfalogging import sfa_logger_goes_to_import,sfa_logger
+
+from sfa.util.record import *
+from sfa.util.table import SfaTable
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroot import *
+from sfa.trust.hierarchy import *
+from sfa.trust.gid import create_uuid
+
+
def _un_unicode(str):
    """Force a unicode object down to a plain ascii str (lossy: non-ascii
    characters are dropped); any other value passes through unchanged.

    NOTE: the parameter shadows the builtin 'str'; kept as-is so the
    call interface does not change.
    """
    if isinstance(str, unicode):
        return str.encode("ascii", "ignore")
    else:
        return str
+
def _cleanup_string(str):
    """Sanitize a name for use inside an hrn.

    Drops characters above 7-bit ascii (pgsql has a fit with high ascii
    in hrns), de-unicodes the result, then maps spaces and hrn-hostile
    punctuation to underscores.
    """
    # keep only 7-bit ascii characters
    ascii_only = "".join(c for c in str if ord(c) < 128)
    cleaned = _un_unicode(ascii_only)
    # each of these characters would break hrn parsing
    for hostile in (" ", ".", "(", "'", ")", '"'):
        cleaned = cleaned.replace(hostile, "_")
    return cleaned
+
class sfaImport:
    """Import helper: creates authority, interface, user, slice and node
    records in the SFA registry, mirroring objects from the testbed DB.

    Each import_* method follows the same pattern: build (or reuse) a GID,
    wrap it in an SfaRecord, then insert it into the SfaTable or update
    the existing row.
    """

    def __init__(self):
        # route all sfa logging to the import log
        sfa_logger_goes_to_import()
        self.logger = sfa_logger()
        self.AuthHierarchy = Hierarchy()
        self.config = Config()
        self.TrustedRoots = TrustedRootList(Config.get_trustedroots_dir(self.config))

        self.plc_auth = self.config.get_plc_auth()
        self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
        print>>sys.stderr, "\r\n ========= \t\t sfaImport plc_auth %s root_auth %s \r\n" %( self.plc_auth, self.root_auth )
        # connect to planetlab -- remotely when a Url is configured,
        # otherwise through a local PLC shell.
        # NOTE(review): this senslab copy still talks to PLC; confirm it
        # is the intended backend here.
        self.shell = None
        if "Url" in self.plc_auth:
            from sfa.plc.remoteshell import RemoteShell
            self.shell = RemoteShell(self.logger)
        else:
            import PLC.Shell
            self.shell = PLC.Shell.Shell(globals = globals())

    def create_top_level_auth_records(self, hrn):
        """
        Create top level records (includes root and sub authorities (local/remote)
        """

        urn = hrn_to_urn(hrn, 'authority')
        # make sure parent exists; recurse upward until the root, which
        # is its own parent
        parent_hrn = get_authority(hrn)
        if not parent_hrn:
            parent_hrn = hrn
        if not parent_hrn == hrn:
            self.create_top_level_auth_records(parent_hrn)
            print>>sys.stderr, "\r\n =========create_top_level_auth_records parent_hrn \r\n", parent_hrn

        # create the authority if it doesnt already exist
        if not self.AuthHierarchy.auth_exists(urn):
            self.logger.info("Import: creating top level authorities")
            self.AuthHierarchy.create_auth(urn)

        # create the db record if it doesnt already exist
        auth_info = self.AuthHierarchy.get_auth_info(hrn)
        table = SfaTable()
        auth_record = table.find({'type': 'authority', 'hrn': hrn})

        if not auth_record:
            # pointer=-1 marks records with no testbed-side counterpart
            auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
            auth_record['authority'] = get_authority(auth_record['hrn'])
            self.logger.info("Import: inserting authority record for %s"%hrn)
            table.insert(auth_record)
            print>>sys.stderr, "\r\n ========= \t\t NO AUTH RECORD \r\n" ,auth_record['authority']

    def create_interface_records(self):
        """
        Create a record for each SFA interface
        """
        # just create certs for all sfa interfaces even if they
        # arent enabled
        interface_hrn = self.config.SFA_INTERFACE_HRN
        interfaces = ['authority+sa', 'authority+am', 'authority+sm']
        table = SfaTable()
        auth_info = self.AuthHierarchy.get_auth_info(interface_hrn)
        pkey = auth_info.get_pkey_object()
        for interface in interfaces:
            interface_record = table.find({'type': interface, 'hrn': interface_hrn})
            if not interface_record:
                self.logger.info("Import: interface %s %s " % (interface_hrn, interface))
                urn = hrn_to_urn(interface_hrn, interface)
                gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
                record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)
                record['authority'] = get_authority(interface_hrn)
                print>>sys.stderr,"\r\n ==========create_interface_records", record['authority']
                table.insert(record)

    def import_person(self, parent_hrn, person):
        """
        Register a user record

        Builds the user's GID from the public key uploaded to the testbed
        (falling back to a freshly generated keypair when the user has no
        key), then inserts or updates the registry record.
        """
        hrn = email_to_hrn(parent_hrn, person['email'])

        print >>sys.stderr , "\r\n_____00______SfaImport : person", person
        # ASN.1 will have problems with hrn's longer than 64 characters
        if len(hrn) > 64:
            hrn = hrn[:64]
        print >>sys.stderr , "\r\n_____0______SfaImport : parent_hrn", parent_hrn
        self.logger.info("Import: person %s"%hrn)
        key_ids = []
        if 'key_ids' in person and person['key_ids']:
            key_ids = person["key_ids"]
            # get the user's private key from the SSH keys they have uploaded
            # to planetlab
            keys = self.shell.GetKeys(self.plc_auth, key_ids)
            print >>sys.stderr , "\r\n_____1______SfaImport : self.plc_auth %s \r\n \t keys %s " %(self.plc_auth,keys)
            # NOTE(review): keys[0] raises IndexError if GetKeys returns
            # an empty list despite key_ids being non-empty -- confirm
            # GetKeys' contract.
            key = keys[0]['key']
            pkey = convert_public_key(key)
            print >>sys.stderr , "\r\n_____2______SfaImport : key %s pkey %s"% (key,pkey.as_pem())
            if not pkey:
                pkey = Keypair(create=True)
        else:
            # the user has no keys
            self.logger.warning("Import: person %s does not have a PL public key"%hrn)
            # if a key is unavailable, then we still need to put something in the
            # user's GID. So make one up.
            pkey = Keypair(create=True)
            print >>sys.stderr , "\r\n___ELSE________SfaImport pkey : %s \r\n \t pkey.key.bits%s "%(dir(pkey.key), pkey.as_pem())
        # create the gid
        urn = hrn_to_urn(hrn, 'user')
        print >>sys.stderr , "\r\n \t\t : urn ", urn
        person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        table = SfaTable()
        person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id'])
        person_record['authority'] = get_authority(person_record['hrn'])
        existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']})
        if not existing_records:
            table.insert(person_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            person_record['record_id'] = existing_record['record_id']
            table.update(person_record)

    def import_slice(self, parent_hrn, slice):
        """Register (or update) a slice record under *parent_hrn*.

        The hrn leaf is the slice name with its site prefix stripped
        (text before the first '_') and hrn-hostile characters replaced.
        """
        slicename = slice['name'].split("_",1)[-1]
        slicename = _cleanup_string(slicename)

        if not slicename:
            self.logger.error("Import: failed to parse slice name %s" %slice['name'])
            return

        hrn = parent_hrn + "." + slicename
        self.logger.info("Import: slice %s"%hrn)

        # slices get a freshly generated keypair for their GID
        pkey = Keypair(create=True)
        urn = hrn_to_urn(hrn, 'slice')
        slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'])
        slice_record['authority'] = get_authority(slice_record['hrn'])
        table = SfaTable()
        existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']})
        if not existing_records:
            table.insert(slice_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            slice_record['record_id'] = existing_record['record_id']
            table.update(slice_record)

    def import_node(self, hrn, node):
        """Register (or update) a node record for *hrn* pointing at
        node['node_id']; nodes get a freshly generated keypair."""
        self.logger.info("Import: node %s" % hrn)
        # ASN.1 will have problems with hrn's longer than 64 characters
        if len(hrn) > 64:
            hrn = hrn[:64]

        table = SfaTable()
        node_record = table.find({'type': 'node', 'hrn': hrn})
        pkey = Keypair(create=True)
        urn = hrn_to_urn(hrn, 'node')
        node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
        node_record['authority'] = get_authority(node_record['hrn'])
        existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
        if not existing_records:
            table.insert(node_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            node_record['record_id'] = existing_record['record_id']
            table.update(node_record)

    def import_site(self, parent_hrn, site):
        """Register (or update) a site as an authority record and return
        its hrn (derived from the sanitized login_base)."""
        shell = self.shell
        plc_auth = self.plc_auth
        print >>sys.stderr , " \r\n !!!!!!!!! import_site plc_shell %s \r\n \t type %s dir %s" %(shell, type(shell),dir(shell))
        sitename = site['login_base']
        sitename = _cleanup_string(sitename)
        hrn = parent_hrn + "." + sitename

        # Hardcode 'internet2' into the hrn for sites hosting
        # internet2 nodes. This is a special operation for some vini
        # sites only
        if ".vini" in parent_hrn and parent_hrn.endswith('vini'):
            if sitename.startswith("i2"):
                #sitename = sitename.replace("ii", "")
                hrn = ".".join([parent_hrn, "internet2", sitename])
            elif sitename.startswith("nlr"):
                #sitename = sitename.replace("nlr", "")
                hrn = ".".join([parent_hrn, "internet2", sitename])

        urn = hrn_to_urn(hrn, 'authority')
        self.logger.info("Import: site %s"%hrn)
        print >>sys.stderr , " \r\n !!!!!!!!! import_site sitename %s sitename %s \r\n \t hrn %s urn %s" %(site['login_base'],sitename, hrn,urn)
        # create the authority
        if not self.AuthHierarchy.auth_exists(urn):
            self.AuthHierarchy.create_auth(urn)

        auth_info = self.AuthHierarchy.get_auth_info(urn)

        table = SfaTable()
        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id'])
        auth_record['authority'] = get_authority(auth_record['hrn'])
        existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']})
        if not existing_records:
            table.insert(auth_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            auth_record['record_id'] = existing_record['record_id']
            table.update(auth_record)

        return hrn

    def delete_record(self, hrn, type):
        """Remove every registry record matching (*hrn*, *type*)."""
        # delete the record
        table = SfaTable()
        record_list = table.find({'type': type, 'hrn': hrn})
        for record in record_list:
            self.logger.info("Import: removing record %s %s" % (type, hrn))
            table.remove(record)
--- /dev/null
+from sfa.util.faults import MissingSfaInfo
+from sfa.util.sfalogging import logger
+from sfa.util.table import SfaTable
+from sfa.util.defaultdict import defaultdict
+
+from sfa.util.xrn import hrn_to_urn
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of
+# SlabDriver should be really only about talking to the senslab testbed
+
+## thierry : please avoid wildcard imports :)
+from sfa.senslab.OARrestapi import *
+from sfa.senslab.SenslabImportUsers import *
+
def list_to_dict(recs, key):
    """
    Index a list of dictionaries by one of their fields: return a dict
    mapping rec[key] -> rec for every rec in recs (on duplicate keys the
    later record wins).
    """
    return dict((rec[key], rec) for rec in recs)
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver (OARapi, SenslabImportUsers):
+
    def __init__(self, config):
        """Bind the driver to *config*; the driver acts as its own OAR
        api handle and senslab-user api handle (it inherits both)."""
        self.config=config
        self.hrn = config.SFA_INTERFACE_HRN

        print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n"
        # thierry - just to not break the rest of this code
        #self.oar = OARapi()
        #self.users = SenslabImportUsers()
        # the class inherits both OARapi and SenslabImportUsers, so the
        # instance can serve as its own 'oar' and 'users' endpoints
        self.oar = self
        self.users = self
        self.time_format = "%Y-%m-%d %H:%M:%S"
        #self.logger=sfa_logger()
        #print >>sys.stderr, "\r\n \t\t___________PSFA SENSLAN /API.PY __init__ STOP ",self.interface #dir(self)
+
+ ##
    # Convert SFA fields to PLC fields for use when registering or updating
+ # registry record in the PLC database
+ #
+ # @param type type of record (user, slice, ...)
+ # @param hrn human readable name
+ # @param sfa_fields dictionary of SFA fields
+ # @param pl_fields dictionary of PLC fields (output)
+
    def sfa_fields_to_pl_fields(self, type, hrn, record):
        """Map an SFA *record* of the given *type* ('slice', 'node' or
        'authority') to a dict of PLC-style fields derived from *hrn*
        and the record's own fields.  Returns the new dict.
        """

        def convert_ints(tmpdict, int_fields):
            # coerce the named fields to int in place
            for field in int_fields:
                if field in tmpdict:
                    tmpdict[field] = int(tmpdict[field])

        pl_record = {}
        #for field in record:
        #    pl_record[field] = record[field]
        # NOTE(review): with the copy loop above commented out pl_record
        # starts empty, so every "if not ... in pl_record" guard below is
        # always true -- confirm whether the copy was meant to stay.

        if type == "slice":
            if not "instantiation" in pl_record:
                pl_record["instantiation"] = "plc-instantiated"
            pl_record["name"] = hrn_to_pl_slicename(hrn)
            if "url" in record:
                pl_record["url"] = record["url"]
            if "description" in record:
                pl_record["description"] = record["description"]
            if "expires" in record:
                pl_record["expires"] = int(record["expires"])

        elif type == "node":
            if not "hostname" in pl_record:
                # a node record without a hostname cannot be mapped
                if not "hostname" in record:
                    raise MissingSfaInfo("hostname")
                pl_record["hostname"] = record["hostname"]
            if not "model" in pl_record:
                pl_record["model"] = "geni"

        elif type == "authority":
            pl_record["login_base"] = hrn_to_pl_login_base(hrn)

            if not "name" in pl_record:
                pl_record["name"] = hrn

            if not "abbreviated_name" in pl_record:
                pl_record["abbreviated_name"] = hrn

            if not "enabled" in pl_record:
                pl_record["enabled"] = True

            if not "is_public" in pl_record:
                pl_record["is_public"] = True

        return pl_record
+
    def fill_record_pl_info(self, records):
        """
        Fill in the planetlab specific fields of a SFA record. This
        involves calling the appropriate PLC method to retrieve the
        database record for the object.

        PLC data is filled into the pl_info field of the record.

        @param record: record to fill in field (in/out param)
        """
        # pass 1: collect the testbed-side ids referenced by the records,
        # bucketed per record type
        node_ids, site_ids, slice_ids = [], [], []
        person_ids, key_ids = [], []
        type_map = {'node': node_ids, 'authority': site_ids,
                    'slice': slice_ids, 'user': person_ids}

        for record in records:
            for type in type_map:
                if type == record['type']:
                    type_map[type].append(record['pointer'])

        # pass 2: one bulk lookup per record type, indexed by id
        nodes, sites, slices, persons, keys = {}, {}, {}, {}, {}
        if node_ids:
            node_list = self.oar.GetNodes( node_ids)
            nodes = list_to_dict(node_list, 'node_id')
        if site_ids:
            site_list = self.oar.GetSites( site_ids)
            sites = list_to_dict(site_list, 'site_id')
        if slice_ids:
            slice_list = self.users.GetSlices( slice_ids)
            slices = list_to_dict(slice_list, 'slice_id')
        if person_ids:
            person_list = self.users.GetPersons( person_ids)
            persons = list_to_dict(person_list, 'person_id')
            # collect the key ids of every matched person for the key
            # lookup below
            for person in persons:
                key_ids.extend(persons[person]['key_ids'])

        pl_records = {'node': nodes, 'authority': sites,
                      'slice': slices, 'user': persons}

        if key_ids:
            key_list = self.users.GetKeys( key_ids)
            keys = list_to_dict(key_list, 'key_id')

        # pass 3: merge the looked-up testbed fields into each record
        for record in records:
            # records with pointer==-1 do not have plc info.
            # for example, the top level authority records which are
            # authorities, but not PL "sites"
            if record['pointer'] == -1:
                continue

            for type in pl_records:
                if record['type'] == type:
                    if record['pointer'] in pl_records[type]:
                        record.update(pl_records[type][record['pointer']])
                    break
            # fill in key info
            if record['type'] == 'user':
                if 'key_ids' not in record:
                    logger.info("user record has no 'key_ids' - need to import ?")
                else:
                    pubkeys = [keys[key_id]['key'] for key_id in record['key_ids'] if key_id in keys]
                    record['keys'] = pubkeys

        # fill in record hrns
        records = self.fill_record_hrns(records)

        return records
+
+    def fill_record_hrns(self, records):
+        """
+        Convert testbed-local ids (site/person/slice/node ids) found in
+        *records* into human-readable hrns, stored in place under the
+        keys 'site', 'persons', 'slices', 'nodes' and 'sites'.
+        Returns the same list of records, mutated in place.
+        """
+        # pass 1: collect every id referenced by any record
+        slice_ids, person_ids, site_ids, node_ids = [], [], [], []
+        for record in records:
+            if 'site_id' in record:
+                site_ids.append(record['site_id'])
+            # BUGFIX: was `if 'site_ids' in records:` - a membership test on
+            # the *list* of records, which never matches; the intent is
+            # clearly to inspect the current record
+            if 'site_ids' in record:
+                site_ids.extend(record['site_ids'])
+            if 'person_ids' in record:
+                person_ids.extend(record['person_ids'])
+            if 'slice_ids' in record:
+                slice_ids.extend(record['slice_ids'])
+            if 'node_ids' in record:
+                node_ids.extend(record['node_ids'])
+
+        # pass 2: resolve the collected ids against the testbed
+        # (OAR for sites/nodes, the user registry for persons/slices)
+        slices, persons, sites, nodes = {}, {}, {}, {}
+        if site_ids:
+            site_list = self.oar.GetSites(site_ids, ['site_id', 'login_base'])
+            sites = list_to_dict(site_list, 'site_id')
+        if person_ids:
+            person_list = self.users.GetPersons(person_ids, ['person_id', 'email'])
+            persons = list_to_dict(person_list, 'person_id')
+        if slice_ids:
+            slice_list = self.users.GetSlices(slice_ids, ['slice_id', 'name'])
+            slices = list_to_dict(slice_list, 'slice_id')
+        if node_ids:
+            node_list = self.oar.GetNodes(node_ids, ['node_id', 'hostname'])
+            nodes = list_to_dict(node_list, 'node_id')
+
+        # pass 3: build hrns rooted at this interface's authority
+        for record in records:
+            type = record['type']
+            pointer = record['pointer']
+            auth_hrn = self.hrn
+            login_base = ''
+            # pointer == -1 flags a record with no testbed counterpart
+            if pointer == -1:
+                continue
+
+            if 'site_id' in record:
+                site = sites[record['site_id']]
+                login_base = site['login_base']
+                record['site'] = ".".join([auth_hrn, login_base])
+            if 'person_ids' in record:
+                # person hrns are <authority>.<login_base>.<email local part>
+                emails = [persons[person_id]['email'] for person_id in record['person_ids'] \
+                          if person_id in persons]
+                usernames = [email.split('@')[0] for email in emails]
+                person_hrns = [".".join([auth_hrn, login_base, username]) for username in usernames]
+                record['persons'] = person_hrns
+            if 'slice_ids' in record:
+                slicenames = [slices[slice_id]['name'] for slice_id in record['slice_ids'] \
+                              if slice_id in slices]
+                slice_hrns = [slicename_to_hrn(auth_hrn, slicename) for slicename in slicenames]
+                record['slices'] = slice_hrns
+            if 'node_ids' in record:
+                hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids'] \
+                             if node_id in nodes]
+                # NOTE(review): hostname_to_hrn is not among the visible
+                # imports (plxrn provides hostname_to_urn) - confirm it is in
+                # scope, otherwise node records raise NameError here
+                node_hrns = [hostname_to_hrn(auth_hrn, login_base, hostname) for hostname in hostnames]
+                record['nodes'] = node_hrns
+            if 'site_ids' in record:
+                login_bases = [sites[site_id]['login_base'] for site_id in record['site_ids'] \
+                               if site_id in sites]
+                site_hrns = [".".join([auth_hrn, lbase]) for lbase in login_bases]
+                record['sites'] = site_hrns
+        return records
+
+    def fill_record_sfa_info(self, records):
+        """
+        Add SFA-level fields (PI / researcher / owner / operator hrn lists,
+        geni_urn, geni_certificate, email, dns) to each record in *records*,
+        in place, based on the record's 'type'.
+        """
+        # local helper; currently unused by the code below
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        SenslabUsers = SenslabImportUsers()
+        # get person ids
+        person_ids = []
+        site_ids = []
+        for record in records:
+            person_ids.extend(record.get("person_ids", []))
+            site_ids.extend(record.get("site_ids", []))
+            if 'site_id' in record:
+                site_ids.append(record['site_id'])
+
+        #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___person_ids %s \r\n \t\t site_ids %s " %(person_ids, site_ids)
+
+        # get all pis from the sites we've encountered
+        # and store them in a dictionary keyed on site_id
+        site_pis = {}
+        if site_ids:
+            # '|' prefix: testbed filter syntax for OR-matching on a list
+            pi_filter = {'|roles': ['pi'], '|site_ids': site_ids}
+            pi_list = SenslabUsers.GetPersons( pi_filter, ['person_id', 'site_ids'])
+            #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ GetPersons ['person_id', 'site_ids'] pi_ilist %s" %(pi_list)
+
+            for pi in pi_list:
+                # we will need the pi's hrns also
+                person_ids.append(pi['person_id'])
+
+                # we also need to keep track of the sites these pis
+                # belong to
+                for site_id in pi['site_ids']:
+                    if site_id in site_pis:
+                        site_pis[site_id].append(pi)
+                    else:
+                        site_pis[site_id] = [pi]
+
+        # get sfa records for all records associated with these records.
+        # we'll replace pl ids (person_ids) with hrns from the sfa records
+        # we obtain
+
+        # get the sfa records
+        table = SfaTable()
+        person_list, persons = [], {}
+        person_list = table.find({'type': 'user', 'pointer': person_ids})
+        # create a hrns keyed on the sfa record's pointer.
+        # Its possible for multiple records to have the same pointer so
+        # the dict's value will be a list of hrns.
+        persons = defaultdict(list)
+        for person in person_list:
+            persons[person['pointer']].append(person)
+
+        # get the pl records
+        pl_person_list, pl_persons = [], {}
+        pl_person_list = SenslabUsers.GetPersons(person_ids, ['person_id', 'roles'])
+        pl_persons = list_to_dict(pl_person_list, 'person_id')
+        #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ _list %s \r\n \t\t SenslabUsers.GetPersons ['person_id', 'roles'] pl_persons %s \r\n records %s" %(pl_person_list, pl_persons,records)
+        # fill sfa info
+
+        for record in records:
+            # skip records with no pl info (top level authorities)
+            #Sandrine 24 oct 11 2 lines
+            #if record['pointer'] == -1:
+            #continue
+            sfa_info = {}
+            type = record['type']
+            if (type == "slice"):
+                # all slice users are researchers
+                #record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice') ? besoin ou pas ?
+                record['PI'] = []
+                record['researcher'] = []
+                for person_id in record.get('person_ids', []):
+                    #Sandrine 24 oct 11 line
+                    #for person_id in record['person_ids']:
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['researcher'].extend(hrns)
+
+                # pis at the slice's site
+                # NOTE(review): assumes the slice record carries 'site_id'
+                # and that site_pis has an entry for it - either missing
+                # raises KeyError; confirm upstream guarantees
+                pl_pis = site_pis[record['site_id']]
+                pi_ids = [pi['person_id'] for pi in pl_pis]
+                for person_id in pi_ids:
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['PI'].extend(hrns)
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                record['geni_creator'] = record['PI']
+
+            elif (type == "authority"):
+                record['PI'] = []
+                record['operator'] = []
+                record['owner'] = []
+                # NOTE(review): unlike the slice branch, 'person_ids' is
+                # accessed without .get() - KeyError if absent
+                for pointer in record['person_ids']:
+                    if pointer not in persons or pointer not in pl_persons:
+                        # this means there is not sfa or pl record for this user
+                        continue
+                    hrns = [person['hrn'] for person in persons[pointer]]
+                    roles = pl_persons[pointer]['roles']
+                    # map testbed roles onto SFA membership lists
+                    if 'pi' in roles:
+                        record['PI'].extend(hrns)
+                    if 'tech' in roles:
+                        record['operator'].extend(hrns)
+                    if 'admin' in roles:
+                        record['owner'].extend(hrns)
+                # xxx TODO: OrganizationName
+            elif (type == "node"):
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+
+            elif (type == "user"):
+                sfa_info['email'] = record.get("email", "")
+                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                sfa_info['geni_certificate'] = record['gid']
+                # xxx TODO: PostalAddress, Phone
+
+            #print>>sys.stderr, "\r\n \r\rn \t\t \t <<<<<<<<<<<<<<<<<<<<<<<< fill_record_sfa_info sfa_info %s \r\n record %s : "%(sfa_info,record)
+            record.update(sfa_info)
+
+    def fill_record_info(self, records):
+        """
+        Given an SFA record, or a list of them, fill in both the
+        testbed-specific (PLC-style) fields and the SFA-specific fields,
+        mutating the records in place.
+        """
+        #print >>sys.stderr, "\r\n \t\t fill_record_info %s"%(records)
+        # normalize: callers may pass a single record instead of a list
+        if not isinstance(records, list):
+            records = [records]
+        #print >>sys.stderr, "\r\n \t\t BEFORE fill_record_pl_info %s" %(records)
+        self.fill_record_pl_info(records)
+        #print >>sys.stderr, "\r\n \t\t after fill_record_pl_info %s" %(records)
+        self.fill_record_sfa_info(records)
+        #print >>sys.stderr, "\r\n \t\t after fill_record_sfa_info"
+
+    def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
+        """
+        Reconcile the membership list *listName* (a list of user hrns)
+        between *oldRecord* and *record*: for every member added, call
+        addFunc(self.plauth, personId, containerId); for every member
+        removed, call delFunc(self.plauth, personId, containerId).
+        oldRecord may be None (Register case: everything is an addition).
+        """
+        # get a list of the HRNs tht are members of the old and new records
+        if oldRecord:
+            oldList = oldRecord.get(listName, [])
+        else:
+            oldList = []
+        newList = record.get(listName, [])
+
+        # if the lists are the same, then we don't have to update anything
+        if (oldList == newList):
+            return
+
+        # build a list of the new person ids, by looking up each person to get
+        # their pointer
+        newIdList = []
+        table = SfaTable()
+        records = table.find({'type': 'user', 'hrn': newList})
+        for rec in records:
+            newIdList.append(rec['pointer'])
+
+        # build a list of the old person ids from the person_ids field
+        if oldRecord:
+            oldIdList = oldRecord.get("person_ids", [])
+            containerId = oldRecord.get_pointer()
+        else:
+            # if oldRecord==None, then we are doing a Register, instead of an
+            # update.
+            oldIdList = []
+            containerId = record.get_pointer()
+
+        # add people who are in the new list, but not the oldList
+        # NOTE(review): self.plauth is not set anywhere in this view -
+        # confirm it is initialized by the enclosing driver
+        for personId in newIdList:
+            if not (personId in oldIdList):
+                addFunc(self.plauth, personId, containerId)
+
+        # remove people who are in the old list, but not the new list
+        for personId in oldIdList:
+            if not (personId in newIdList):
+                delFunc(self.plauth, personId, containerId)
+
+    def update_membership(self, oldRecord, record):
+        """
+        Dispatch membership reconciliation by record type: slices sync
+        their 'researcher' list against the testbed; authority support
+        is not implemented yet.
+        """
+        if record.type == "slice":
+            self.update_membership_list(oldRecord, record, 'researcher',
+                                        self.users.AddPersonToSlice,
+                                        self.users.DeletePersonFromSlice)
+        elif record.type == "authority":
+            # xxx TODO
+            pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
--- /dev/null
+#
+# implements support for SFA records stored in db tables
+#
+# TODO: Use existing PLC database methods? or keep this separate?
+
+
+from sfa.trust.gid import *
+from sfa.util.record import *
+from sfa.util.config import *
+from sfa.util.filter import *
+from sfa.trust.hierarchy import *
+from sfa.trust.certificate import *
+from sfa.trust.auth import *
+from sfa.senslab.OARrestapi import *
+from sfa.senslab.LDAPapi import *
+
+class SfaTable(list):
+    """
+    Registry "table" facade for the Senslab testbed.
+
+    Senslab keeps no SFA records in a SQL table: users live in an LDAP
+    directory and nodes in the OAR scheduler database, so find() fans
+    out to both back-ends.  All write operations (insert/update/remove/
+    drop) are stubs that report success without doing anything.
+    """
+    # root authority name, overwritten from the SFA config in __init__
+    authname = ""
+
+    def __init__(self, record_filter=None):
+        self.oar = OARapi()
+        self.ldap = LDAPapi()
+        self.senslabauth = Hierarchy()
+        config = Config()
+        self.authname = config.SFA_REGISTRY_ROOT_AUTH
+        authinfo = self.senslabauth.get_auth_info(self.authname)
+
+        self.auth = Auth()
+        # NOTE(review): gid is computed but never stored or used
+        gid = authinfo.get_gid_object()
+
+    def exists(self):
+        # the table is virtual: it always "exists"
+        return True
+
+    def db_fields(self, obj=None):
+        # no SQL backend, hence no columns
+        return dict([])
+
+    @staticmethod
+    def is_writable(key, value, dict):
+        # if not mentioned, assume it's writable (e.g. deleted ...)
+        if key not in dict: return True
+        # if mentioned but not linked to a Parameter object, idem
+        if not isinstance(dict[key], Parameter): return True
+        # if not marked ro, it's writable
+        if not dict[key].ro: return True
+
+        return False
+
+    def create(self):
+        return True
+
+    # --- write operations: unsupported on this virtual table, all no-ops
+    def remove(self, record):
+        return 0
+
+    def insert(self, record):
+        return 0
+
+    def update(self, record):
+        return 0
+
+    # NOTE(review): self.db is never assigned in this class, so these two
+    # helpers raise AttributeError if ever called - dead code kept for
+    # interface compatibility with the SQL-backed table
+    def quote_string(self, value):
+        return str(self.db.quote(value))
+
+    def quote(self, value):
+        return self.db.quote(value)
+
+    def oarFind(self, record_filter=None, columns=None):
+        """
+        Resolve *record_filter* against the OAR node database and return
+        a list of node record dicts.  Understands either an 'authority'
+        filter (must be our own authority) or an 'hrn' filter (single
+        hrn or list of hrns under our authority).
+        """
+        results = []
+        node_ids = []
+
+        if 'authority' in record_filter:
+            # ask for authority
+            if record_filter['authority'] == self.authname:
+                # which is senslab
+                # XXX leftover debug output - should be removed
+                print>> sys.stderr , "ET MERDE !!!!"
+                # empty selection means "all nodes" for GetNodes
+                node_ids = ""
+            else:
+                # which is NOT senslab
+                return []
+        else:
+            if not 'hrn' in record_filter:
+                print >>sys.stderr, "find : don't know how to handle filter ", record_filter
+                return []
+            else:
+                # accept a single hrn or a list of hrns
+                hrns = []
+                h = record_filter['hrn']
+                if isinstance(h, list):
+                    hrns = h
+                else:
+                    hrns.append(h)
+
+                for hrn in hrns:
+                    # split off our authority prefix; the tail is the hostname
+                    head, sep, tail = hrn.partition(".")
+                    if head != self.authname:
+                        # BUGFIX: was `splited_hrn[0]`, an undefined name that
+                        # raised NameError on this error path; `head` is the
+                        # authority component computed above
+                        print >>sys.stderr, "i know nothing about", hrn, " my authname is ", self.authname, " not ", head
+                    else:
+                        node_ids.append(tail)
+
+        node_list = self.oar.GetNodes( node_ids)
+
+        for node in node_list:
+            hrn = self.authname + "." + node['hostname']
+            results.append( {
+                'type': 'node',
+#                'email': ldapentry[1]['mail'][0],
+#                'first_name': ldapentry[1]['givenName'][0],
+#                'last_name': ldapentry[1]['sn'][0],
+#                'phone': 'none',
+#                'gid': gid.save_to_string(),
+#                'serial': 'none',
+                'authority': self.authname,
+                'peer_authority': '',
+                'pointer' : '',
+                'hrn': hrn,
+                'date_created' : 'none',
+                'last_updated': 'none'
+                } )
+
+        return results
+
+    def find(self, record_filter=None, columns=None):
+        """
+        Look records up in the two Senslab back-ends: users live in an
+        LDAP directory and nodes in the OAR scheduler database; both may
+        need to be interrogated.  Returns a list of record dicts.
+        """
+        print >>sys.stderr, "find : ", record_filter
+        if not isinstance(record_filter, dict):
+            print >>sys.stderr, "find : record_filter is not a dict"
+            print >>sys.stderr, record_filter.__class__
+            return []
+        allResults = []
+        if 'type' in record_filter:
+            if record_filter['type'] == 'slice':
+                print >>sys.stderr, "find : don't know how to handle slices yet"
+                return []
+            if record_filter['type'] == 'authority':
+                if 'hrn' in record_filter and record_filter['hrn'] == self.authname:
+                    return []
+                else:
+                    print >>sys.stderr, "find which authority ?"
+                    return []
+            if record_filter['type'] == 'user':
+                return self.ldap.ldapFind(record_filter, columns)
+            # NOTE(review): node lookups also go through LDAP here while
+            # oarFind exists for nodes - confirm this is intentional
+            if record_filter['type'] == 'node':
+                return self.ldap.ldapFind(record_filter, columns)
+            else:
+                print >>sys.stderr, "unknown type to find : ", record_filter['type']
+                return []
+        else:
+            # no type given: merge hits from both back-ends
+            allResults = self.ldap.ldapFind(record_filter, columns)
+            allResults += self.oarFind(record_filter, columns)
+
+        return allResults
+
+    def findObjects(self, record_filter=None, columns=None):
+        """
+        Like find(), but wrap each result dict in the matching SfaRecord
+        subclass (AuthorityRecord / NodeRecord / SliceRecord / UserRecord).
+        """
+        print >>sys.stderr, "find : ", record_filter
+        #       print record_filter['type']
+        #       if record_filter['type'] in ['authority']:
+        #           print "findObjectAuthority"
+        results = self.find(record_filter, columns)
+        result_rec_list = []
+        for result in results:
+            if result['type'] in ['authority']:
+                result_rec_list.append(AuthorityRecord(dict=result))
+            elif result['type'] in ['node']:
+                result_rec_list.append(NodeRecord(dict=result))
+            elif result['type'] in ['slice']:
+                result_rec_list.append(SliceRecord(dict=result))
+            elif result['type'] in ['user']:
+                result_rec_list.append(UserRecord(dict=result))
+            else:
+                result_rec_list.append(SfaRecord(dict=result))
+
+        return result_rec_list
+
+    def drop(self):
+        return 0
+
+    def sfa_records_purge(self):
+        return 0
+
@param name human readable name to test
"""
object_hrn = self.object_gid.get_hrn()
- if object_hrn == name:
+ strname = str(name).strip("['']")
+
+ if object_hrn == strname:
return
- if name.startswith(object_hrn + "."):
+ if strname.startswith((object_hrn + ".")) is True:
return
#if name.startswith(get_authority(name)):
#return
-
+ print>>sys.stderr, " \r\n \t AUTH.PY verify_object_permission GROSECHECDELENFER "
raise PermissionError(name)
def determine_user_rights(self, caller_hrn, record):
#!/usr/bin/python
+# just checking write access on repo
import sys
import unittest