Merge branch 'master' into senslab2
authorSandrine Avakian <sandrine.avakian@inria.fr>
Fri, 16 Dec 2011 13:56:38 +0000 (14:56 +0100)
committerSandrine Avakian <sandrine.avakian@inria.fr>
Fri, 16 Dec 2011 13:56:38 +0000 (14:56 +0100)
32 files changed:
setup.py
sfa/client/client_helper.py
sfa/client/sfi.py
sfa/generic/slab.py [new file with mode: 0644]
sfa/managers/aggregate_manager_slab.py [new file with mode: 0644]
sfa/managers/driver.py
sfa/managers/registry_manager_slab.py [new file with mode: 0644]
sfa/managers/senslab/sl.rng [new file with mode: 0644]
sfa/managers/slice_manager_slab.py [new file with mode: 0644]
sfa/methods/CreateSliver.py
sfa/methods/ListResources.py
sfa/plc/plslices.py
sfa/rspecs/pl_rspec_version.py [new file with mode: 0644]
sfa/senslab/LDAPapi.py [new file with mode: 0644]
sfa/senslab/OARrestapi.py [new file with mode: 0644]
sfa/senslab/SenslabImport.py [new file with mode: 0644]
sfa/senslab/SenslabImportUsers.py [new file with mode: 0644]
sfa/senslab/__init__.py [new file with mode: 0644]
sfa/senslab/parsing.py [new file with mode: 0644]
sfa/senslab/sfa-bare [new file with mode: 0755]
sfa/senslab/sfaImport.py [new file with mode: 0644]
sfa/senslab/slab-import.py [new file with mode: 0644]
sfa/senslab/slabaggregate.py [new file with mode: 0644]
sfa/senslab/slabdriver.py [new file with mode: 0644]
sfa/senslab/slabpostgres.py [new file with mode: 0644]
sfa/senslab/slabslices.py [new file with mode: 0644]
sfa/senslab/table_slab.py [new file with mode: 0644]
sfa/trust/auth.py
sfa/trust/credential.py
sfa/util/sfatablesRuntime.py
sfa/util/xrn.py
tests/testXrn.py

index f48cbaf..a8740fc 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -36,6 +36,7 @@ packages = [
     'sfa/managers',
     'sfa/importer',
     'sfa/plc',
+    'sfa/senslab',
     'sfa/rspecs',
     'sfa/rspecs/elements',
     'sfa/rspecs/elements/versions',
index 32e21a1..b33911f 100644 (file)
@@ -1,4 +1,4 @@
-
+import sys
 def pg_users_arg(records):
     users = []  
     for record in records:
@@ -11,19 +11,25 @@ def pg_users_arg(records):
 
 def sfa_users_arg(records, slice_record):
     users = []
+    print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg slice_record %s \r\n records %s"%(slice_record,records)
     for record in records:
         if record['type'] != 'user': 
             continue
-        user = {'urn': record['geni_urn'], #
-                'keys': record['keys'],
-                'email': record['email'], # needed for MyPLC
-                'person_id': record['person_id'], # needed for MyPLC
-                'first_name': record['first_name'], # needed for MyPLC
-                'last_name': record['last_name'], # needed for MyPLC
+        user = {#'urn': record['geni_urn'], 
+                #'keys': record['keys'],
+                #'email': record['email'], # needed for MyPLC
+                'person_id': record['record_id'], 
+                'hrn': record['hrn'],
+                'type': record['type'],
+                'authority' : record['authority'],
+                'gid' : record['gid'],
+                #'first_name': record['first_name'], # needed for MyPLC
+                #'last_name': record['last_name'], # needed for MyPLC
                 'slice_record': slice_record, # needed for legacy refresh peer
-                'key_ids': record['key_ids'] # needed for legacy refresh peer
+                #'key_ids': record['key_ids'] # needed for legacy refresh peer
                 }         
-        users.append(user)
+        users.append(user)   
+        print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg user %s",user
     return users        
 
 def sfa_to_pg_users_arg(users):
index 35165f6..9ed3c91 100644 (file)
@@ -906,6 +906,7 @@ or with an slice hrn, shows currently provisioned resources
                 rspec.filter({'component_manager_id': server_version['urn']})
                 rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
             else:
+                print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
                 users = sfa_users_arg(user_records, slice_record)
         
         # do not append users, keys, or slice tags. Anything 
diff --git a/sfa/generic/slab.py b/sfa/generic/slab.py
new file mode 100644 (file)
index 0000000..5c9f780
--- /dev/null
@@ -0,0 +1,35 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+import sfa.senslab.slabdriver
+import sfa.managers.registry_manager_slab
+import sfa.managers.slice_manager
+import sfa.managers.aggregate_manager_slab
+
+class slab (Generic):
+    
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) : 
+        return sfa.managers.registry_manager_slab
+    def slicemgr_manager_class (self) : 
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        return sfa.managers.aggregate_manager_slab.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
diff --git a/sfa/managers/aggregate_manager_slab.py b/sfa/managers/aggregate_manager_slab.py
new file mode 100644 (file)
index 0000000..c9ebc06
--- /dev/null
@@ -0,0 +1,418 @@
+import datetime
+import time
+import sys
+
+from sfa.util.sfalogging import logger
+from sfa.util.faults import RecordNotFound, SliverDoesNotExist
+from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename
+from sfa.util.version import version_core
+from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
+
+from sfa.trust.sfaticket import SfaTicket
+from sfa.trust.credential import Credential
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.server.sfaapi import SfaApi
+from sfa.senslab.slabaggregate import SlabAggregate
+import sfa.plc.peers as peers
+
+from sfa.senslab.slices import SlabSlices
+
+class AggregateManager:
+    def __init__ (self, config): pass
+    # essentially a union of the core version, the generic version (this code) and
+    # whatever the driver needs to expose
+    
+    
+    def GetVersion(self, api, options):
+
+        xrn=Xrn(api.hrn)
+        version = version_core()
+        version_generic = {'interface':'aggregate',
+                            'sfa': 2,
+                            'geni_api': 2,
+                            'hrn':xrn.get_hrn(),
+                            'urn':xrn.get_urn(),
+                            }
+        version.update(version_generic)
+        testbed_version = self.driver.aggregate_version()
+        version.update(testbed_version)
+        return version
+     
+    #def GetVersion(self, api, options={}):
+    
+        #version_manager = VersionManager()
+        #ad_rspec_versions = []
+        #request_rspec_versions = []
+        #for rspec_version in version_manager.versions:
+            #if rspec_version.content_type in ['*', 'ad']:
+                #ad_rspec_versions.append(rspec_version.to_dict())
+            #if rspec_version.content_type in ['*', 'request']:
+                #request_rspec_versions.append(rspec_version.to_dict()) 
+        #xrn=Xrn(api.hrn)
+        #version_more = {'interface':'aggregate',
+                        #'sfa': 2,
+                        #'geni_api': api.config.SFA_AGGREGATE_API_VERSION,
+                        #'testbed':'myplc',
+                        #'hrn':xrn.get_hrn(),
+                        #'geni_request_rspec_versions': request_rspec_versions,
+                        #'geni_ad_rspec_versions': ad_rspec_versions,
+                        #}
+        #return version_core(version_more)
+    
+    def _get_registry_objects(self, slice_xrn, creds, users):
+        """
+    
+        """
+        hrn, _ = urn_to_hrn(slice_xrn)
+    
+        hrn_auth = get_authority(hrn)
+    
+        # Build up objects that an SFA registry would return if SFA
+        # could contact the slice's registry directly
+        reg_objects = None
+    
+        if users:
+            # dont allow special characters in the site login base
+            #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
+            #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
+            slicename = hrn_to_pl_slicename(hrn)
+            login_base = slicename.split('_')[0]
+            reg_objects = {}
+            site = {}
+            site['site_id'] = 0
+            site['name'] = 'geni.%s' % login_base 
+            site['enabled'] = True
+            site['max_slices'] = 100
+    
+            # Note:
+            # Is it okay if this login base is the same as one already at this myplc site?
+            # Do we need uniqueness?  Should use hrn_auth instead of just the leaf perhaps?
+            site['login_base'] = login_base
+            site['abbreviated_name'] = login_base
+            site['max_slivers'] = 1000
+            reg_objects['site'] = site
+    
+            slice = {}
+            
+            # get_expiration always returns a normalized datetime - no need to utcparse
+            extime = Credential(string=creds[0]).get_expiration()
+            # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+            if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+                extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+            slice['expires'] = int(time.mktime(extime.timetuple()))
+            slice['hrn'] = hrn
+            slice['name'] = hrn_to_pl_slicename(hrn)
+            slice['url'] = hrn
+            slice['description'] = hrn
+            slice['pointer'] = 0
+            reg_objects['slice_record'] = slice
+    
+            reg_objects['users'] = {}
+            for user in users:
+                user['key_ids'] = []
+                hrn, _ = urn_to_hrn(user['urn'])
+                user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
+                user['first_name'] = hrn
+                user['last_name'] = hrn
+                reg_objects['users'][user['email']] = user
+    
+            return reg_objects
+    
+    def SliverStatus(self, api, slice_xrn, creds, options={}):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return {}
+    
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        # find out where this slice is currently running
+        slicename = hrn_to_pl_slicename(hrn)
+        
+        slices = api.driver.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
+        if len(slices) == 0:        
+            raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
+        slice = slices[0]
+        
+        # report about the local nodes only
+        nodes = api.driver.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
+                                     ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+        site_ids = [node['site_id'] for node in nodes]
+    
+        result = {}
+        top_level_status = 'unknown'
+        if nodes:
+            top_level_status = 'ready'
+        slice_urn = Xrn(slice_xrn, 'slice').get_urn()
+        result['geni_urn'] = slice_urn
+        result['pl_login'] = slice['name']
+        result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+        
+        resources = []
+        for node in nodes:
+            res = {}
+            res['pl_hostname'] = node['hostname']
+            res['pl_boot_state'] = node['boot_state']
+            res['pl_last_contact'] = node['last_contact']
+            if node['last_contact'] is not None:
+                res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            res['geni_urn'] = sliver_id
+            if node['boot_state'] == 'boot':
+                res['geni_status'] = 'ready'
+            else:
+                res['geni_status'] = 'failed'
+                top_level_status = 'failed' 
+                
+            res['geni_error'] = ''
+    
+            resources.append(res)
+            
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+        return result
+    
+    def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, options={}):
+        """
+        Create the sliver[s] (slice) at this aggregate.    
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return ""
+        aggregate = SlabAggregate(api)
+        #aggregate = Aggregate(api)
+        slices = SlabSlices(api)
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+            print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice slice_record : ", slice_record
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
+        
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        print>>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice users : ", users
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice slice : ", slice
+        # ensure person records exists
+        persons = slices.verify_persons(hrn, slice, users)
+        #persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exists
+        #slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # add/remove slice from nodes
+        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+        print >>sys.stderr, " \r\n \t AGGREGATESLAB.PY Slice requested_slivers : ", requested_slivers
+        slices.verify_slice_nodes(slice, requested_slivers, peer) 
+   
+        # add/remove links links 
+        #slices.verify_slice_links(slice, rspec.version.get_link_requests(), aggregate)
+    
+        # handle MyPLC peer association.
+        # only used by plc and ple.
+        slices.handle_peer(site, slice, persons, peer)
+        
+        return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
+    
+    
+    def RenewSliver(self, api, xrn, creds, expiration_time, options={}):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return True
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        record = {'expires': int(time.mktime(requested_time.timetuple()))}
+        try:
+            api.driver.UpdateSlice(slice['slice_id'], record)
+            return True
+        except:
+            return False
+    
+    def start_slice(self, api, xrn, creds):
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+        # just remove the tag if it exists
+        if slice_tags:
+            api.driver.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
+    
+        return 1
+     
+    def stop_slice(self, api, xrn, creds):
+        hrn, _ = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
+        if not slice_tags:
+            api.driver.AddSliceTag(slice_id, 'enabled', '0')
+        elif slice_tags[0]['value'] != "0":
+            tag_id = slice_tags[0]['slice_tag_id']
+            api.driver.UpdateSliceTag(tag_id, '0')
+        return 1
+    
+    def reset_slice(self, api, xrn):
+        # XX not implemented at this interface
+        return 1
+    
+    def DeleteSliver(self, api, xrn, creds, options={}):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return ""
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+    
+        # determine if this is a peer slice
+        peer = peers.get_peer(api, hrn)
+        try:
+            if peer:
+                api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            api.driver.DeleteSliceFromNodes(slicename, slice['node_ids'])
+        finally:
+            if peer:
+                api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+    
+    def ListSlices(self, api, creds, options={}):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return []
+        # look in cache first
+        #if self.caching and api.cache:
+            #slices = api.cache.get('slices')
+            #if slices:
+                #return slices
+    
+        # get data from db 
+        slices = api.driver.GetSlices({'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+    
+        # cache the result
+        #if self.caching and api.cache:
+            #api.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+        
+    def ListResources(self, api, creds, options={}):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return ""
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', None)
+        cached = options.get('cached', True) 
+        (hrn, _) = urn_to_hrn(xrn)
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        #if self.cache and api.cache and not xrn and cached:
+            #rspec = api.cache.get(version_string)
+            #if rspec:
+                #api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
+                #return rspec 
+    
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = SlabAggregate(api)
+        #aggregate = Aggregate(api)
+        rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version, options=options)
+    
+        # cache the result
+        #if self.caching and api.cache and not xrn:
+            #api.cache.add(version_string, rspec)
+    
+        return rspec
+    
+    
+    def GetTicket(self, api, xrn, creds, rspec, users, options={}):
+    
+        (slice_hrn, _) = urn_to_hrn(xrn)
+        slices = SlabSlices(api)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+    
+        # get the slice record
+        credential = api.getCredential()
+        interface = api.registries[api.hrn]
+        registry = api.server_proxy(interface, credential)
+        records = registry.Resolve(xrn, credential)
+    
+        # make sure we get a local slice record
+        record = None
+        for tmp_record in records:
+            if tmp_record['type'] == 'slice' and \
+               not tmp_record['peer_authority']:
+    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+                record = SliceRecord(dict=tmp_record)
+        if not record:
+            raise RecordNotFound(slice_hrn)
+        
+        # similar to CreateSliver, we must verify that the required records exist
+        # at this aggregate before we can issue a ticket
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
+    
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exists
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exists
+        slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # get sliver info
+        slivers = slices.get_slivers(slice_hrn)
+    
+        if not slivers:
+            raise SliverDoesNotExist(slice_hrn)
+    
+        # get initscripts
+        initscripts = []
+        data = {
+            'timestamp': int(time.time()),
+            'initscripts': initscripts,
+            'slivers': slivers
+        }
+    
+        # create the ticket
+        object_gid = record.get_gid_object()
+        new_ticket = SfaTicket(subject = object_gid.get_subject())
+        new_ticket.set_gid_caller(api.auth.client_gid)
+        new_ticket.set_gid_object(object_gid)
+        new_ticket.set_issuer(key=api.key, subject=api.hrn)
+        new_ticket.set_pubkey(object_gid.get_pubkey())
+        new_ticket.set_attributes(data)
+        new_ticket.set_rspec(rspec)
+        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+        new_ticket.encode()
+        new_ticket.sign()
+    
+        return new_ticket.save_to_string(save_parents=True)
index f48964f..17358cf 100644 (file)
@@ -2,7 +2,7 @@
 # an attempt to document what a driver class should provide, 
 # and implement reasonable defaults
 #
-
+import sys
 class Driver:
     
     def __init__ (self, config): 
@@ -29,6 +29,7 @@ class Driver:
     # this constraint, based on the principle that SFA should not rely on the
     # testbed database to perform such a core operation (i.e. getting rights right)
     def augment_records_with_testbed_info (self, sfa_records):
+        print >>sys.stderr, "  \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
         return sfa_records
 
     # incoming record, as provided by the client to the Register API call
diff --git a/sfa/managers/registry_manager_slab.py b/sfa/managers/registry_manager_slab.py
new file mode 100644 (file)
index 0000000..c35175e
--- /dev/null
@@ -0,0 +1,488 @@
+import types
+import time 
+import sys
+
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+    UnknownSfaType, ExistingRecord
+from sfa.util.prefixTree import prefixTree
+from sfa.util.record import SfaRecord
+from sfa.util.table import SfaTable
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn
+from sfa.util.version import version_core
+
+from sfa.trust.gid import GID 
+from sfa.trust.credential import Credential
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+
+#myapi=SfaAPI()
+# The GENI GetVersion call
+def GetVersion(api):
+    
+    # Bugfix TP 09/11/2011
+    #peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems()
+    peers =dict ([ (peername,v.get_url()) for (peername,v) in api.registries.iteritems()
+        if peername != api.hrn])
+    xrn=Xrn(api.hrn)
+    return version_core({'interface':'registry',
+                         'hrn':xrn.get_hrn(),
+                         'urn':xrn.get_urn(),
+                         'peers':peers})
+
+def GetCredential(api, xrn, type, is_self=False):
+    # convert xrn to hrn     
+    if type:
+        hrn = urn_to_hrn(xrn)[0]
+    else:
+        hrn, type = urn_to_hrn(xrn)
+        
+    # Is this a root or sub authority
+    auth_hrn = api.auth.get_authority(hrn)
+    print>> sys.stderr , " \r\n        REGISTRY get_credential auth_hrn:" , auth_hrn,"hrn : ", hrn, " Type : ", type, "is self : " , is_self,"<<"
+    if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
+        auth_hrn = hrn
+    # get record info
+    auth_info = api.auth.get_auth_info(auth_hrn)
+    table = SfaTable()
+    print >> sys.stderr , " findObject ", type, hrn
+    records = table.findObjects({'type': type, 'hrn': hrn})
+    print>> sys.stderr , " \r\n    ++    REGISTRY get_credential hrn %s records %s " %(hrn, records)      
+    if not records:
+        raise RecordNotFound(hrn)
+    record = records[0]
+
+    # verify_cancreate_credential requires that the member lists
+    # (researchers, pis, etc) be filled in
+    #api.driver.fill_record_info(record, api.aggregates)
+    api.driver.fill_record_info(record)
+    record['enabled'] = True
+    print>> sys.stderr , " \r\n    ++    REGISTRY get_credential hrn %s record['enabled'] %s is_self %s" %(hrn, record['enabled'], is_self)    
+    if record['type']=='user':
+       if not record['enabled']:
+          print>> sys.stderr , " \r\n    ++    REGISTRY get_credential hrn %s ACCOUNT Not enabled"
+          raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+
+    # get the callers gid
+    # if this is a self cred the record's gid is the caller's gid
+    if is_self:
+        caller_hrn = hrn
+        caller_gid = record.get_gid_object()
+       print>>sys.stderr, " \r\n REGISTRY IS SELF OK caller_hrn %s--- \r\n caller_gid %s---------" %(caller_hrn,caller_gid)
+    else:
+       print>> sys.stderr , " \r\n    ++  ELSE   "     
+        caller_gid = api.auth.client_cred.get_gid_caller() 
+       print>> sys.stderr , " \r\n    ++  ELSE  caller_gid %s record %s" %(caller_gid, record) 
+        caller_hrn = caller_gid.get_hrn()
+       print>> sys.stderr , " \r\n    ++  ELSE  caller_hrn %s " %(caller_hrn)
+                 
+    object_hrn = record.get_gid_object().get_hrn()
+    print>> sys.stderr , " \r\n    ++  ELSE object_hrn  %s " %(object_hrn)
+       
+    rights = api.auth.determine_user_rights(caller_hrn, record)
+    print>> sys.stderr , " \r\n    ++  After rights record: %s \r\n ====RIGHTS %s  " %(record , rights)
+     
+    # make sure caller has rights to this object
+    if rights.is_empty():
+        raise PermissionError(caller_hrn + " has no rights to " + record['name'])
+
+    object_gid = GID(string=record['gid'])
+    new_cred = Credential(subject = object_gid.get_subject())
+    new_cred.set_gid_caller(caller_gid)
+    new_cred.set_gid_object(object_gid)
+    new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
+    #new_cred.set_pubkey(object_gid.get_pubkey())
+    new_cred.set_privileges(rights)
+    new_cred.get_privileges().delegate_all_privileges(True)
+    if 'expires' in record:
+        new_cred.set_expiration(int(record['expires']))
+    auth_kind = "authority,ma,sa"
+    # Parent not necessary, verify with certs
+    #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
+    new_cred.encode()
+    new_cred.sign()
+
+    return new_cred.save_to_string(save_parents=True)
+
+
+def Resolve(api, xrns, type=None, full=True):
+
+    # load all known registry names into a prefix tree and attempt to find
+    # the longest matching prefix
+    print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========api ', api
+    print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========xrns ', xrns
+    if not isinstance(xrns, types.ListType):
+        if not type:
+            type = Xrn(xrns).get_type()
+        xrns = [xrns]
+    hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] 
+    print >>sys.stderr , '\t\t =========hrns ', hrns
+    # create a dict where key is a registry hrn and its value is a
+    # hrns at that registry (determined by the known prefix tree).  
+    xrn_dict = {}
+    print >>sys.stderr, '\r\n REGISTRY MANAGER : resolve xrns '  , xrns #api.__dict__.keys()
+    registries = api.registries
+    tree = prefixTree()
+    registry_hrns = registries.keys()
+    print >>sys.stderr, '\r\n \t\t REGISTRY MANAGER registry_hrns'  , registry_hrns
+    tree.load(registry_hrns)
+    for xrn in xrns:
+        registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
+       print >>sys.stderr, '\t\tREGISTRY MANAGER  *****tree.best_match ', registry_hrn
+        if registry_hrn not in xrn_dict:
+            xrn_dict[registry_hrn] = []
+        xrn_dict[registry_hrn].append(xrn)
+       print >>sys.stderr, '\t\tREGISTRY MANAGER  *****xrn_dict[registry_hrn] ',xrn_dict[registry_hrn]
+    records = [] 
+    for registry_hrn in xrn_dict:
+        # skip the hrn without a registry hrn
+        # XX should we let the user know the authority is unknown?       
+       print >>sys.stderr, '\t\t registry_hrn in xrn_dict ', registry_hrn    
+        if not registry_hrn:
+            continue
+
+        # if the best match (longest matching hrn) is not the local registry,
+        # forward the request
+        xrns = xrn_dict[registry_hrn]
+        if registry_hrn != api.hrn:
+            credential = api.getCredential()
+            interface = api.registries[registry_hrn]
+            server = api.server_proxy(interface, credential)
+            peer_records = server.Resolve(xrns, credential)
+            print >>sys.stderr , '\t\t peer_records ', peer_records
+            records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+
+    print >>sys.stderr,'\t\t hrns ' , hrns
+    # try resolving the remaining unfound records at the local registry
+    remaining_hrns = set(hrns).difference([record['hrn'] for record in records])
+    # convert set to list
+    remaining_hrns = [hrn for hrn in remaining_hrns] 
+    print >>sys.stderr, '\t\t remaining_hrns', remaining_hrns
+    table = SfaTable()
+    local_records = table.findObjects({'hrn': remaining_hrns})
+
+    print >>sys.stderr, '\t\t LOCAL REC !', local_records  
+    for rec in local_records:
+        print >>sys.stderr, '\t\t resolve regmanager : rec ', rec    
+                   
+    if full:
+       print >>sys.stderr, '\r\n \r\n REGISTRY:_FULL', api     
+  
+        api.driver.fill_record_info(local_records)
+    
+    # convert local record objects to dicts
+    records.extend([dict(record) for record in local_records])
+    #print >>sys.stderr, "\r\n \t\t records extends %s" %(records)      
+    if not records:
+        raise RecordNotFound(str(hrns))
+
+    if type:
+        records = filter(lambda rec: rec['type'] in [type], records)
+
+    return records
+
+def List(api, xrn, origin_hrn=None):
+    """List all records registered under the authority named by *xrn*.
+
+    The known registry hrns are loaded into a prefix tree; the longest
+    prefix of the target hrn decides which registry owns it.  A remote
+    best match gets the call forwarded; otherwise the local SfaTable is
+    queried for records whose 'authority' field equals the hrn.
+
+    :param api: registry API instance (provides registries, auth, hrn)
+    :param xrn: URN of the authority to list
+    :param origin_hrn: hrn of the original caller (not used here)
+    :returns: list of record dicts
+    :raises MissingAuthority: when no registry matches, or the authority
+        does not exist in the local hierarchy
+    """
+    hrn, type = urn_to_hrn(xrn)
+    # load all know registry names into a prefix tree and attempt to find
+    # the longest matching prefix
+    records = []
+    registries = api.registries
+    registry_hrns = registries.keys()
+    tree = prefixTree()
+    tree.load(registry_hrns)
+    registry_hrn = tree.best_match(hrn)
+   
+    #if there was no match then this record belongs to an unknow registry
+    if not registry_hrn:
+        raise MissingAuthority(xrn)
+    # if the best match (longest matching hrn) is not the local registry,
+    # forward the request
+    records = []    
+    if registry_hrn != api.hrn:
+        credential = api.getCredential()
+       print>>sys.stderr, "Registries : ", registries
+        interface = api.registries[registry_hrn]
+        server = api.server_proxy(interface, credential)
+        record_list = server.List(xrn, credential)
+        records = [SfaRecord(dict=record).as_dict() for record in record_list]
+    
+    # if we still have not found the record yet, try the local registry
+    if not records:
+        if not api.auth.hierarchy.auth_exists(hrn):
+            raise MissingAuthority(hrn)
+
+        table = SfaTable()
+        records = table.find({'authority': hrn})
+
+    return records
+
+
+def Register(api, record):
+    """Register a new record (authority, slice, node or user).
+
+    Validates the type, refuses duplicates, mints a GID when the record
+    does not already carry one, creates the matching backend object via
+    ``api.driver``, then updates researcher/pi/owner/operator membership.
+
+    :param api: registry API instance
+    :param record: dict-like record with at least 'hrn' and 'type'
+    :returns: the new record's GID serialized to a string
+    :raises UnknownSfaType: for an unsupported record type
+    :raises ExistingRecord: when a record with the same type/hrn exists
+    """
+
+    #hrn, type = record['hrn'], record['type']
+    # hrn/type may arrive wrapped as a one-element list; strip the
+    # "['...']" decoration from their string form
+    hrn = str(record['hrn']).strip("['']")
+    type = str( record['type']).strip("['']")
+    urn = hrn_to_urn(hrn,type)
+    # validate the type
+    if type not in ['authority', 'slice', 'node', 'user']:
+        raise UnknownSfaType(type) 
+    
+    # check if record already exists
+    table = SfaTable()
+    existing_records = table.find({'type': type, 'hrn': hrn})
+    if existing_records:
+        raise ExistingRecord(hrn)
+       
+    record = SfaRecord(dict = record)
+
+    print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAN.PY  register  SfaRecordrecord %s" %(record)
+    #record['authority'] = get_authority(record['hrn'])
+    record['authority'] = get_authority(hrn)
+    
+    #type_of_rec = record['type']
+    #hrn = record['hrn']
+    
+    #api.auth.verify_object_permission(hrn)
+    api.auth.verify_object_permission( record['hrn'])
+    auth_info = api.auth.get_auth_info(record['authority'])
+  
+    
+
+    pub_key = None
+    # make sure record has a gid
+    if 'gid' not in record:
+        uuid = create_uuid()
+        pkey = Keypair(create=True)
+        if 'key' in record and record['key']:
+            if isinstance(record['key'], types.ListType):
+                pub_key = record['key'][0]
+            else:
+                pub_key = record['key']
+            pkey = convert_public_key(pub_key)
+
+        gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+        gid = gid_object.save_to_string(save_parents=True)
+        record['gid'] = gid
+        record.set_gid(gid)
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY   record['gid']  %s" %(record['gid'])   
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register type %s"%(type)
+
+    if type in ["authority"]:
+        # update the tree
+        if not api.auth.hierarchy.auth_exists(hrn):
+            api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
+
+        # get the GID from the newly created authority
+        gid = auth_info.get_gid_object()
+        record.set_gid(gid.save_to_string(save_parents=True))
+       
+        #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register : type in [authority ] sfa_fields_to_pl_fields FIELDS A CHANGER"    
+       
+        # thierry: ideally we'd like to be able to write api.driver.GetSites
+        # in which case the code would become mostly the same as for pl
+        # NOTE(review): pl_record is never assigned in this branch (its
+        # assignment above is commented out), so both uses below raise
+        # NameError -- the E0601 comment below already flags this; must be
+        # fixed before the authority path can work.
+        sites = api.driver.GetSites([pl_record['login_base']])
+        if not sites:
+            # thierry
+            # Error (E0601, register): Using variable 'pl_record' before assignment
+            pointer = api.driver.AddSite( pl_record)
+        else:
+            pointer = sites[0]['site_id']
+
+        record.set_pointer(pointer)
+        record['pointer'] = pointer
+
+    elif (type == "slice"):
+        acceptable_fields=['url', 'instantiation', 'name', 'description']
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register  slice pl_record %s"%(pl_record)
+        for key in pl_record.keys():
+            if key not in acceptable_fields:
+                pl_record.pop(key)
+        slices = api.driver.GetSlices([pl_record['name']])
+        if not slices:
+             pointer = api.driver.AddSlice(pl_record)
+        else:
+             pointer = slices[0]['slice_id']
+        record.set_pointer(pointer)
+        record['pointer'] = pointer
+
+    elif  (type == "user"):
+        persons = api.driver.GetPersons([record['email']])
+       if not persons:
+           print>>sys.stderr, "  \r\n \r\n ----------- registry_manager_slab  register NO PERSON ADD TO LDAP?"
+      
+        #if not persons:
+            #pointer = api.driver.AddPerson( dict(record))
+        #else:
+            #pointer = persons[0]['person_id']
+
+        # NOTE(review): the pointer assignments above are commented out,
+        # so 'pointer' is unbound in every api.driver call below --
+        # NameError at runtime; confirm what the senslab port intends here.
+        if 'enabled' in record and record['enabled']:
+            api.driver.UpdatePerson(pointer, {'enabled': record['enabled']})
+        # add this persons to the site only if he is being added for the first
+        # time by sfa and doesont already exist in plc
+        if not persons or not persons[0]['site_ids']:
+            login_base = get_leaf(record['authority'])
+
+            api.driver.AddPersonToSite(pointer, login_base)
+
+        # What roles should this user have?
+        api.driver.AddRoleToPerson('user', pointer)
+        # Add the user's key
+        if pub_key:
+            api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
+
+    #elif (type == "node"):
+        #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+        #login_base = hrn_to_pl_login_base(record['authority'])
+        #nodes = api.driver.GetNodes([pl_record['hostname']])
+        #if not nodes:
+            #pointer = api.driver.AddNode(login_base, pl_record)
+        #else:
+            #pointer = nodes[0]['node_id']
+
+    ##record['pointer'] = pointer
+    ##record.set_pointer(pointer)
+    #record_id = table.insert(record)
+    #record['record_id'] = record_id
+
+    # update membership for researchers, pis, owners, operators
+    api.driver.update_membership(None, record)
+
+    return record.get_gid_object().save_to_string(save_parents=True)
+
+def Update(api, record_dict):
+    """Update an existing registry record and its backing driver object.
+
+    Looks the record up by (type, hrn), refreshes its membership lists,
+    then pushes the allowed field changes to the driver (site, slice,
+    person or node).  The pointer is always taken from the stored record,
+    never from the caller, to prevent forged pointers.
+
+    :param api: registry API instance
+    :param record_dict: dict with the updated record fields
+    :returns: 1 on success
+    :raises RecordNotFound: when no (type, hrn) record exists
+    :raises UnknownSfaType: for an unsupported record type
+    """
+    new_record = SfaRecord(dict = record_dict)
+    type = new_record['type']
+    hrn = new_record['hrn']
+    urn = hrn_to_urn(hrn,type)
+    api.auth.verify_object_permission(hrn)
+    table = SfaTable()
+    # make sure the record exists
+    records = table.findObjects({'type': type, 'hrn': hrn})
+    if not records:
+        raise RecordNotFound(hrn)
+    record = records[0]
+    record['last_updated'] = time.gmtime()
+
+    # Update_membership needs the membership lists in the existing record
+    # filled in, so it can see if members were added or removed
+    api.driver.fill_record_info(record)
+
+    # Use the pointer from the existing record, not the one that the user
+    # gave us. This prevents the user from inserting a forged pointer
+    pointer = record['pointer']
+    # update the PLC information that was specified with the record
+
+    if (type == "authority"):
+        api.driver.UpdateSite(pointer, new_record)
+
+    elif type == "slice":
+        pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record)
+        if 'name' in pl_record:
+            pl_record.pop('name')
+            api.driver.UpdateSlice(pointer, pl_record)
+
+    elif type == "user":
+        # SMBAKER: UpdatePerson only allows a limited set of fields to be
+        #    updated. Ideally we should have a more generic way of doing
+        #    this. I copied the field names from UpdatePerson.py...
+        update_fields = {}
+        all_fields = new_record
+        for key in all_fields.keys():
+            if key in ['first_name', 'last_name', 'title', 'email',
+                       'password', 'phone', 'url', 'bio', 'accepted_aup',
+                       'enabled']:
+                update_fields[key] = all_fields[key]
+        api.driver.UpdatePerson(pointer, update_fields)
+
+        if 'key' in new_record and new_record['key']:
+            # must check this key against the previous one if it exists
+            persons = api.driver.GetPersons([pointer], ['key_ids'])
+            person = persons[0]
+            keys = person['key_ids']
+            keys = api.driver.GetKeys(person['key_ids'])
+            key_exists = False
+            if isinstance(new_record['key'], types.ListType):
+                new_key = new_record['key'][0]
+            else:
+                new_key = new_record['key']
+            
+            # Delete all stale keys
+            for key in keys:
+                if new_record['key'] != key['key']:
+                    api.driver.DeleteKey(key['key_id'])
+                else:
+                    key_exists = True
+            if not key_exists:
+                api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+
+            # update the openssl key and gid
+            pkey = convert_public_key(new_key)
+            uuid = create_uuid()
+            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+            gid = gid_object.save_to_string(save_parents=True)
+            record['gid'] = gid
+            record = SfaRecord(dict=record)
+            # NOTE(review): table.update() is only reached in this
+            # user-with-key branch; changes made for other types (and the
+            # last_updated stamp) are never persisted to the sfa table --
+            # confirm this is intended.
+            table.update(record)
+
+    elif type == "node":
+        api.driver.UpdateNode(pointer, new_record)
+
+    else:
+        raise UnknownSfaType(type)
+
+    # update membership for researchers, pis, owners, operators
+    api.driver.update_membership(record, new_record)
+    
+    return 1 
+
+# expecting an Xrn instance
+def Remove(api, xrn, origin_hrn=None):
+    """Remove the record named by *xrn* (an Xrn instance) everywhere.
+
+    First asks each federated registry to drop its copy (best effort),
+    then deletes the backing driver object, and finally removes the
+    local sfa table entry.
+
+    :param api: registry API instance
+    :param xrn: Xrn instance naming the record (type optional)
+    :param origin_hrn: hrn of the original caller, forwarded to peers
+    :returns: 1 on success
+    :raises RecordNotFound: when no matching local record exists
+    :raises UnknownSfaType: for an unsupported record type
+    """
+
+    table = SfaTable()
+    filter = {'hrn': xrn.get_hrn()}
+    hrn=xrn.get_hrn()
+    type=xrn.get_type()
+    if type and type not in ['all', '*']:
+        filter['type'] = type
+
+    records = table.find(filter)
+    if not records: raise RecordNotFound(hrn)
+    record = records[0]
+    type = record['type']
+
+    credential = api.getCredential()
+    registries = api.registries
+
+    # Try to remove the object from the PLCDB of federated agg.
+    # This is attempted before removing the object from the local agg's PLCDB and sfa table
+    if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']:
+        for registry in registries:
+            if registry not in [api.hrn]:
+                try:
+                    result=registries[registry].remove_peer_object(credential, record, origin_hrn)
+                except:
+                    # deliberate best-effort: a peer failure must not
+                    # block the local removal below
+                    pass
+    if type == "user":
+        persons = api.driver.GetPersons(record['pointer'])
+        # only delete this person if he has site ids. if he doesnt, it probably means
+        # he was just removed from a site, not actually deleted
+        if persons and persons[0]['site_ids']:
+            api.driver.DeletePerson(record['pointer'])
+    elif type == "slice":
+        if api.driver.GetSlices(record['pointer']):
+            api.driver.DeleteSlice(record['pointer'])
+    elif type == "node":
+        if api.driver.GetNodes(record['pointer']):
+            api.driver.DeleteNode(record['pointer'])
+    elif type == "authority":
+        if api.driver.GetSites(record['pointer']):
+            api.driver.DeleteSite(record['pointer'])
+    else:
+        raise UnknownSfaType(type)
+
+    table.remove(record)
+
+    return 1
+
diff --git a/sfa/managers/senslab/sl.rng b/sfa/managers/senslab/sl.rng
new file mode 100644 (file)
index 0000000..627b6fd
--- /dev/null
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+  <start>
+    <ref name="RSpec"/>
+  </start>
+  <define name="RSpec">
+    <element name="RSpec">
+      <attribute name="type">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <choice>
+        <ref name="network"/>
+        <ref name="request"/>
+      </choice>
+    </element>
+  </define>
+  <define name="network">
+    <element name="network">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="site"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver_defaults">
+    <element name="sliver_defaults">
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="site">
+    <element name="site">
+      <attribute name="id">
+        <data type="ID"/>
+      </attribute>
+      <element name="name">
+        <text/>
+      </element>
+      <zeroOrMore>
+        <ref name="node"/>
+      </zeroOrMore>
+    </element>
+  </define>
+  <define name="node">
+    <element name="node">
+      <attribute name="node_id">
+        <data type="ID"/>
+      </attribute>
+      <element name="hostname">
+        <text/>
+      </element> 
+      <attribute name="reservable">
+        <data type="boolean"/>
+      </attribute>
+      <element name="ip_address">
+        <text/>
+      </element>
+      <optional>
+        <element name="urn">
+            <text/>
+        </element>
+      </optional>
+      <optional>
+        <ref name="leases"/>
+       </optional>
+      <optional>
+        <ref name="sliver"/>
+       </optional>
+    </element>
+  </define>
+  <define name="request">
+    <element name="request">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="sliver"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver">
+    <element name="sliver">
+      <optional>
+        <attribute name="nodeid">
+          <data type="ID"/>
+        </attribute>
+      </optional>
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="sliver_elements">
+    <interleave>
+      <optional>
+        <element name="capabilities">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="delegations">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="program">
+          <text/>
+        </element>
+      </optional>     
+      </interleave>
+  </define>
+ <define name="leases">
+    <element name="leases">
+      <!-- zero or more (slot, slice) reservation entries -->
+      <!-- fixed: "slot" was self-closed yet followed by </attribute>,
+           and </element>/</define> were missing before </grammar> -->
+      <zeroOrMore>
+       <group>
+        <attribute name="slot"><data type="dateTime"/></attribute>
+        <attribute name="slice"><data type="NMTOKEN"/></attribute>
+       </group>
+      </zeroOrMore>
+    </element>
+  </define>
+</grammar>
diff --git a/sfa/managers/slice_manager_slab.py b/sfa/managers/slice_manager_slab.py
new file mode 100644 (file)
index 0000000..e67d2b5
--- /dev/null
@@ -0,0 +1,670 @@
+# 
+import sys
+import time,datetime
+from StringIO import StringIO
+from types import StringTypes
+from copy import deepcopy
+from copy import copy
+from lxml import etree
+
+from sfa.util.sfalogging import logger
+#from sfa.util.sfalogging import sfa_logger
+#from sfa.util.rspecHelper import merge_rspecs
+from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
+from sfa.util.plxrn import hrn_to_pl_slicename
+#from sfa.util.rspec import *
+#from sfa.util.specdict import *
+from sfa.util.faults import *
+from sfa.util.record import SfaRecord
+#from sfa.rspecs.pg_rspec import PGRSpec
+#from sfa.rspecs.sfa_rspec import SfaRSpec
+from sfa.rspecs.rspec_converter import RSpecConverter
+#from sfa.rspecs.rspec_parser import parse_rspec    
+#from sfa.rspecs.rspec_version import RSpecVersion
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
+#from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version
+from sfa.client.client_helper import sfa_to_pg_users_arg
+from sfa.rspecs.version_manager import VersionManager
+
+#from sfa.rspecs.rspec import RSpec 
+from sfa.util.policy import Policy
+from sfa.util.prefixTree import prefixTree
+#from sfa.util.sfaticket import *
+from sfa.trust.credential import Credential
+#from sfa.util.threadmanager import ThreadManager
+#import sfa.util.xmlrpcprotocol as xmlrpcprotocol     
+#import sfa.plc.peers as peers
+from sfa.util.version import version_core
+from sfa.util.callids import Callids
+#from sfa.senslab.api import *
+
+
+#api=SfaAPI(interface='slicemgr')
+
+def _call_id_supported(api, server):
+    """
+    Returns true if server support the optional call_id arg, false otherwise.
+    """
+    # Decides from the peer's cached GetVersion: only sfa servers carry a
+    # code_tag, expected shape "major.minor...-rev" (e.g. "1.0-21").
+    server_version = api.get_cached_server_version(server)
+
+    if 'sfa' in server_version:
+        code_tag = server_version['code_tag']
+        code_tag_parts = code_tag.split("-")
+
+        version_parts = code_tag_parts[0].split(".")
+        major, minor = version_parts[0:2]
+        rev = code_tag_parts[1]
+        # NOTE(review): True only when major > 1 AND (minor > 0 or
+        # rev > 20) -- e.g. "2.0-0" is rejected even though its major is
+        # newer; looks like the inner check was meant for major == 1.
+        # Confirm against upstream sfa before changing.
+        if int(major) > 1:
+            if int(minor) > 0 or int(rev) > 20:
+                return True
+    return False
+
+# we have specialized xmlrpclib.ServerProxy to remember the input url
+# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+def get_serverproxy_url (server):
+    """Return the URL that *server* (a server proxy) points at.
+
+    Prefers the specialized proxy's get_url(); falls back to poking
+    xmlrpclib.ServerProxy's name-mangled internals when absent.
+    """
+    try:
+        return server.get_url()
+    except:
+        logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+        return server._ServerProxy__host + server._ServerProxy__handler 
+
+def GetVersion(api):
+    """Return this slice manager's GetVersion dict.
+
+    Includes the peer aggregates listed in aggregates.xml (minus
+    ourselves), the ad/request rspec versions known to VersionManager,
+    and "sfa 1" as the default ad rspec.  A local aggregate's
+    'localhost' URL is rewritten to our public hostname.
+    """
+    # peers explicitly in aggregates.xml
+    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+                   if peername != api.hrn])
+    version_manager = VersionManager()
+    ad_rspec_versions = []
+    request_rspec_versions = []
+    for rspec_version in version_manager.versions:
+        if rspec_version.content_type in ['*', 'ad']:
+            ad_rspec_versions.append(rspec_version.to_dict())
+        if rspec_version.content_type in ['*', 'request']:
+            request_rspec_versions.append(rspec_version.to_dict())
+    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+    xrn=Xrn(api.hrn, 'authority+sa')
+    version_more = {'interface':'slicemgr',
+                    'hrn' : xrn.get_hrn(),
+                    'urn' : xrn.get_urn(),
+                    'peers': peers,
+                    'request_rspec_versions': request_rspec_versions,
+                    'ad_rspec_versions': ad_rspec_versions,
+                    'default_ad_rspec': default_rspec_version
+                    }
+    sm_version=version_core(version_more)
+    # local aggregate if present needs to have localhost resolved
+    if api.hrn in api.aggregates:
+        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+    return sm_version
+
+
+#def GetVersion(api):
+    ## peers explicitly in aggregates.xml
+    #peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems() 
+                   #if peername != api.hrn])
+    #xrn=Xrn (api.hrn)
+    #request_rspec_versions = [dict(pg_rspec_request_version), dict(sfa_rspec_version)]
+    #ad_rspec_versions = [dict(pg_rspec_ad_version), dict(sfa_rspec_version)]
+    #version_more = {'interface':'slicemgr',
+                    #'hrn' : xrn.get_hrn(),
+                    #'urn' : xrn.get_urn(),
+                    #'peers': peers,
+                    #'request_rspec_versions': request_rspec_versions,
+                    #'ad_rspec_versions': ad_rspec_versions,
+                    #'default_ad_rspec': dict(sfa_rspec_version)
+                    #}
+    #sm_version=version_core(version_more)
+    ## local aggregate if present needs to have localhost resolved
+    #if api.hrn in api.aggregates:
+        #local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+        #sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+    #return sm_version
+def drop_slicemgr_stats(api,rspec):
+       """Strip any <statistics> elements from *rspec* in place.
+
+       Aggregates ignore the slice manager's statistics section, so it is
+       removed before forwarding.  Failures are logged and swallowed
+       (best effort).
+       """
+       try:
+               stats_elements = rspec.xml.xpath('//statistics')
+               for node in stats_elements:
+                       node.getparent().remove(node)
+       except Exception, e:
+               api.logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
+def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
+       """Create slivers for slice *xrn* on every known aggregate.
+
+       Fans the request out (one thread per aggregate), converting the
+       rspec to PG format for non-sfa aggregates, then merges the manifest
+       rspecs that come back into one result.
+
+       NOTE(review): RSpec, ThreadManager and add_slicemgr_stat are not
+       defined in this module (their imports are commented out above), so
+       this function raises NameError when executed -- looks like
+       in-progress porting; confirm before relying on it.
+       """
+       
+       version_manager = VersionManager()
+       # inner worker run by one thread per aggregate; returns a small
+       # status dict either way so the merge loop below can proceed
+       def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
+               
+               tStart = time.time()
+               try:
+                       # Need to call GetVersion at an aggregate to determine the supported
+                       # rspec type/format beofre calling CreateSliver at an Aggregate.
+                       print>>sys.stderr, " \r\n SLICE MANAGERSLAB _CreateSliver server " 
+                       server_version = api.get_cached_server_version(server)
+                       requested_users = users
+                       if 'sfa' not in server_version and 'geni_api' in server_version:
+                               # sfa aggregtes support both sfa and pg rspecs, no need to convert
+                               # if aggregate supports sfa rspecs. otherwise convert to pg rspec
+                               rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
+                               filter = {'component_manager_id': server_version['urn']}
+                               rspec.filter(filter)
+                               rspec = rspec.toxml()
+                               requested_users = sfa_to_pg_users_arg(users)
+                       args = [xrn, credential, rspec, requested_users]
+                       if _call_id_supported(api, server):
+                               args.append(call_id)
+                       rspec = server.CreateSliver(*args)
+                       return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+               except: 
+                       logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url)
+               return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+
+       
+       if Callids().already_handled(call_id): return ""
+       
+       # Validate the RSpec against PlanetLab's schema --disabled for now
+       # The schema used here needs to aggregate the PL and VINI schemas
+       # schema = "/var/www/html/schemas/pl.rng"
+       rspec = RSpec(rspec_str)
+       schema = None
+       if schema:
+               rspec.validate(schema)
+               
+       print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api %s" %(api)
+       # if there is a <statistics> section, the aggregates don't care about it,
+       # so delete it.
+       drop_slicemgr_stats(api,rspec)
+       
+       # attempt to use delegated credential first
+       credential = api.getDelegatedCredential(creds)
+       if not credential:
+               credential = api.getCredential()
+
+       # get the callers hrn
+       hrn, type = urn_to_hrn(xrn)
+       valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
+       caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+       threads = ThreadManager()
+       print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver api aggregates  %s \t caller_hrn %s api.hrn %s" %(api.aggregates, caller_hrn, api.hrn)
+       for aggregate in api.aggregates:
+       # prevent infinite loop. Dont send request back to caller
+       # unless the caller is the aggregate's SM 
+               if caller_hrn == aggregate and aggregate != api.hrn:
+                       continue
+               interface = api.aggregates[aggregate]
+               print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver aggregate %s interface %s" %(api.aggregates[aggregate],interface)   
+               server = api.get_server(interface, credential)
+               if server is None:
+                       print>>sys.stderr, " \r\n \r\n \t\t =======SLICE MANAGER _CreateSliver NOSERVERS "  
+               # Just send entire RSpec to each aggregate
+               #threads.run(_CreateSliver, aggregate, xrn, [credential], rspec.toxml(), users, call_id)
+               threads.run(_CreateSliver, aggregate, server, xrn, [credential], rspec.toxml(), users, call_id)
+       results = threads.get_results()
+       manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
+       result_rspec = RSpec(version=manifest_version)
+    #rspec = SfaRSpec()
+       for result in results:
+               add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"])
+               if result["status"]=="success":
+                       try:
+                               result_rspec.version.merge(result["rspec"])
+                       except:
+                               api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec")
+       return result_rspec.toxml()
+        #rspec.merge(result)     
+    #return rspec.toxml()
+
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+    """Renew slivers of slice *xrn* on all aggregates until *expiration_time*.
+
+    Returns the logical AND of the aggregate results (True only when
+    every aggregate renewed successfully).
+
+    NOTE(review): ThreadManager is not imported in this module (import
+    commented out above) -- NameError at runtime; confirm.
+    """
+    if Callids().already_handled(call_id): return True
+
+    (hrn, type) = urn_to_hrn(xrn)
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+
+        server = api.aggregates[aggregate]
+        threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
+    # 'and' the results
+    return reduce (lambda x,y: x and y, threads.get_results() , True)
+
+def get_ticket(api, xrn, creds, rspec, users):
+    """Obtain tickets for slice *xrn* from all relevant aggregates and merge them.
+
+    Splits the rspec per <network> element, asks each aggregate (or a
+    peer that knows it) to ParseTicket, then merges the returned rspecs,
+    initscripts and slivers into a single ticket signed by this SM.
+
+    NOTE(review): xmlrpcprotocol, SfaTicket and merge_rspecs are not
+    imported in this module (their imports are commented out above) --
+    those code paths raise NameError when executed; confirm.
+    """
+    slice_hrn, type = urn_to_hrn(xrn)
+    # get the netspecs contained within the clients rspec
+    aggregate_rspecs = {}
+    tree= etree.parse(StringIO(rspec))
+    elements = tree.findall('./network')
+    for element in elements:
+        aggregate_hrn = element.values()[0]
+        aggregate_rspecs[aggregate_hrn] = rspec 
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential() 
+    threads = ThreadManager()
+    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = None
+        if aggregate in api.aggregates:
+            server = api.aggregates[aggregate]
+        else:
+            net_urn = hrn_to_urn(aggregate, 'authority')     
+            # we may have a peer that knows about this aggregate
+            for agg in api.aggregates:
+                target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn)
+                if not target_aggs or not 'hrn' in target_aggs[0]:
+                    continue
+                # send the request to this address 
+                url = target_aggs[0]['url']
+                server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file)
+                # aggregate found, no need to keep looping
+                break   
+        if server is None:
+            continue 
+        threads.run(server.ParseTicket, xrn, credential, aggregate_rspec, users)
+
+    results = threads.get_results()
+    
+    # gather information from each ticket 
+    rspecs = []
+    initscripts = []
+    slivers = [] 
+    object_gid = None  
+    for result in results:
+        agg_ticket = SfaTicket(string=result)
+        attrs = agg_ticket.get_attributes()
+        if not object_gid:
+            object_gid = agg_ticket.get_gid_object()
+        rspecs.append(agg_ticket.get_rspec())
+        initscripts.extend(attrs.get('initscripts', [])) 
+        slivers.extend(attrs.get('slivers', [])) 
+    
+    # merge info
+    attributes = {'initscripts': initscripts,
+                 'slivers': slivers}
+    merged_rspec = merge_rspecs(rspecs) 
+
+    # create a new ticket
+    ticket = SfaTicket(subject = slice_hrn)
+    ticket.set_gid_caller(api.auth.client_gid)
+    ticket.set_issuer(key=api.key, subject=api.hrn)
+    ticket.set_gid_object(object_gid)
+    ticket.set_pubkey(object_gid.get_pubkey())
+    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+    ticket.set_attributes(attributes)
+    ticket.set_rspec(merged_rspec)
+    ticket.encode()
+    ticket.sign()          
+    return ticket.save_to_string(save_parents=True)
+
+
+def DeleteSliver(api, xrn, creds, call_id):
+    """Delete slivers of slice *xrn* on every aggregate; always returns 1.
+
+    NOTE(review): ThreadManager is not imported in this module (import
+    commented out above) -- NameError at runtime; confirm.
+    """
+    if Callids().already_handled(call_id): return ""
+    (hrn, type) = urn_to_hrn(xrn)
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = api.aggregates[aggregate]
+        threads.run(server.DeleteSliver, xrn, credential, call_id)
+    threads.get_results()
+    return 1
+
+def start_slice(api, xrn, creds):
+    """Start slice *xrn* by fanning server.Start out to every aggregate.
+
+    NOTE(review): ThreadManager is not imported in this module (import
+    commented out above) -- NameError at runtime; confirm.
+    """
+    hrn, type = urn_to_hrn(xrn)
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = api.aggregates[aggregate]
+        threads.run(server.Start, xrn, credential)
+    threads.get_results()    
+    return 1
+def stop_slice(api, xrn, creds):
+    """Stop slice *xrn* by fanning server.Stop out to every aggregate.
+
+    NOTE(review): ThreadManager is not imported in this module (import
+    commented out above) -- NameError at runtime; confirm.
+    """
+    hrn, type = urn_to_hrn(xrn)
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = api.aggregates[aggregate]
+        threads.run(server.Stop, xrn, credential)
+    threads.get_results()    
+    return 1
+
+def reset_slice(api, xrn):
+    """
+    Not implemented
+    """
+    return 1
+
+def shutdown(api, xrn, creds):
+    """
+    Not implemented   
+    """
+    return 1
+
+def status(api, xrn, creds):
+    """
+    Not implemented 
+    """
+    return 1
+
+# Thierry : caching at the slicemgr level makes sense to some extent
+#caching=True
+caching=False
+def ListSlices(api, creds, call_id):
+
+    if Callids().already_handled(call_id): return []
+
+    # look in cache first
+    if caching and api.cache:
+        slices = api.cache.get('slices')
+        if slices:
+            return slices    
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    # fetch from aggregates
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+        server = api.aggregates[aggregate]
+        threads.run(server.ListSlices, credential, call_id)
+
+    # combine results
+    results = threads.get_results()
+    slices = []
+    for result in results:
+        slices.extend(result)
+    
+    # cache the result
+    if caching and api.cache:
+        api.cache.add('slices', slices)
+
+    return slices
+
+def add_slicemgr_stat(rspec, callname, aggname, elapsed, status):
+       try:
+               stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
+               if stats_tags:
+                       stats_tag = stats_tags[0]
+               else:
+                       stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname)
+                       
+               etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status))
+       except Exception, e:
+               api.logger.warn("add_slicemgr_stat failed on  %s: %s" %(aggname, str(e)))
+
+
+
+
+def ListResources(api, creds, options, call_id):
+    version_manager = VersionManager()
+    def _ListResources(aggregate, server, credential, opts, call_id):
+
+        my_opts = copy(opts)
+        args = [credential, my_opts]
+        tStart = time.time()
+        try:
+            if _call_id_supported(api, server):
+                args.append(call_id)
+            version = api.get_cached_server_version(server)
+            # force ProtoGENI aggregates to give us a v2 RSpec
+            if 'sfa' not in version.keys():
+                my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+            rspec = server.ListResources(*args)
+            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+        except Exception, e:
+            api.logger.log_exc("ListResources failed at %s" %(server.url))
+            return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+
+    if Callids().already_handled(call_id): return ""
+
+    # get slice's hrn from options
+    xrn = options.get('geni_slice_urn', '')
+    (hrn, type) = urn_to_hrn(xrn)
+    if 'geni_compressed' in options:
+        del(options['geni_compressed'])
+
+    # get the rspec's return format from options
+    rspec_version = version_manager.get_version(options.get('rspec_version'))
+    version_string = "rspec_%s" % (rspec_version.to_string())
+
+    # look in cache first
+    if caching and api.cache and not xrn:
+        rspec =  api.cache.get(version_string)
+        if rspec:
+            return rspec
+
+    # get the callers hrn
+    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+
+    # attempt to use delegated credential first
+    cred = api.getDelegatedCredential(creds)
+    if not cred:
+        cred = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        # prevent infinite loop. Dont send request back to caller
+        # unless the caller is the aggregate's SM
+        if caller_hrn == aggregate and aggregate != api.hrn:
+            continue
+
+        # get the rspec from the aggregate
+        interface = api.aggregates[aggregate]
+        server = api.get_server(interface, cred)
+        threads.run(_ListResources, aggregate, server, [cred], options, call_id)
+
+
+    results = threads.get_results()
+    rspec_version = version_manager.get_version(options.get('rspec_version'))
+    if xrn:    
+        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
+    else: 
+        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
+    rspec = RSpec(version=result_version)
+    for result in results:
+        add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"])
+        if result["status"]=="success":
+            try:
+                rspec.version.merge(result["rspec"])
+            except:
+                api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
+
+    # cache the result
+    if caching and api.cache and not xrn:
+        api.cache.add(version_string, rspec.toxml())
+       
+    print >>sys.stderr, "\r\n  slice_manager  \r\n"   , rspec
+    return rspec.toxml()
+
+#def ListResources(api, creds, options, call_id):
+
+    #if Callids().already_handled(call_id): return ""
+
+    ## get slice's hrn from options
+    #xrn = options.get('geni_slice_urn', '')
+    #(hrn, type) = urn_to_hrn(xrn)
+    #print >>sys.stderr, " SM_ListResources xrn " , xrn
+    ##print >>sys.stderr, " SM ListResources api.__dict__ " , api.__dict__.keys()
+    ##print >>sys.stderr, " SM ListResources dir(api)" , dir(api)
+    #print >>sys.stderr, "  \r\n avant RspecVersion \r\n \r\n"
+    ## get the rspec's return format from options
+    #rspec_version = RSpecVersion(options.get('rspec_version'))
+    #print >>sys.stderr, " \r\n \r\n ListResources RSpecVersion ", rspec_version
+    #version_string = "rspec_%s" % (rspec_version.get_version_name())
+
+    ##panos adding the info option to the caching key (can be improved)
+    #if options.get('info'):
+       #version_string = version_string + "_"+options.get('info')
+   
+    #print>>sys.stderr,"version string = ",version_string
+
+    ## look in cache first
+    #if caching and api.cache and not xrn:
+       #print>>sys.stderr," \r\n  caching %s and api.cache %s and not xrn %s"%(caching , api.cache,xrn) 
+        #rspec =  api.cache.get(version_string)
+        #if rspec:
+            #return rspec
+
+    ## get the callers hrn
+    #print >>sys.stderr, " SM ListResources get the callers hrn "
+    #valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+    #caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    #print >>sys.stderr, " \r\n SM ListResources get the callers caller_hrn hrn  %s "%(caller_hrn)
+    ## attempt to use delegated credential first
+    #credential = api.getDelegatedCredential(creds)
+    #print >>sys.stderr, " \r\n SM ListResources get the callers credential  %s "%(credential) 
+    #if not credential:
+        #credential = api.getCredential()
+    #threads = ThreadManager()
+    #print >>sys.stderr, " \r\n SM ListResources get the callers api.aggregates  %s "%(api.aggregates) 
+    #for aggregate in api.aggregates:
+        ## prevent infinite loop. Dont send request back to caller
+        ## unless the caller is the aggregate's SM
+        #if caller_hrn == aggregate and aggregate != api.hrn:
+            #continue
+        ## get the rspec from the aggregate
+        #server = api.aggregates[aggregate]
+       #print >>sys.stderr, " Slice Mgr ListResources, server" ,server
+        #my_opts = copy(options)
+        #my_opts['geni_compressed'] = False
+        #threads.run(server.ListResources, credential, my_opts, call_id)
+        #print >>sys.stderr, "\r\n  !!!!!!!!!!!!!!!! \r\n"       
+    #results = threads.get_results()
+    ##results.append(open('/root/protogeni.rspec', 'r').read())
+    #rspec_version = RSpecVersion(my_opts.get('rspec_version'))
+    #if rspec_version['type'].lower() == 'protogeni':
+        #rspec = PGRSpec()
+    #else:
+        #rspec = SfaRSpec()
+
+    #for result in results:
+        #print >>sys.stderr, "\r\n  slice_manager  result"   , result
+        #try:
+            #print >>sys.stderr, "avant merge"  , rspec         
+            #rspec.merge(result)        
+            #print >>sys.stderr, "AFTERMERGE" , rspec
+        #except:
+            #raise
+            #api.logger.info("SM.ListResources: Failed to merge aggregate rspec")
+
+    ## cache the result
+    #if caching and api.cache and not xrn:
+        #api.cache.add(version_string, rspec.toxml())
+
+    #print >>sys.stderr, "\r\n  slice_manager  \r\n"   , rspec
+    #return rspec.toxml()
+
+# first draft at a merging SliverStatus
+def SliverStatus(api, slice_xrn, creds, call_id):
+    if Callids().already_handled(call_id): return {}
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        server = api.aggregates[aggregate]
+        threads.run (server.SliverStatus, slice_xrn, credential, call_id)
+    results = threads.get_results()
+
+    # get rid of any void result - e.g. when call_id was hit where by convention we return {}
+    results = [ result for result in results if result and result['geni_resources']]
+
+    # do not try to combine if there's no result
+    if not results : return {}
+
+    # otherwise let's merge stuff
+    overall = {}
+
+    # mmh, it is expected that all results carry the same urn
+    overall['geni_urn'] = results[0]['geni_urn']
+
+    # consolidate geni_status - simple model using max on a total order
+    states = [ 'ready', 'configuring', 'failed', 'unknown' ]
+    # hash name to index
+    shash = dict ( zip ( states, range(len(states)) ) )
+    def combine_status (x,y):
+        return shash [ max (shash(x),shash(y)) ]
+    overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )
+
+    # {'ready':0,'configuring':1,'failed':2,'unknown':3}
+    # append all geni_resources
+    overall['geni_resources'] = \
+        reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+
+    return overall
+
+def main():
+    r = RSpec()
+    r.parseFile(sys.argv[1])
+    rspec = r.toDict()
+    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
+
+if __name__ == "__main__":
+    main()
+    
index 0e944ac..b28d1a5 100644 (file)
@@ -2,6 +2,7 @@ from sfa.util.faults import SfaInvalidArgument, InvalidRSpec
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
+import sys
 from sfa.trust.credential import Credential
 from sfa.storage.parameter import Parameter, Mixed
 from sfa.rspecs.rspec import RSpec
@@ -33,7 +34,7 @@ class CreateSliver(Method):
         hrn, type = urn_to_hrn(slice_xrn)
 
         self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
-
+        print >>sys.stderr, " \r\n \r\n Createsliver.py call %s\ttarget-hrn: %s\tmethod-name: %s "%(self.api.interface, hrn, self.name)
         # Find the valid credentials
         valid_creds = self.api.auth.checkCredentials(creds, 'createsliver', hrn)
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
index 04359a0..41676e6 100644 (file)
@@ -1,5 +1,5 @@
 import zlib
-
+import sys
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
@@ -42,8 +42,10 @@ class ListResources(Method):
 
         # get hrn of the original caller 
         origin_hrn = options.get('origin_hrn', None)
+
         if not origin_hrn:
             origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources.py call :self.api.interface %s  origin_hrn %s options %s \r\n \t creds %s " %(self.api.interface,origin_hrn,options, creds)          
         rspec = self.api.manager.ListResources(self.api, creds, options)
 
         # filter rspec through sfatables 
@@ -51,7 +53,8 @@ class ListResources(Method):
             chain_name = 'OUTGOING'
         elif self.api.interface in ['slicemgr']: 
             chain_name = 'FORWARD-OUTGOING'
-        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)  
+        print >>sys.stderr, " \r\n \r\n \t Listresources.py call : chain_name %s hrn %s origine_hrn %s " %(chain_name, hrn, origin_hrn)
         filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec) 
  
         if options.has_key('geni_compressed') and options['geni_compressed'] == True:
index 6f2c57e..4a88e22 100644 (file)
@@ -1,5 +1,6 @@
 from types import StringTypes
 from collections import defaultdict
+import sys
 
 from sfa.util.sfalogging import logger
 from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
@@ -131,13 +132,13 @@ class PlSlices:
         # slice belongs to out local plc or a myplc peer. We will assume it 
         # is a local site, unless we find out otherwise  
         peer = None
-
+        print>>sys.stderr, " \r\n \r\n \tplslices.py get_peer slice_authority  "
         # get this slice's authority (site)
         slice_authority = get_authority(hrn)
 
         # get this site's authority (sfa root authority or sub authority)
         site_authority = get_authority(slice_authority).lower()
-
+        print>>sys.stderr, " \r\n \r\n \tplslices.py get_peer slice_authority  %s site_authority %s" %(slice_authority,site_authority) 
         # check if we are already peered with this site_authority, if so
         peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
         for peer_record in peers:
diff --git a/sfa/rspecs/pl_rspec_version.py b/sfa/rspecs/pl_rspec_version.py
new file mode 100644 (file)
index 0000000..eb4f9a6
--- /dev/null
@@ -0,0 +1,16 @@
+from sfa.rspecs.sfa_rspec import sfa_rspec_version
+from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version 
+
+ad_rspec_versions = [
+    pg_rspec_ad_version,
+    sfa_rspec_version
+    ]
+
+request_rspec_versions = ad_rspec_versions
+
+default_rspec_version = { 'type': 'SFA', 'version': '1' }
+
+supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
+                    'request_rspec_versions': request_rspec_versions,
+                    'default_ad_rspec': default_rspec_version}
+
diff --git a/sfa/senslab/LDAPapi.py b/sfa/senslab/LDAPapi.py
new file mode 100644 (file)
index 0000000..d84f6f8
--- /dev/null
@@ -0,0 +1,97 @@
+
+import sys
+
+import ldap
+from sfa.util.config import *
+from sfa.trust.gid import *
+from sfa.trust.hierarchy import *
+from sfa.trust.auth import *
+from sfa.trust.certificate import *
+
+class LDAPapi :
+       def __init__(self, record_filter = None):
+               self.ldapserv=ldap.open("192.168.0.251")
+               self.senslabauth=Hierarchy()
+               config=Config()
+               self.authname=config.SFA_REGISTRY_ROOT_AUTH
+               authinfo=self.senslabauth.get_auth_info(self.authname)
+       
+               self.auth=Auth()
+               gid=authinfo.get_gid_object()
+                self.ldapdictlist = ['type',
+                                'pkey',
+                                'uid',
+                               'serial',
+                               'authority',
+                               'peer_authority',
+                               'pointer' ,
+                               'hrn']
+       
+       def ldapFind(self, record_filter = None, columns=None):
+
+               results = []
+       
+               if 'authority' in record_filter:
+               # ask for authority
+                       if record_filter['authority']==self.authname:
+                               # which is SFA_REGISTRY_ROOT_AUTH
+                               # request all records which are under our authority, ie all ldap entries
+                               ldapfilter="cn=*"
+                       else:
+                               #which is NOT SFA_REGISTRY_ROOT_AUTH
+                               return []
+               else :
+                       if not 'hrn' in record_filter:
+                               print >>sys.stderr,"find : don't know how to handle filter ",record_filter
+                               return []
+                       else:
+                               hrns=[]
+                               h=record_filter['hrn']
+                               if  isinstance(h,list):
+                                       hrns=h
+                               else : 
+                                       hrns.append(h)
+       
+                               ldapfilter="(|"
+                               for hrn in hrns:
+                                       splited_hrn=hrn.split(".")
+                                       if splited_hrn[0] != self.authname :
+                                               print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", splited_hrn[0]
+                                       else :
+                                               login=splited_hrn[1]
+                                               ldapfilter+="(uid="
+                                               ldapfilter+=login
+                                               ldapfilter+=")"
+                               ldapfilter+=")"
+       
+       
+               rindex=self.ldapserv.search("ou=people,dc=senslab,dc=info",ldap.SCOPE_SUBTREE,ldapfilter, ['mail','givenName', 'sn', 'uid','sshPublicKey'])
+               ldapresponse=self.ldapserv.result(rindex,1)
+               for ldapentry in ldapresponse[1]:
+                       hrn=self.authname+"."+ldapentry[1]['uid'][0]
+#                      uuid=create_uuid() 
+               
+#                      RSA_KEY_STRING=ldapentry[1]['sshPublicKey'][0]
+               
+#                      pkey=convert_public_key(RSA_KEY_STRING)
+               
+#                      gid=self.senslabauth.create_gid("urn:publicid:IDN+"+self.authname+"+user+"+ldapentry[1]['uid'][0], uuid, pkey, CA=False)
+               
+                       parent_hrn = get_authority(hrn)
+                       parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
+
+                       results.append(  {      
+                               'type': 'user',
+                                'pkey': ldapentry[1]['sshPublicKey'][0],
+                                'uid': ldapentry[1]['uid'][0],
+#                              'email': ldapentry[1]['mail'][0],
+#                              'first_name': ldapentry[1]['givenName'][0],
+#                              'last_name': ldapentry[1]['sn'][0],
+#                              'phone': 'none',
+                               'serial': 'none',
+                               'authority': self.authname,
+                               'peer_authority': '',
+                               'pointer' : -1,
+                               'hrn': hrn,
+                               } )
+               return results
diff --git a/sfa/senslab/OARrestapi.py b/sfa/senslab/OARrestapi.py
new file mode 100644 (file)
index 0000000..b4eb45c
--- /dev/null
@@ -0,0 +1,336 @@
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+from sfa.senslab.parsing import *
+from sfa.senslab.SenslabImportUsers import *
+import urllib
+import urllib2
+
+
+#OARIP='10.127.255.254'
+OARIP='192.168.0.109'
+
+
+OARrequests_list = ["GET_version", "GET_timezone", "GET_jobs", "GET_jobs_table", "GET_jobs_details",
+"GET_resources_full", "GET_resources"]
+
+OARrequests_uri_list = ['/oarapi/version.json','/oarapi/timezone.json', '/oarapi/jobs.json',
+'/oarapi/jobs/details.json', '/oarapi/resources/full.json', '/oarapi/resources.json'] 
+
+OARrequests_get_uri_dict = { 'GET_version': '/oarapi/version.json',
+                       'GET_timezone':'/oarapi/timezone.json' ,
+                       'GET_jobs': '/oarapi/jobs.json',
+                        'GET_jobs<id>': '/oarapi/jobs/id.json',
+                        'GET_jobs<id>/resources': '/oarapi/jobs/id/resources.json',
+                        'GET_resources/<id>': '/oarapi/resources/.json',
+                       'GET_jobs_table': '/oarapi/jobs/table.json',
+                       'GET_jobs_details': '/oarapi/jobs/details.json',
+                       'GET_resources_full': '/oarapi/resources/full.json',
+                       'GET_resources':'/oarapi/resources.json',
+}
+
+OARrequest_post_uri_dict = { 'POST_job': '/oarapi/jobs.json'}
+
+POSTformat = {  #'yaml': {'content':"text/yaml", 'object':yaml}
+'json' : {'content':"application/json",'object':json}, 
+#'http': {'content':"applicaton/x-www-form-urlencoded",'object': html},
+}
+
+OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", 'workdir':"/home/", 'walltime':""}
+
+class OARrestapi:
+    def __init__(self):
+        self.oarserver= {}
+        self.oarserver['ip'] = OARIP
+        self.oarserver['port'] = 80
+        self.oarserver['uri'] = None
+        self.oarserver['postformat'] = 'json'  
+            
+    def GETRequestToOARRestAPI(self, request, strval=None  ): 
+        self.oarserver['uri'] = OARrequests_get_uri_dict[request]
+        if  strval:
+          self.oarserver['uri'] = self.oarserver['uri'].replace("id",strval)
+          print>>sys.stderr, "\r\n \r\n   GETRequestToOARRestAPI replace :  self.oarserver['uri'] %s",  self.oarserver['uri']
+        
+        try :
+            conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            conn.request("GET",self.oarserver['uri'] )
+            resp = ( conn.getresponse()).read()
+            conn.close()
+        except:
+            raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
+        try:
+            js = json.loads(resp)
+            return js
+        
+        except ValueError:
+            raise ServerError("Failed to parse Server Response:" + js)
+
+               
+               
+    def POSTRequestToOARRestAPI(self, request, datadict, username):
+        #first check that all params for are OK 
+        print>>sys.stderr, " \r\n \r\n POSTRequestToOARRestAPI username",username
+        try:
+            self.oarserver['uri'] = OARrequest_post_uri_dict[request] 
+        except:
+            print>>sys.stderr, " \r\n \r\n POSTRequestToOARRestAPI request not in OARrequest_post_uri_dict"
+            return
+        #if format in POSTformat:
+            #if format is 'json':
+        data = json.dumps(datadict)
+        headers = {'X-REMOTE_IDENT':username,\
+                'content-type':POSTformat['json']['content'],\
+                'content-length':str(len(data))}     
+        try :
+            #self.oarserver['postformat'] = POSTformat[format]
+            
+            print>>sys.stderr, "\r\n POSTRequestToOARRestAPI   headers %s uri %s" %(headers,self.oarserver['uri'])
+            conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            conn.request("POST",self.oarserver['uri'],data,headers )
+            resp = ( conn.getresponse()).read()
+            conn.close()
+            
+            #conn = httplib.HTTPConnection(self.oarserver['ip'],self.oarserver['port'])
+            #conn.putrequest("POST",self.oarserver['uri'] )
+            #self.oarserver['postformat'] = POSTformat[format]
+            #conn.putheader('HTTP X-REMOTE_IDENT', 'avakian')
+            #conn.putheader('content-type', self.oarserver['postformat']['content'])
+            #conn.putheader('content-length', str(len(data))) 
+            #conn.endheaders()
+            #conn.send(data)
+            #resp = ( conn.getresponse()).read()
+            #conn.close()
+
+        except:
+            print>>sys.stderr, "\r\n POSTRequestToOARRestAPI  ERROR: data %s \r\n \t\n \t\t headers %s uri %s" %(data,headers,self.oarserver['uri'])
+            #raise ServerError("POST_OAR_SRVR : error")
+                
+        try:
+            answer = json.loads(resp)
+            print>>sys.stderr, "\r\n POSTRequestToOARRestAPI : ", answer
+            return answer
+
+        except ValueError:
+            raise ServerError("Failed to parse Server Response:" + answer)
+
+
+    #def createjobrequest(self, nodelist):
+        #datadict = dict(zip(self.OARpostdatareqfields.keys(), self.OARpostdatareqfields.values())
+        #for k in datadict:
+                #if k is 'resource':
+                    #for node in nodelist:
+                    #datadict[k] += str(nodelist)
+
+                       
+class OARGETParser:
+
+    #Insert a new node into the dictnode dictionary
+    def AddNodeId(self,dictnode,value):
+        #Inserts new key. The value associated is a tuple list.
+        node_id = int(value)
+        dictnode[node_id] = [('node_id',node_id) ]     
+        return node_id
+    
+    def AddNodeNetworkAddr(self,tuplelist,value):
+        tuplelist.append(('hostname',str(value)))
+                    
+            
+    def AddNodeSite(self,tuplelist,value):
+        tuplelist.append(('site_login_base',str(value)))       
+            
+    def AddNodeRadio(self,tuplelist,value):
+        tuplelist.append(('radio',str(value))) 
+    
+    
+    def AddMobility(self,tuplelist,value):
+        if value :
+            tuplelist.append(('mobile',int(value)))    
+        return 0
+    
+    
+    def AddPosX(self,tuplelist,value):
+        tuplelist.append(('posx',value))       
+    
+    
+    def AddPosY(self,tuplelist,value):
+        tuplelist.append(('posy',value))       
+    
+    def AddBootState(self,tuplelist,value):
+        tuplelist.append(('boot_state',str(value)))    
+    
+    def ParseVersion(self) : 
+        #print self.raw_json
+        #print >>sys.stderr, self.raw_json
+        if 'oar_version' in self.raw_json :
+            self.version_json_dict.update(api_version=self.raw_json['api_version'] ,
+                            apilib_version=self.raw_json['apilib_version'],
+                            api_timezone=self.raw_json['api_timezone'],
+                            api_timestamp=self.raw_json['api_timestamp'],
+                            oar_version=self.raw_json['oar_version'] )
+        else :
+            self.version_json_dict.update(api_version=self.raw_json['api'] ,
+                            apilib_version=self.raw_json['apilib'],
+                            api_timezone=self.raw_json['api_timezone'],
+                            api_timestamp=self.raw_json['api_timestamp'],
+                            oar_version=self.raw_json['oar'] )
+                                
+        print self.version_json_dict['apilib_version']
+        
+            
+    def ParseTimezone(self) : 
+        print " ParseTimezone" 
+            
+    def ParseJobs(self) :
+        self.jobs_list = []
+        print " ParseJobs "
+            
+    def ParseJobsTable(self) : 
+        print "ParseJobsTable"
+                
+    def ParseJobsDetails (self): 
+        print "ParseJobsDetails"
+            
+    def ParseResources(self) :
+        print>>sys.stderr, " \r\n  \t\t\t ParseResources__________________________ " 
+        #resources are listed inside the 'items' list from the json
+        self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+       
+        
+            
+            
+    def ParseResourcesFull(self ) :
+        print>>sys.stderr, " \r\n \t\t\t  ParseResourcesFull_____________________________ "
+        #print self.raw_json[1]
+        #resources are listed inside the 'items' list from the json
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+                self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+
+            
+            
+    #Parse nodes properties from OAR
+    #Put them into a dictionary with key = node id and value is a dictionary 
+    #of the node properties and properties'values.
+    def ParseNodes(self):  
+        node_id = None
+        #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes self.raw_json %s" %(self.raw_json)
+        for dictline in self.raw_json:
+            #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes dictline %s hey" %(dictline)
+            for k in dictline.keys():
+                if k in self.resources_fulljson_dict:
+                    # dictionary is empty and/or a new node has to be inserted 
+                    if node_id is None :
+                        node_id = self.resources_fulljson_dict[k](self,self.node_dictlist, dictline[k])        
+                    else:
+                        ret = self.resources_fulljson_dict[k](self,self.node_dictlist[node_id], dictline[k])
+                        #If last property has been inserted in the property tuple list, reset node_id 
+                        if ret == 0:
+                            #Turn the property tuple list (=dict value) into a dictionary
+                            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+                            node_id = None
+                    
+                else:
+                    pass
+
+    # Build a list of dictionaries holding each site's attributes
+    def ParseSites(self):
+        nodes_per_site = {}
+        
+        # Create a list of nodes per  site_id
+        for node_id in self.node_dictlist.keys():
+            node  = self.node_dictlist[node_id]
+            if node['site_login_base'] not in nodes_per_site.keys():
+                nodes_per_site[node['site_login_base']] = []
+                nodes_per_site[node['site_login_base']].append(node['node_id'])
+            else:
+                if node['node_id'] not in nodes_per_site[node['site_login_base']]:
+                    nodes_per_site[node['site_login_base']].append(node['node_id'])
+        #Create a site dictionary with key is site_login_base (name of the site)
+        # and value is a dictionary of properties, including the list of the node_ids
+        for node_id in self.node_dictlist.keys():
+            node  = self.node_dictlist[node_id]
+            if node['site_login_base'] not in self.site_dict.keys():
+                self.site_dict[node['site_login_base']] = [('login_base', node['site_login_base']),\
+                                                        ('node_ids',nodes_per_site[node['site_login_base']]),\
+                                                        ('latitude',"48.83726"),\
+                                                        ('longitude',"- 2.10336"),('name',"senslab"),\
+                                                        ('pcu_ids', []), ('max_slices', None), ('ext_consortium_id', None),\
+                                                        ('max_slivers', None), ('is_public', True), ('peer_site_id', None),\
+                                                        ('abbreviated_name', "senslab"), ('address_ids', []),\
+                                                        ('url', "http,//www.senslab.info"), ('person_ids', []),\
+                                                        ('site_tag_ids', []), ('enabled', True),  ('slice_ids', []),\
+                                                        ('date_created', None), ('peer_id', None),]
+                self.site_dict[node['site_login_base']] = dict(self.site_dict[node['site_login_base']])
+                        
+        #print>>sys.stderr, "\r\n \r\n =============\t\t ParseSites site dict %s \r\n"%(self.site_dict)
+            
+            
+    def GetNodesFromOARParse(self):
+        #print>>sys.stderr, " \r\n =========GetNodesFromOARParse: node_dictlist %s "%(self.node_dictlist)
+        return self.node_dictlist
+
+    def GetSitesFromOARParse(self):
+        return self.site_dict
+    
+    def GetJobsFromOARParse(self):
+        return self.jobs_list  
+
+    OARrequests_uri_dict = { 
+        'GET_version': {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
+        'GET_timezone':{'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
+        'GET_jobs': {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
+        'GET_jobs_table': {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
+        'GET_jobs_details': {'uri':'/oarapi/jobs/details.json','parse_func': ParseJobsDetails},
+        'GET_resources_full': {'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFull},
+        'GET_resources':{'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
+        }
+    resources_fulljson_dict= {
+        'resource_id' : AddNodeId,
+        'network_address' : AddNodeNetworkAddr,
+        'site': AddNodeSite, 
+        'radio': AddNodeRadio,
+        'mobile': AddMobility,
+        'posx': AddPosX,
+        'posy': AddPosY,
+        'state':AddBootState,
+        }
+
+    
+    def __init__(self, srv ):
+        self.version_json_dict= { 'api_version' : None , 'apilib_version' :None,  'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
+        self.timezone_json_dict = { 'timezone': None, 'api_timestamp': None, }
+        self.jobs_json_dict = { 'total' : None, 'links' : [] , 'offset':None , 'items' : [] , }
+        self.jobs_table_json_dict = self.jobs_json_dict
+        self.jobs_details_json_dict = self.jobs_json_dict              
+        self.server = srv
+        self.node_dictlist = {}
+        self.site_dict = {}
+        self.SendRequest("GET_version")
+
+    def SendRequest(self,request):
+        if request in OARrequests_get_uri_dict:
+            self.raw_json = self.server.GETRequestToOARRestAPI(request)
+            self.OARrequests_uri_dict[request]['parse_func'](self)
+        else:
+            print>>sys.stderr, "\r\n OARGetParse __init__ : ERROR_REQUEST "    ,request
+            
+class OARapi:
+
+    def __init__(self):
+            self.server = OARrestapi()
+            self.parser = OARGETParser(self.server)
+
+       #GetNodes moved to slabdriver.py
+            
+
+    
+                    
+    def GetJobs(self):
+        print>>sys.stderr, " \r\n GetJobs" 
+        self.parser.SendRequest("GET_jobs")    
+        return self.parser.GetJobsFromOARParse()
+    
diff --git a/sfa/senslab/SenslabImport.py b/sfa/senslab/SenslabImport.py
new file mode 100644 (file)
index 0000000..716e484
--- /dev/null
@@ -0,0 +1,274 @@
+
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+import getopt
+import sys
+import tempfile
+from sfa.util.sfalogging import _SfaLogger
+#from sfa.util.sfalogging import sfa_logger_goes_to_import,sfa_logger
+
+from sfa.util.record import *
+from sfa.util.table import SfaTable
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroots import *
+from sfa.trust.hierarchy import *
+from sfa.trust.gid import create_uuid
+
+
+
def _un_unicode(str):
   """Coerce *str* down to a plain ascii byte string: unicode input is
   encoded with errors ignored, anything else is returned untouched."""
   return str.encode("ascii", "ignore") if isinstance(str, unicode) else str
+
def _cleanup_string(str):
    """Sanitize *str* for use inside an hrn.

    High-ascii characters are dropped (pgsql has a fit with them), unicode
    is forced down to ascii, and punctuation that is illegal in an hrn is
    replaced by underscores.
    """
    # keep only 7-bit characters
    str = "".join(c for c in str if ord(c) < 128)
    str = _un_unicode(str)
    # each of these characters would break hrn parsing
    for forbidden in (" ", ".", "(", "'", ")", '"'):
        str = str.replace(forbidden, "_")
    return str
+
class SenslabImport:
    """Populate the SFA registry (SfaTable) with Senslab authorities, users,
    slices and nodes, creating the matching GIDs in the local authority
    hierarchy along the way.  Adapted from the PLC import tool."""

    def __init__(self):
       # Dedicated import log file; the print>>stderr calls below are
       # additional debug traces.
       self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
    
       #sfa_logger_goes_to_import()
       #self.logger = sfa_logger()
       self.AuthHierarchy = Hierarchy()
       self.config = Config()
       self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config))
       print>>sys.stderr, "\r\n ========= \t\t SenslabImport TrustedRoots\r\n" ,  self.TrustedRoots
       self.plc_auth = self.config.get_plc_auth()
       print>>sys.stderr, "\r\n ========= \t\t SenslabImport  self.plc_auth %s \r\n" %(self.plc_auth ) 
       self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH

    def create_sm_client_record(self):
        """
        Create a user record for the Slicemanager service.
        """
        hrn = self.config.SFA_INTERFACE_HRN + '.slicemanager'
        urn = hrn_to_urn(hrn, 'user')
        # NOTE(review): auth_exists/create_auth are called with a *user* urn
        # here -- confirm this is intended and not copied from the
        # authority-creation code below.
        if not self.AuthHierarchy.auth_exists(urn):
            self.logger.info("Import: creating Slice Manager user")
            self.AuthHierarchy.create_auth(urn)

        auth_info = self.AuthHierarchy.get_auth_info(hrn)
        table = SfaTable()
        sm_user_record = table.find({'type': 'user', 'hrn': hrn})
        # only insert the record if it is not already in the registry
        if not sm_user_record:
            record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="user", pointer=-1)
            record['authority'] = get_authority(record['hrn'])
            table.insert(record)    

    def create_top_level_auth_records(self, hrn):
        """
        Create top level records (includes root and sub authorities (local/remote)
        """
       print>>sys.stderr, "\r\n =========SenslabImport create_top_level_auth_records\r\n"
        urn = hrn_to_urn(hrn, 'authority')
        # make sure parent exists
        parent_hrn = get_authority(hrn)
        if not parent_hrn:
            parent_hrn = hrn
        if not parent_hrn == hrn:
            # recurse so the whole chain of parent authorities is registered
            self.create_top_level_auth_records(parent_hrn)

        # create the authority if it doesnt already exist 
        if not self.AuthHierarchy.auth_exists(urn):
            self.logger.info("Import: creating top level authorities")
            self.AuthHierarchy.create_auth(urn)
        
        # create the db record if it doesnt already exist    
        auth_info = self.AuthHierarchy.get_auth_info(hrn)
        table = SfaTable()
        auth_record = table.find({'type': 'authority', 'hrn': hrn})

        if not auth_record:
            auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
            auth_record['authority'] = get_authority(auth_record['hrn'])
            self.logger.info("Import: inserting authority record for %s"%hrn)
            table.insert(auth_record)
           print>>sys.stderr, "\r\n ========= \t\t SenslabImport NO AUTH RECORD \r\n" ,auth_record['authority']
           
           
    def create_interface_records(self):
        """
        Create a record for each SFA interface
        """
        # just create certs for all sfa interfaces even if they
        # arent enabled
        interface_hrn = self.config.SFA_INTERFACE_HRN
        interfaces = ['authority+sa', 'authority+am', 'authority+sm']
        table = SfaTable()
        auth_info = self.AuthHierarchy.get_auth_info(interface_hrn)
        pkey = auth_info.get_pkey_object()
        for interface in interfaces:
            # one record per interface type, all under the same hrn
            interface_record = table.find({'type': interface, 'hrn': interface_hrn})
            if not interface_record:
                self.logger.info("Import: interface %s %s " % (interface_hrn, interface))
                urn = hrn_to_urn(interface_hrn, interface)
                gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
                record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)  
                record['authority'] = get_authority(interface_hrn)
               print>>sys.stderr,"\r\n ==========create_interface_records", record['authority']
                table.insert(record) 

    def import_person(self, parent_hrn, person, keys):
        """
        Register a user record 
        """
        hrn = email_to_hrn(parent_hrn, person['email'])

       print >>sys.stderr , "\r\n_____00______SenslabImport : person", person  
        # ASN.1 will have problems with hrn's longer than 64 characters
        if len(hrn) > 64:
            hrn = hrn[:64]
       print >>sys.stderr , "\r\n_____0______SenslabImport : parent_hrn", parent_hrn
        self.logger.info("Import: person %s"%hrn)
        key_ids = []
       # grab the users' ssh keys -- are they in OAR?
        if 'key_ids' in person and person['key_ids']:
            key_ids = person["key_ids"]
            # get the user's private key from the SSH keys they have uploaded
            # to planetlab
           print >>sys.stderr , "\r\n_____1______SenslabImport : self.plc_auth %s \r\n \t keys %s key[0] %s" %(self.plc_auth,keys, keys[0])
            key = keys[0]['key']
            pkey = convert_public_key(key)
           print >>sys.stderr , "\r\n_____2______SenslabImport : key %s pkey %s"% (key,pkey.as_pem())      
            if not pkey:
                pkey = Keypair(create=True)
        else:
            # the user has no keys
            self.logger.warning("Import: person %s does not have a PL public key"%hrn)
            # if a key is unavailable, then we still need to put something in the
            # user's GID. So make one up.
            pkey = Keypair(create=True)
           print >>sys.stderr , "\r\n___ELSE________SenslabImport pkey : %s"%(pkey.key)
        # create the gid
        urn = hrn_to_urn(hrn, 'user')
       print >>sys.stderr , "\r\n \t\t : urn ", urn
        person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        table = SfaTable()
        person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id'])
        person_record['authority'] = get_authority(person_record['hrn'])
        # insert a fresh record, or update in place if one already exists
        existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']})
        if not existing_records:
            table.insert(person_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            person_record['record_id'] = existing_record['record_id']
            table.update(person_record)

    def import_slice(self, parent_hrn, slice):
        """Register (or update) a slice record under *parent_hrn*."""
        #slicename = slice['name'].split("_",1)[-1]
       
        slicename = _cleanup_string(slice['name'])

        if not slicename:
            self.logger.error("Import: failed to parse slice name %s" %slice['name'])
            return

        hrn = parent_hrn + "." + slicename
        self.logger.info("Import: slice %s"%hrn)

        # slices get a freshly generated keypair for their GID
        pkey = Keypair(create=True)
        urn = hrn_to_urn(hrn, 'slice')
        slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'])
        slice_record['authority'] = get_authority(slice_record['hrn'])
        table = SfaTable()
        existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']})
        if not existing_records:
            table.insert(slice_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            slice_record['record_id'] = existing_record['record_id']
            table.update(slice_record)

    def import_node(self, hrn, node):
        """Register (or update) a node record for *hrn*."""
        self.logger.info("Import: node %s" % hrn)
        # ASN.1 will have problems with hrn's longer than 64 characters
        if len(hrn) > 64:
            hrn = hrn[:64]

        table = SfaTable()
        # NOTE(review): this lookup result is immediately overwritten below
        # and never used -- confirm it can simply be dropped.
        node_record = table.find({'type': 'node', 'hrn': hrn})
        pkey = Keypair(create=True)
        urn = hrn_to_urn(hrn, 'node')
        node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
        node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
        node_record['authority'] = get_authority(node_record['hrn'])
        existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
        if not existing_records:
            table.insert(node_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            node_record['record_id'] = existing_record['record_id']
            table.update(node_record)

    
    def import_site(self, parent_hrn, site):
        """Register (or update) a site as a sub-authority of *parent_hrn*;
        returns the hrn created for the site."""
        # NOTE(review): plc_auth is never used in this method.
        plc_auth = self.plc_auth
        sitename = site['login_base']
        sitename = _cleanup_string(sitename)
        hrn = parent_hrn + "." + sitename 

        urn = hrn_to_urn(hrn, 'authority')
        self.logger.info("Import: site %s"%hrn)

        # create the authority
        if not self.AuthHierarchy.auth_exists(urn):
            self.AuthHierarchy.create_auth(urn)

        # NOTE(review): other methods in this class pass an hrn to
        # get_auth_info; here the urn is passed -- confirm which is right.
        auth_info = self.AuthHierarchy.get_auth_info(urn)

        table = SfaTable()
        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id'])
        auth_record['authority'] = get_authority(auth_record['hrn'])
        existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']})
        if not existing_records:
            table.insert(auth_record)
        else:
            self.logger.info("Import: %s exists, updating " % hrn)
            existing_record = existing_records[0]
            auth_record['record_id'] = existing_record['record_id']
            table.update(auth_record)

        return hrn


    def delete_record(self, hrn, type):
        """Remove every registry record matching (*type*, *hrn*)."""
        # delete the record
        table = SfaTable()
        record_list = table.find({'type': type, 'hrn': hrn})
        for record in record_list:
            self.logger.info("Import: removing record %s %s" % (type, hrn))
            table.remove(record)        
diff --git a/sfa/senslab/SenslabImportUsers.py b/sfa/senslab/SenslabImportUsers.py
new file mode 100644 (file)
index 0000000..8109f34
--- /dev/null
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+import datetime
+import time
+from sfa.senslab.parsing import *
+
+
+                               
+                               
class SenslabImportUsers:
       """In-memory stand-in for the PLC persons/keys/slices API used during
       the Senslab import.  Most of the original fixture-initialisation code
       is kept below as commented-out scaffolding."""


       def __init__(self):
               # caches filled in elsewhere (the Init* helpers are disabled)
               self.person_list = []
               self.keys_list = []
               self.slices_list= []
               #self.resources_fulldict['keys'] = []
               #self.InitPersons()
               #self.InitKeys()
               #self.InitSlices()
               
               
       #def InitSlices(self):
               #slices_per_site = {}
               #dflt_slice = { 'instantiation': None, 'description': "Senslab Slice Test",  'node_ids': [], 'url': "http://localhost.localdomain/", 'max_nodes': 256, 'site_id': 3,'peer_slice_id': None, 'slice_tag_ids': [], 'peer_id': None, 'hrn' :None}
               #for person in self.person_list:
                       #if 'user' or 'pi' in person['roles']:
                               #def_slice = {}
                               ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice person %s \r\n \rn " %(person['person_id'])
                               #def_slice['person_ids'] = []
                               #def_slice['person_ids'].append(person['person_id'])
                               #def_slice['slice_id'] = person['person_id']
                               #def_slice['creator_person_id'] = person['person_id']
                               #extime =  datetime.datetime.utcnow()
                               #def_slice['created'] = int(time.mktime(extime.timetuple()))
                               #extime = extime + datetime.timedelta(days=365)
                               #def_slice['expires'] = int(time.mktime(extime.timetuple()))
                               ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice expires  %s \r\n \r\n "%(def_slice['expires'])                                
                               #def_slice['name'] = person['email'].replace('@','_',1)
                               ##print>>sys.stderr, "\r\n \rn \t\t _____-----------************def_slice %s \r\n \r\n " %(def_slice['name'])
                               #def_slice.update(dflt_slice)
                               #self.slices_list.append(def_slice)
       
               ##print>>sys.stderr, "InitSlices SliceLIST", self.slices_list
               
       #def InitPersons(self): 
               #persons_per_site = {}
               #person_id = 7
               #persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'a_rioot@senslab.fr', 'key_ids':[1], 'roles': ['pi'], 'role_ids':[20],'first_name':'A','last_name':'rioot'}
               #person_id = 8
               #persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'lost@senslab.fr','key_ids':[1],'roles': ['pi'], 'role_ids':[20],'first_name':'L','last_name':'lost'}
               #person_id = 9
               #persons_per_site[person_id] = {'person_id': person_id,'site_ids': [3],'email': 'user@senslab.fr','key_ids':[1],'roles': ['user'], 'role_ids':[1],'first_name':'U','last_name':'senslab'}
               #for person_id in persons_per_site.keys():
                       #person  = persons_per_site[person_id]
                       #if person['person_id'] not in self.person_list:
                               #self.person_list.append(person)
               ##print>>sys.stderr, "InitPersons PERSON DICLIST", self.person_list

       
       #def InitKeys(self):
               ##print>>sys.stderr, " InitKeys HEYYYYYYY\r\n"
       
               #self.keys_list = [{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 7, 'key_id':1, 'peer_id':None }, 
               #{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 8, 'key_id':1, 'peer_id':None }, 
               #{'peer_key_id': None, 'key_type': 'ssh', 'key' :"ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEArcdW0X2la754SoFE+URbDsYP07AZJjrspMlvUc6u+4o6JpGRkqiv7XdkgOMIn6w3DF3cYCcA1Mc6XSG7gSD7eQx614cjlLmXzHpxSeidSs/LgZaAQpq9aQ0KhEiFxg0gp8TPeB5Z37YOPUumvcJr1ArwL/8tAOx3ClwgRhccr2HOe10YtZbMEboCarTlzNHiGolo7RYIJjGuG2RBSeAg6SMZrtnn0OdKBwp3iUlOfkS98eirVtWUp+G5+SZggip3fS3k5Oj7OPr1qauva8Rizt02Shz30DN9ikFNqV2KuPg54nC27/DQsQ6gtycARRVY91VvchmOk0HxFiW/9kS2GQ== root@FlabFedora2",'person_id': 9, 'key_id':1, 'peer_id':None }] 
               
               
                                       
       
       #def GetPersons(self, person_filter=None, return_fields=None):
               ##print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s  list: %s" %(person_filter,return_fields, self.person_list)
               #if not self.person_list :
                       #print>>sys.stderr, " \r\n ========>GetPersons NO PERSON LIST DAMMIT<========== \r\n" 
                       
               #if not (person_filter or return_fields):
                       #return self.person_list
               
               #return_person_list= [] 
               #return_person_list = parse_filter(self.person_list,person_filter ,'persons', return_fields)
               #return return_person_list
               
       
       def GetPIs(self,site_id):
               """Return the person_ids of every PI attached to *site_id*."""
               return_person_list= []  
               for person in self.person_list :
                       if site_id in person['site_ids'] and 'pi' in person['roles'] :
                               return_person_list.append(person['person_id'])
               #print>>sys.stderr, " \r\n  GetPIs      return_person_list %s :" %(return_person_list)  
               return return_person_list
               
                               
       def GetKeys(self,key_filter=None, return_fields=None):
               """Return the cached key list, optionally narrowed down by
               *key_filter* / *return_fields* through parse_filter()."""
               return_key_list= []
               print>>sys.stderr, " \r\n GetKeys" 
       
               if not (key_filter or return_fields):
                       return self.keys_list
               return_key_list = parse_filter(self.keys_list,key_filter ,'keys', return_fields)
               return return_key_list
       
       #return_key_list= []
               #print>>sys.stderr, " \r\n GetKeys" 
       
               #if not (key_filter or return_fields):
                       #return self.keys_list
               
               #elif key_filter or return_fields:
                       #for key in self.keys_list:
                               #tmp_key = {}
                               #if key_filter:
                                       #for k_filter in key_filter:
                                               #if key['key_id'] == k_filter :
                                                       #if return_fields:
                                                               #for field in return_fields:
                                                                       #if field in key.keys():
                                                                               #tmp_key[field] = key[field]
                                                       #else:
                                                               #tmp_key = key
                                                               
                                                       #print>>sys.stderr, " \r\n tmp_key",tmp_key  
                                                       #return_key_list.append(tmp_key)
                               #print>>sys.stderr," \r\n End GetKeys with filter ", return_key_list                    
               #return return_key_list
       

       
       
       def AddSlice(self, slice_fields): 
               """Stub: only traces the call -- slices are not stored yet."""
               print>>sys.stderr, " \r\n \r\nAddSlice "
               
               
       def AddPersonToSlice(self,person_id_or_email, slice_id_or_name):
               """Stub: only traces the call."""
               print>>sys.stderr, " \r\n \r\n  AddPersonToSlice"
               
       def DeletePersonFromSlice(self,person_id_or_email, slice_id_or_name):
               """Stub: only traces the call."""
               print>>sys.stderr, " \r\n \r\n DeletePersonFromSlice "
diff --git a/sfa/senslab/__init__.py b/sfa/senslab/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/senslab/parsing.py b/sfa/senslab/parsing.py
new file mode 100644 (file)
index 0000000..0a5092a
--- /dev/null
@@ -0,0 +1,122 @@
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+from collections import defaultdict
+
def strip_dictionnary (dict_to_strip):
    """Return a copy of *dict_to_strip* whose keys have been stringified
    and stripped of any surrounding '|' characters; values are unchanged."""
    return dict((str(key).strip('|'), value)
                for key, value in dict_to_strip.items())
+       
+
def filter_return_fields( dict_to_filter, return_fields):
    """Project *dict_to_filter* onto the keys listed in *return_fields*.

    Fields that are absent from the dict are silently skipped; a new dict
    is returned, the input is never modified.
    """
    sys.stderr.write(" \r\n \t \tfilter_return_fields return fields %s \n" %(return_fields))
    filtered_dict = dict((field, dict_to_filter[field])
                         for field in return_fields
                         if field in dict_to_filter)
    sys.stderr.write(" \r\n \t\t filter_return_fields filtered_dict %s \n" %(filtered_dict))
    return filtered_dict
+       
+       
+       
def parse_filter(list_to_filter, param_filter, type_of_list, return_fields=None) :
       """Filter *list_to_filter* (a list of dicts) by *param_filter*.

       param_filter may be a list of ids/names (matched against the id/name
       key declared for *type_of_list* below) or a dict of field->value
       constraints.  When *return_fields* is given each matching item is
       reduced to those fields via filter_return_fields().
       NOTE(review): returns None (implicit) when nothing matched, [] only
       for an unknown *type_of_list* -- callers must handle both.
       """
       # per-list-type names of the string and integer lookup keys
       list_type = { 'persons': {'str': 'hrn','int':'record_id'},\
        'keys':{'int':'key_id'},\
        'site':{'str':'login_base','int':'site_id'},\
         'node':{'str':'hostname','int':'node_id'},\
         'slice':{'str':'slice_hrn','int':'record_id_slice'}}
               
       print>>sys.stderr, " \r\n ___ parse_filter param_filter %s type %s  return fields %s " %(param_filter,type_of_list, return_fields)  
       # nothing to do: hand the list back untouched
       if  param_filter is None and return_fields is None:
            return list_to_filter
        
       if type_of_list not in list_type:
               print>>sys.stderr, " \r\n type_of_list Error  parse_filter %s " %(type_of_list)
               return []

       return_filtered_list= []
       
       for item in list_to_filter:
               tmp_item = {}
               
               if type(param_filter) is list :
                       #print>>sys.stderr, " \r\n p_filter LIST %s " %(param_filter)
                       
                       for p_filter in param_filter:
                               #print>>sys.stderr, " \r\n p_filter %s \t item %s " %(p_filter,item)
                               # integer filters match on the 'int' key, string
                               # filters on the 'str' key of this list type
                               if type(p_filter) is int:
                                       if item[list_type[type_of_list]['int']] == p_filter :
                                               if return_fields:
                                                       tmp_item = filter_return_fields(item,return_fields)
                                               else:
                                                       tmp_item = item
                                               return_filtered_list.append(tmp_item)
                                       #print>>sys.stderr, " \r\n 1tmp_item",tmp_item  
                                       
                               if type(p_filter) is str:
                                       if item[list_type[type_of_list]['str']] == str(p_filter) :
                                                print>>sys.stderr, " \r\n p_filter %s \t item %s "%(p_filter,item[list_type[type_of_list]['str']])
                                               if return_fields:
                                                       tmp_item = filter_return_fields(item,return_fields)
                                               else:
                                                       tmp_item = item
                                               return_filtered_list.append(tmp_item)
                                       #print>>sys.stderr, " \r\n 2tmp_item",tmp_item
                                       
       
               elif type(param_filter) is dict:
                       #stripped_filterdict = strip_dictionnary(param_filter)
                       #tmp_copy = {}
                       #tmp_copy = item.copy()
                       #print>>sys.stderr, " \r\n \t\t ________tmp_copy %s " %(tmp_copy)
                       #key_list = tmp_copy.keys()                     
                       #for key in key_list:
                               #print>>sys.stderr, " \r\n \t\t  key %s " %(key)
                               #if key not in stripped_filterdict:
                                       #del tmp_copy[key] 
                                        
                        #rif the item matches the filter, returns it
                        founditem = []
                        # NOTE(review): both branches below build the exact
                        # same defaultdict -- the 'id' check currently has no
                        # effect; confirm what was intended here.
                        check =  [ True for  k in param_filter.keys() if 'id' in k ]
                        if check :
                            dflt= defaultdict(str,param_filter)
                            
                        else:
                            dflt= defaultdict(str,param_filter)
                              
                        
                        
                        #founditem =  [ item for k in dflt if item[k] in dflt[k]]
                        # an item is kept as soon as one filter field matches
                        for k in dflt:
                            if item[k] in dflt[k]:
                               founditem = [item]

                        if founditem: 
                            if return_fields:
                                print>>sys.stderr, "  \r\n \r\n parsing.py param_filter dflt %s founditem %s " %(dflt, founditem)
                                tmp_item = filter_return_fields(founditem[0],return_fields)
                            else:
                                tmp_item = founditem[0]
                            return_filtered_list.append(tmp_item)
                       
                       #print>>sys.stderr, " \r\n tmp_copy %s param_filter %s cmp = %s " %(tmp_copy, param_filter,cmp(tmp_copy, stripped_filterdict))
                       
                       #if cmp(tmp_copy, stripped_filterdict) == 0:    
                               #if return_fields:
                                       #tmp_item = filter_return_fields(item,return_fields)
                               #else:
                                       
                                       #tmp_item = item        
                               #return_filtered_list.append(tmp_item)
       if return_filtered_list :
          return return_filtered_list
        
\ No newline at end of file
diff --git a/sfa/senslab/sfa-bare b/sfa/senslab/sfa-bare
new file mode 100755 (executable)
index 0000000..745955c
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# sfa  starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description:   starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
start() {
    # Start every SFA service enabled in /etc/sfa/sfa_config.
    # 'action' and 'daemon' come from /etc/init.d/functions (sourced above).

    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
    fi

    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
    fi
        
    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
    fi

    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
    fi

    # NOTE(review): $? here only reflects the last command above, so a
    # failure while starting an earlier service is not reported.
    RETVAL=$?
    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py

}
+
stop() {
    # Kill all sfa-server.py processes; drop the lock file only on success.
    action $"Shutting down SFA" killproc sfa-server.py
    RETVAL=$?

    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
}
+
+
# Dispatch on the init action requested by the caller.
case "$1" in
    start) start ;;
    stop) stop ;;
    # NOTE(review): no 'reload' function is defined in this script and
    # /etc/init.d/functions does not provide one -- this branch will fail
    # with "command not found"; confirm whether 'restart' was intended.
    reload) reload force ;;
    restart) stop; start ;;
    condrestart)
       # restart only if the service was running (lock file present)
       if [ -f /var/lock/subsys/sfa-server.py ]; then
            stop
            start
       fi
       ;;
    status)
       status sfa-server.py
       RETVAL=$?
       ;;
    *)
       echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
       exit 1
       ;;
esac
+
+exit $RETVAL
+
diff --git a/sfa/senslab/sfaImport.py b/sfa/senslab/sfaImport.py
new file mode 100644 (file)
index 0000000..3f85f4b
--- /dev/null
@@ -0,0 +1,272 @@
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+import getopt
+import sys
+import tempfile
+
+from sfa.util.sfalogging import sfa_logger_goes_to_import,sfa_logger
+
+from sfa.util.record import *
+from sfa.util.table import SfaTable
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroot import *
+from sfa.trust.hierarchy import *
+from sfa.trust.gid import create_uuid
+
+
+def _un_unicode(str):
+   if isinstance(str, unicode):
+       return str.encode("ascii", "ignore")
+   else:
+       return str
+
+def _cleanup_string(str):
+    # pgsql has a fit with strings that have high ascii in them, so filter it
+    # out when generating the hrns.
+    tmp = ""
+    for c in str:
+        if ord(c) < 128:
+            tmp = tmp + c
+    str = tmp
+
+    str = _un_unicode(str)
+    str = str.replace(" ", "_")
+    str = str.replace(".", "_")
+    str = str.replace("(", "_")
+    str = str.replace("'", "_")
+    str = str.replace(")", "_")
+    str = str.replace('"', "_")
+    return str
+
+class sfaImport:
+
+    def __init__(self):
+       sfa_logger_goes_to_import()
+       self.logger = sfa_logger()
+       self.AuthHierarchy = Hierarchy()
+       self.config = Config()
+       self.TrustedRoots = TrustedRootList(Config.get_trustedroots_dir(self.config))
+
+       self.plc_auth = self.config.get_plc_auth()
+       self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
+       print>>sys.stderr, "\r\n ========= \t\t sfaImport plc_auth %s root_auth %s \r\n" %( self.plc_auth,  self.root_auth )      
+       # connect to planetlab
+       self.shell = None
+       if "Url" in self.plc_auth:
+          from sfa.plc.remoteshell import RemoteShell
+          self.shell = RemoteShell(self.logger)
+       else:
+          import PLC.Shell
+          self.shell = PLC.Shell.Shell(globals = globals())        
+
+    def create_top_level_auth_records(self, hrn):
+        """
+        Create top level records (includes root and sub authorities (local/remote)
+        """
+       
+        urn = hrn_to_urn(hrn, 'authority')
+        # make sure parent exists
+        parent_hrn = get_authority(hrn)
+        if not parent_hrn:
+            parent_hrn = hrn
+        if not parent_hrn == hrn:
+            self.create_top_level_auth_records(parent_hrn)
+       print>>sys.stderr, "\r\n =========create_top_level_auth_records parent_hrn \r\n", parent_hrn
+       
+        # create the authority if it doesnt already exist 
+        if not self.AuthHierarchy.auth_exists(urn):
+            self.logger.info("Import: creating top level authorities")
+            self.AuthHierarchy.create_auth(urn)
+        
+        # create the db record if it doesnt already exist    
+        auth_info = self.AuthHierarchy.get_auth_info(hrn)
+        table = SfaTable()
+        auth_record = table.find({'type': 'authority', 'hrn': hrn})
+
+        if not auth_record:
+            auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
+            auth_record['authority'] = get_authority(auth_record['hrn'])
+            self.logger.info("Import: inserting authority record for %s"%hrn)
+            table.insert(auth_record)
+           print>>sys.stderr, "\r\n ========= \t\t NO AUTH RECORD \r\n" ,auth_record['authority']
+           
+           
+    def create_interface_records(self):
+        """
+        Create a record for each SFA interface
+        """
+        # just create certs for all sfa interfaces even if they
+        # arent enabled
+        interface_hrn = self.config.SFA_INTERFACE_HRN
+        interfaces = ['authority+sa', 'authority+am', 'authority+sm']
+        table = SfaTable()
+        auth_info = self.AuthHierarchy.get_auth_info(interface_hrn)
+        pkey = auth_info.get_pkey_object()
+        for interface in interfaces:
+            interface_record = table.find({'type': interface, 'hrn': interface_hrn})
+            if not interface_record:
+                self.logger.info("Import: interface %s %s " % (interface_hrn, interface))
+                urn = hrn_to_urn(interface_hrn, interface)
+                gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+                record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)  
+                record['authority'] = get_authority(interface_hrn)
+               print>>sys.stderr,"\r\n ==========create_interface_records", record['authority']
+                table.insert(record) 
+
+    def import_person(self, parent_hrn, person):
+        """
+        Register a user record 
+        """
+        hrn = email_to_hrn(parent_hrn, person['email'])
+
+       print >>sys.stderr , "\r\n_____00______SfaImport : person", person      
+        # ASN.1 will have problems with hrn's longer than 64 characters
+        if len(hrn) > 64:
+            hrn = hrn[:64]
+       print >>sys.stderr , "\r\n_____0______SfaImport : parent_hrn", parent_hrn
+        self.logger.info("Import: person %s"%hrn)
+        key_ids = []
+        if 'key_ids' in person and person['key_ids']:
+            key_ids = person["key_ids"]
+            # get the user's private key from the SSH keys they have uploaded
+            # to planetlab
+            keys = self.shell.GetKeys(self.plc_auth, key_ids)
+           print >>sys.stderr , "\r\n_____1______SfaImport : self.plc_auth %s \r\n \t keys %s " %(self.plc_auth,keys)
+            key = keys[0]['key']
+            pkey = convert_public_key(key)
+           print >>sys.stderr , "\r\n_____2______SfaImport : key %s pkey %s"% (key,pkey.as_pem())          
+            if not pkey:
+                pkey = Keypair(create=True)
+        else:
+            # the user has no keys
+            self.logger.warning("Import: person %s does not have a PL public key"%hrn)
+            # if a key is unavailable, then we still need to put something in the
+            # user's GID. So make one up.
+            pkey = Keypair(create=True)
+           print >>sys.stderr , "\r\n___ELSE________SfaImport pkey : %s \r\n \t pkey.key.bits%s "%(dir(pkey.key), pkey.as_pem())
+        # create the gid
+        urn = hrn_to_urn(hrn, 'user')
+       print >>sys.stderr , "\r\n \t\t : urn ", urn
+        person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        table = SfaTable()
+        person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id'])
+        person_record['authority'] = get_authority(person_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']})
+        if not existing_records:
+            table.insert(person_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            person_record['record_id'] = existing_record['record_id']
+            table.update(person_record)
+
+    def import_slice(self, parent_hrn, slice):
+        slicename = slice['name'].split("_",1)[-1]
+        slicename = _cleanup_string(slicename)
+
+        if not slicename:
+            self.logger.error("Import: failed to parse slice name %s" %slice['name'])
+            return
+
+        hrn = parent_hrn + "." + slicename
+        self.logger.info("Import: slice %s"%hrn)
+
+        pkey = Keypair(create=True)
+        urn = hrn_to_urn(hrn, 'slice')
+        slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'])
+        slice_record['authority'] = get_authority(slice_record['hrn'])
+        table = SfaTable()
+        existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']})
+        if not existing_records:
+            table.insert(slice_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            slice_record['record_id'] = existing_record['record_id']
+            table.update(slice_record)
+
+    def import_node(self, hrn, node):
+        self.logger.info("Import: node %s" % hrn)
+        # ASN.1 will have problems with hrn's longer than 64 characters
+        if len(hrn) > 64:
+            hrn = hrn[:64]
+
+        table = SfaTable()
+        node_record = table.find({'type': 'node', 'hrn': hrn})
+        pkey = Keypair(create=True)
+        urn = hrn_to_urn(hrn, 'node')
+        node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+        node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
+        node_record['authority'] = get_authority(node_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
+        if not existing_records:
+            table.insert(node_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            node_record['record_id'] = existing_record['record_id']
+            table.update(node_record)
+
+    
+    def import_site(self, parent_hrn, site):
+        shell = self.shell
+        plc_auth = self.plc_auth
+       print >>sys.stderr , " \r\n !!!!!!!!! import_site plc_shell %s \r\n \t type %s dir %s" %(shell, type(shell),dir(shell))
+        sitename = site['login_base']
+        sitename = _cleanup_string(sitename)
+        hrn = parent_hrn + "." + sitename
+
+        # Hardcode 'internet2' into the hrn for sites hosting
+        # internet2 nodes. This is a special operation for some vini
+        # sites only
+        if ".vini" in parent_hrn and parent_hrn.endswith('vini'):
+            if sitename.startswith("i2"):
+                #sitename = sitename.replace("ii", "")
+                hrn = ".".join([parent_hrn, "internet2", sitename])
+            elif sitename.startswith("nlr"):
+                #sitename = sitename.replace("nlr", "")
+                hrn = ".".join([parent_hrn, "internet2", sitename])
+
+        urn = hrn_to_urn(hrn, 'authority')
+        self.logger.info("Import: site %s"%hrn)
+       print >>sys.stderr , " \r\n !!!!!!!!! import_site sitename %s  sitename %s \r\n \t hrn %s urn %s" %(site['login_base'],sitename, hrn,urn)
+        # create the authority
+        if not self.AuthHierarchy.auth_exists(urn):
+            self.AuthHierarchy.create_auth(urn)
+
+        auth_info = self.AuthHierarchy.get_auth_info(urn)
+
+        table = SfaTable()
+        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id'])
+        auth_record['authority'] = get_authority(auth_record['hrn'])
+        existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']})
+        if not existing_records:
+            table.insert(auth_record)
+        else:
+            self.logger.info("Import: %s exists, updating " % hrn)
+            existing_record = existing_records[0]
+            auth_record['record_id'] = existing_record['record_id']
+            table.update(auth_record)
+
+        return hrn
+
+
+    def delete_record(self, hrn, type):
+        # delete the record
+        table = SfaTable()
+        record_list = table.find({'type': type, 'hrn': hrn})
+        for record in record_list:
+            self.logger.info("Import: removing record %s %s" % (type, hrn))
+            table.remove(record)        
diff --git a/sfa/senslab/slab-import.py b/sfa/senslab/slab-import.py
new file mode 100644 (file)
index 0000000..8ff065e
--- /dev/null
@@ -0,0 +1,294 @@
+
+import sys
+import datetime
+import time
+from sfa.senslab.OARrestapi import OARapi
+from sfa.senslab.LDAPapi import LDAPapi
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.senslab.slabpostgres import SlabDB
+from sfa.util.config import Config
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import hrn_to_urn, get_authority,Xrn,get_leaf
+from sfa.util.table import SfaTable
+from sfa.util.record import SfaRecord
+from sfa.trust.hierarchy import Hierarchy
+from sfa.trust.certificate import Keypair,convert_public_key
+from sfa.trust.gid import create_uuid
+from sfa.trust.trustedroots import TrustedRoots
+
+config = Config()
+interface_hrn = config.SFA_INTERFACE_HRN
+TrustedR = TrustedRoots(Config.get_trustedroots_dir(config))
+AuthHierarchy = Hierarchy()
+table = SfaTable()
+db = SlabDB()
+if not table.exists():
+    table.create()
+    
+    
+def create_sm_client_record():
+    """
+    Create a user record for the Slicemanager service.
+    """
+    hrn = config.SFA_INTERFACE_HRN + '.slicemanager'
+    urn = hrn_to_urn(hrn, 'user')
+    if not AuthHierarchy.auth_exists(urn):
+        AuthHierarchy.create_auth(urn)
+
+    auth_info = AuthHierarchy.get_auth_info(hrn)
+    table = SfaTable()
+    sm_user_record = table.find({'type': 'user', 'hrn': hrn})
+    if not sm_user_record:
+        record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="user", pointer=-1)
+        record['authority'] = get_authority(record['hrn'])
+        table.insert(record)
+                
+def create_interface_records():
+    """
+    Create a record for each SFA interface
+    """
+    # just create certs for all sfa interfaces even if they
+    # arent enabled
+    interface_hrn = config.SFA_INTERFACE_HRN
+    interfaces = ['authority+sa', 'authority+am', 'authority+sm']
+    
+    auth_info = AuthHierarchy.get_auth_info(interface_hrn)
+    pkey = auth_info.get_pkey_object()
+    for interface in interfaces:
+        interface_record = table.find({'type': interface, 'hrn': interface_hrn})
+        if not interface_record:
+            urn = hrn_to_urn(interface_hrn, interface)
+            gid = AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+            record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)  
+            record['authority'] = get_authority(interface_hrn)
+            print>>sys.stderr,"\r\n ==========create_interface_records", record['authority']
+            table.insert(record)                
+                
+def create_top_level_auth_records(hrn):
+    """
+    Create top level records (includes root and sub authorities (local/remote)
+    """
+
+    urn = hrn_to_urn(hrn, 'authority')
+    # make sure parent exists
+    parent_hrn = get_authority(hrn)
+    print>>sys.stderr, "\r\n =========slab-import create_top_level_auth_records hrn %s  urn %s parent_hrn %s \r\n" %(hrn, urn, parent_hrn)
+    if not parent_hrn:
+        parent_hrn = hrn
+    if not parent_hrn == hrn:
+        create_top_level_auth_records(parent_hrn)
+
+    # create the authority if it doesnt already exist 
+    if not AuthHierarchy.auth_exists(urn):
+        AuthHierarchy.create_auth(urn)
+    
+    # create the db record if it doesnt already exist    
+    auth_info = AuthHierarchy.get_auth_info(hrn)
+   
+    auth_record = table.find({'type': 'authority', 'hrn': hrn})
+
+    if not auth_record:
+        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
+        auth_record['authority'] = get_authority(auth_record['hrn'])
+        print>>sys.stderr, " \r\n \t slab-import : auth record %s inserted record %s " %(auth_record['hrn'], auth_record)
+        table.insert(auth_record)
+
+        
+    
+def import_node(hrn, node):
+
+    # ASN.1 will have problems with hrn's longer than 64 characters
+    if len(hrn) > 64:
+        hrn = hrn[:64]
+
+    node_record = table.find({'type': 'node', 'hrn': hrn})
+    pkey = Keypair(create=True)        
+    
+    urn = hrn_to_urn(hrn, 'node')
+    node_gid = AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+    node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
+    node_record['authority'] = get_authority(node_record['hrn'])
+    extime = datetime.datetime.utcnow()
+    node_record['date_created'] = int(time.mktime(extime.timetuple()))
+    existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
+    if not existing_records:
+        print>>sys.stderr, " \r\n \t slab-import : node record %s inserted" %(node['node_id'])
+        table.insert(node_record)
+    else:
+        existing_record = existing_records[0]
+        node_record['record_id'] = existing_record['record_id']
+        table.update(node_record)
+
+# person is already a sfa record 
+def import_person(authname,person):       
+    existing_records = table.find({'hrn': person['hrn'], 'type': 'user'})
+    extime = datetime.datetime.utcnow()
+    person['date_created'] = int(time.mktime(extime.timetuple()))
+
+  
+    if not existing_records:
+        print>>sys.stderr, " \r\n \t slab-import : person record %s inserted" %(person['hrn'])
+        uuid=create_uuid() 
+        RSA_KEY_STRING=person['pkey']
+        pkey=convert_public_key(RSA_KEY_STRING)
+        person['gid']=AuthHierarchy.create_gid("urn:publicid:IDN+"+authname+"+user+"+person['uid'], uuid, pkey, CA=False).save_to_string()
+        table.insert(person)
+    else:
+        existing_record = existing_records[0]
+        person['record_id'] = existing_record['record_id']
+        # handle key change ??? 
+        table.update(person)
+        
+def import_slice(person):
+
+    hrn = person['hrn']+'_slice'
+    pkey = Keypair(create=True)
+    urn = hrn_to_urn(hrn, 'slice')
+    gid = AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+    slice_record= SfaRecord(hrn=hrn, gid=gid, type="slice", pointer=-1)
+    slice_record['authority'] = get_authority(slice_record['hrn'])
+   
+    extime = datetime.datetime.utcnow()
+    slice_record['date_created'] = int(time.mktime(extime.timetuple()))
+    #special slice table for Senslab, to store nodes info (OAR)                        
+
+    existing_records = table.find({'hrn': slice_record['hrn'], 'type': 'slice'})
+    if not existing_records:
+        print>>sys.stderr, " \r\n \t slab-import : slice record %s inserted" %(slice_record['hrn'])
+        table.insert(slice_record)
+        db.insert_slab_slice(person)
+
+    else:
+        print>>sys.stderr, " \r\n \t slab-import : slice record %s updated" %(slice_record['hrn'])
+        existing_record = existing_records[0]
+        slice_record['record_id'] = existing_record['record_id']
+        table.update(slice_record)
+        db.update_senslab_slice(slice_record)   
+        
+def delete_record( hrn, type):
+    # delete the record
+    record_list = table.find({'type': type, 'hrn': hrn})
+    for record in record_list:
+        print>>sys.stderr, " \r\n \t slab-import : record %s deleted" %(record['hrn'])
+        table.remove(record)
+                
+def hostname_to_hrn(root_auth,login_base,hostname):
+    return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+
+    
+def main():
+
+    if not db.exists('slice'):
+        db.createtable('slice')
+        
+    if not config.SFA_REGISTRY_ENABLED:
+        sys.exit(0)
+    root_auth = config.SFA_REGISTRY_ROOT_AUTH
+    interface_hrn = config.SFA_INTERFACE_HRN
+
+    
+    #Get all records in the sfa table   
+    # create dict of all existing sfa records
+    existing_records = {}
+    existing_hrns = []
+    key_ids = []
+    results = table.find()
+   
+    for result in results:
+        existing_records[(result['hrn'], result['type'])] = result
+        existing_hrns.append(result['hrn'])   
+        
+    # create root authority if it doesn't exist
+    if root_auth not in  existing_hrns or \
+    (root_auth, 'authority') not in existing_records:
+        create_top_level_auth_records(root_auth)
+        if not root_auth == interface_hrn:
+            create_top_level_auth_records(interface_hrn)
+    
+        # create s user record for the slice manager Do we need this?
+        create_sm_client_record()
+        
+        # create interface records ADDED 18 nov 11 Do we need this?
+    
+        create_interface_records()
+    
+        # add local root authority's cert  to trusted list ADDED 18 nov 11 Do we need this?
+        
+        authority = AuthHierarchy.get_auth_info(interface_hrn)
+        TrustedR.add_gid(authority.get_gid_object())
+
+
+    #Get Senslab nodes 
+   
+    Driver = SlabDriver(config)
+    nodes_dict  = Driver.GetNodes()
+    #print "\r\n NODES8DICT ",nodes_dict
+    
+    ldap_person_list = Driver.GetPersons()
+
+        # import node records
+    for node in nodes_dict:
+        # Sandrine
+        # A changer pour l utilisation du nouveau OAR de prod, le site etant contenu dans le hostname
+        hrn =  hostname_to_hrn( root_auth,node['site_login_base'], node['hostname'])
+        if hrn not in existing_hrns or \
+        (hrn, 'node') not in existing_records:
+            import_node(hrn, node)
+
+   # import persons and slices
+    for person in ldap_person_list:
+        if person['hrn'] not in existing_hrns or \
+            (person['hrn'], 'user') not in existing_records :
+            import_person(root_auth,person)
+            import_slice(person)
+                               
+                                
+    # remove stale records    
+    system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+
+    for (record_hrn, type) in existing_records.keys():
+        if record_hrn in system_records:
+            continue
+        
+        record = existing_records[(record_hrn, type)]
+        if record['peer_authority']:
+            continue                                   
+
+
+
+        found = False
+        
+        if type == 'authority':    
+            found = True
+            print "\t \t Found :", found
+            continue
+                
+        elif type == 'user':
+            for person in ldap_person_list:
+                if person['hrn'] == record_hrn:
+                    found = True
+                    break
+            
+        elif type == 'node':
+            login_base = get_leaf(get_authority(record_hrn))
+            nodename = Xrn.unescape(get_leaf(record_hrn))
+            for node in nodes_dict:
+                if node['hostname'] == nodename :
+                    found = True
+                    break 
+                
+        elif type == 'slice':
+            for person in ldap_person_list:
+                if person['hrn']+'_slice' == record_hrn:
+                    found = True
+                    break           
+        else:
+            continue 
+        
+        if not found:
+            record_object = existing_records[(record_hrn, type)]
+            print "\t\t  NOT FOUND ! ", record_hrn
+            delete_record(record_hrn, type) 
+    
+if __name__ == "__main__":
+    main()    
diff --git a/sfa/senslab/slabaggregate.py b/sfa/senslab/slabaggregate.py
new file mode 100644 (file)
index 0000000..714bd35
--- /dev/null
@@ -0,0 +1,217 @@
+#!/usr/bin/python
+
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.senslab.OARrestapi import *
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.util.config import Config
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
+from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+#from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.sliver import Sliver
+#from sfa.rspecs.elements.login import Login
+#from sfa.rspecs.elements.location import Location
+#from sfa.rspecs.elements.interface import Interface
+#from sfa.rspecs.elements.services import Services
+#from sfa.rspecs.elements.pltag import PLTag
+from sfa.util.topology import Topology
+from sfa.rspecs.version_manager import VersionManager
+#from sfa.plc.vlink import get_tc_rate
+from sfa.util.sfatime import epochparse
+
+def hostname_to_hrn(root_auth,login_base,hostname):
+    return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+
+class SlabAggregate:
+
+    
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+    
+    prepared=False
+
+    user_options = {}
+    
+    def __init__(self ,api):
+       self.OARImporter = OARapi()     
+        self.driver = SlabDriver(Config())
+       self.api = api 
+       print >>sys.stderr,"\r\n \r\n \t\t_____________INIT Slabaggregate api : %s" %(api)
+
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+        slices = self.driver.GetSlices(slice_name)
+        if not slices:
+            return (slice, slivers)
+        slice = slices[0]
+
+        # sort slivers by node id    
+        for node_id in slice['node_ids']:
+            sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], node_id),
+                             'name': slice['hrn'],
+                             'type': 'slab-vm', 
+                             'tags': []})
+            slivers[node_id]= sliver
+
+        # sort sliver attributes by node id    
+        #tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        #for tag in tags:
+            ## most likely a default/global sliver attribute (node_id == None)
+            #if tag['node_id'] not in slivers:
+                #sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 #'name': 'slab-vm',
+                                 #'tags': []})
+                #slivers[tag['node_id']] = sliver
+            #slivers[tag['node_id']]['tags'].append(tag)
+        
+        return (slice, slivers)
+            
+            
+  
+    def get_nodes(self):
+        filtre = {}
+        #tags_filter = {}
+        #if slice and 'node_ids' in slice and slice['node_ids']:
+            #filter['node_id'] = slice['node_ids']
+            #tags_filter=filter.copy()
+        
+        #filter.update({'peer_id': None})
+        nodes = self.driver.GetNodes(filtre)
+       
+        #site_ids = []
+        interface_ids = []
+        tag_ids = []
+        nodes_dict = {}
+        for node in nodes:
+            #site_ids.append(node['site_id'])
+            #interface_ids.extend(node['interface_ids'])
+            #tag_ids.extend(node['node_tag_ids'])
+            nodes_dict[node['node_id']] = node
+    
+        # get sites
+        #sites_dict  = self.get_sites({'site_id': site_ids}) 
+        # get interfaces
+        #interfaces = self.get_interfaces({'interface_id':interface_ids}) 
+        # get tags
+        #node_tags = self.get_node_tags(tags_filter)
+        # get initscripts
+        #pl_initscripts = self.get_pl_initscripts()
+        
+        #links = self.get_links(sites_dict, nodes_dict, interfaces)
+    
+        rspec_nodes = []
+        for node in nodes:
+            # skip whitelisted nodes
+            #if node['slice_ids_whitelist']:
+                #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    #continue
+            node['hostname'] = hostname_to_hrn( self.driver.root_auth,node['site_login_base'], node['hostname'])
+            rspec_node = Node()
+            # xxx how to retrieve site['login_base']
+            #site_id=node['site_id']
+            #site=sites_dict[site_id]
+            
+            rspec_node['component_id'] = hostname_to_urn(self.driver.root_auth, node['site_login_base'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = hrn_to_urn(self.driver.root_auth, 'authority+sa')
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.root_auth, node['site_login_base']), 'authority+sa')
+            rspec_node['boot_state'] = node['boot_state']
+            if node['posx'] and node['posy']:  
+                location = Location({'longitude':node['posx'], 'latitude': node['posy']})
+                rspec_node['location'] = location
+
+            rspec_node['exclusive'] = 'True'
+            rspec_node['hardware_types']= [HardwareType({'name': 'senslab sensor node'})]
+            # only doing this because protogeni rspec needs
+            # to advertise available initscripts 
+            #rspec_node['pl_initscripts'] = pl_initscripts.values()
+                # add site/interface info to nodes.
+            # assumes that sites, interfaces and tags have already been prepared.
+            #site = sites_dict[node['site_id']]
+            #if site['longitude'] and site['latitude']:  
+                #location = Location({'longitude': site['longitude'], 'latitude': site['latitude']})
+                #rspec_node['location'] = location
+            rspec_node['interfaces'] = []
+            #if_count=0
+            #for if_id in node['interface_ids']:
+                #interface = Interface(interfaces[if_id]) 
+                #interface['ipv4'] = interface['ip']
+                #interface['component_id'] = PlXrn(auth=self.api.hrn, interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
+                #rspec_node['interfaces'].append(interface)
+                #if_count+=1
+    
+            #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+            rspec_node['tags'] = []
+            #if node['node_id'] in slivers:
+                ## add sliver info
+                #sliver = slivers[node['node_id']]
+                #rspec_node['sliver_id'] = sliver['sliver_id']
+                #rspec_node['client_id'] = node['hostname']
+                #rspec_node['slivers'] = [sliver]
+                
+                ## slivers always provide the ssh service
+                #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22'})
+                #service = Services({'login': login})
+                #rspec_node['services'] = [service]
+            rspec_nodes.append(rspec_node)
+        return (rspec_nodes)
+        
+        
+
+#from plc/aggregate.py 
+    def get_rspec(self, slice_xrn=None, version = None, options={}):
+       print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec **************\r\n" 
+      
+       
+        rspec = None
+       version_manager = VersionManager()
+       version = version_manager.get_version(version)
+     
+       
+       if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+        #slice, slivers = self.get_slice_and_slivers(slice_xrn)
+        rspec = RSpec(version=rspec_version, user_options=options)
+        #if slice and 'expires' in slice:
+           #rspec.xml.set('expires',  epochparse(slice['expires']))
+         # add sliver defaults
+        #nodes, links = self.get_nodes_and_links(slice, slivers)
+        nodes = self.get_nodes() 
+        rspec.version.add_nodes(nodes)
+
+        #rspec.version.add_links(links)
+        #default_sliver = slivers.get(None, [])
+        #if default_sliver:
+            #default_sliver_attribs = default_sliver.get('tags', [])
+            #for attrib in default_sliver_attribs:
+                #logger.info(attrib)
+                #rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])   
+
+        return rspec.toxml()          
diff --git a/sfa/senslab/slabdriver.py b/sfa/senslab/slabdriver.py
new file mode 100644 (file)
index 0000000..0ff5149
--- /dev/null
@@ -0,0 +1,535 @@
import sys

from sfa.util.faults import MissingSfaInfo
from sfa.util.sfalogging import logger
from sfa.util.table import SfaTable
from sfa.util.defaultdict import defaultdict

from sfa.managers.driver import Driver
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec

from sfa.util.xrn import hrn_to_urn
from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base

## thierry: everything that is API-related (i.e. handling incoming requests) 
# is taken care of 
# SlabDriver should be really only about talking to the senslab testbed

## thierry : please avoid wildcard imports :)
from sfa.senslab.OARrestapi import OARapi, OARrestapi
from sfa.senslab.LDAPapi import LDAPapi
from sfa.senslab.SenslabImportUsers import SenslabImportUsers
from sfa.senslab.parsing import parse_filter
from sfa.senslab.slabpostgres import SlabDB
# NOTE(review): create_sliver()/list_resources() reference SlabAggregate,
# SlabSlices and RSpec, none of which were imported — added here (with
# RSpec above) to avoid NameError at call time.
from sfa.senslab.slabaggregate import SlabAggregate
from sfa.senslab.slabslices import SlabSlices
+
def list_to_dict(recs, key):
    """Index a list of dictionaries by the value each holds under *key*.

    When several records share the same key value the last one wins,
    matching the behaviour of dict(zip(keys, recs)) on duplicates.
    """
    indexed = {}
    for rec in recs:
        indexed[rec[key]] = rec
    return indexed
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver(Driver):
+
+    def __init__(self, config):
+        Driver.__init__ (self, config)
+        self.config=config
+        self.hrn = config.SFA_INTERFACE_HRN
+    
+        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+
+        
+       print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n" 
+        # thierry - just to not break the rest of this code
+
+
+       self.oar = OARapi()
+       self.ldap = LDAPapi()
+        self.users = SenslabImportUsers()
+        self.time_format = "%Y-%m-%d %H:%M:%S"
+        self.db = SlabDB()
+        #self.logger=sfa_logger()
+        self.cache=None
+        
+
+            
    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
        """Provision a sliver: verify site/slice/person records, attach the
        requested nodes to the slice, then return the manifest RSpec.

        NOTE(review): SlabAggregate, SlabSlices and RSpec are not imported
        by this module as written — this method raises NameError unless
        the imports (sfa.senslab.slabaggregate / slabslices,
        sfa.rspecs.rspec) are added.
        """
        aggregate = SlabAggregate(self)
        slices = SlabSlices(self)
        peer = slices.get_peer(slice_hrn)
        sfa_peer = slices.get_sfa_peer(slice_hrn)
        # slice_record comes from the first user's credential payload, if any
        slice_record=None    
        if users:
            slice_record = users[0].get('slice_record', {})
    
        # parse rspec
        rspec = RSpec(rspec_string)
        requested_attributes = rspec.version.get_slice_attributes()
        
        # ensure site record exists
        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
        # ensure slice record exists
        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
        # ensure person records exists
        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
        # ensure slice attributes exists
        #slices.verify_slice_attributes(slice, requested_attributes, options=options)
        
        # add/remove slice from nodes
        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer) 
    
        # add/remove links links 
        #slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
    
        # handle MyPLC peer association.
        # only used by plc and ple.
        #slices.handle_peer(site, slice, persons, peer)
        
        # manifest for the slice we just (re)configured
        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+        
+        
    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        """Tear down a sliver: remove the slice from its nodes.

        Returns 1 in all cases (including "slice not found"), mirroring
        the PLC driver's convention.

        NOTE(review): ``peers`` is not defined anywhere in this module —
        this method raises NameError as written. Also,
        UnBindObjectFromPeer / BindObjectToPeer are PLC API calls that
        SlabDriver does not implement; the peer-handling branch looks
        like an unported copy from the PLC driver and needs confirming.
        """
        slices = self.GetSlices({'slice_hrn': slice_hrn})
        if not slices:
            return 1
        slice = slices[0]
    
        # determine if this is a peer slice
        # xxx I wonder if this would not need to use PlSlices.get_peer instead 
        # in which case plc.peers could be deprecated as this here
        # is the only/last call to this last method in plc.peers
        peer = peers.get_peer(self, slice_hrn)
        try:
            if peer:
                self.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
            self.DeleteSliceFromNodes(slice_hrn, slice['node_ids'])
        finally:
            # re-bind even if the node removal failed
            if peer:
                self.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
        return 1
+            
+            
+            
+            
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        #cached_requested = options.get('cached', True) 
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        #if cached_requested and self.cache and not slice_hrn:
+            #rspec = self.cache.get(version_string)
+            #if rspec:
+                #logger.debug("SlabDriver.ListResources: returning cached advertisement")
+                #return rspec 
+    
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = SlabAggregate(self)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, 
+                                     options=options)
+    
+        # cache the result
+        #if self.cache and not slice_hrn:
+            #logger.debug("Slab.ListResources: stores advertisement in cache")
+            #self.cache.add(version_string, rspec)
+    
+        return rspec
+    
+    def GetPersons(self, person_filter=None, return_fields=None):
+        
+        person_list = self.ldap.ldapFind({'authority': self.root_auth })
+        
+        #check = False
+        #if person_filter and isinstance(person_filter, dict):
+            #for k in  person_filter.keys():
+                #if k in person_list[0].keys():
+                    #check = True
+                    
+        return_person_list = parse_filter(person_list,person_filter ,'persons', return_fields)
+        if return_person_list:
+            print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s return_person_list %s " %(person_filter,return_fields,return_person_list)
+            return return_person_list
+    
+    def GetNodes(self,node_filter= None, return_fields=None):
+               
+        self.oar.parser.SendRequest("GET_resources_full")
+        node_dict = self.oar.parser.GetNodesFromOARParse()
+        return_node_list = []
+
+        if not (node_filter or return_fields):
+                return_node_list = node_dict.values()
+                return return_node_list
+    
+        return_node_list= parse_filter(node_dict.values(),node_filter ,'node', return_fields)
+        return return_node_list
+    
+    def GetSites(self, auth, site_filter = None, return_fields=None):
+        self.oar.parser.SendRequest("GET_resources_full")
+        site_dict = self.oar.parser.GetSitesFromOARParse()
+        return_site_list = []
+        site = site_dict.values()[0]
+        if not (site_filter or return_fields):
+                return_site_list = site_dict.values()
+                return return_site_list
+        
+        return_site_list = parse_filter(site_dict.values(),site_filter ,'site', return_fields)
+        return return_site_list
+    
+    def GetSlices(self,slice_filter = None, return_fields=None):
+        
+        return_slice_list =[]
+        sliceslist = self.db.find('slice',columns = ['slice_hrn', 'record_id_slice','record_id_user'])
+        print >>sys.stderr, " \r\n \r\n SLABDRIVER.PY  GetSlices  slices %s" %(sliceslist)
+        #slicesdict = sliceslist[0]
+        if not (slice_filter or return_fields):
+                return_slice_list = sliceslist
+                return  return_slice_list
+        
+        return_slice_list  = parse_filter(sliceslist, slice_filter,'slice', return_fields)
+        print >>sys.stderr, " \r\n \r\n SLABDRIVER.PY  GetSlices  return_slice_list %s" %(return_slice_list)
+        return return_slice_list
+    
+    def testbed_name (self): return "senslab2" 
+         
+    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
+          
+          
+          
+          
+          
+          
+    ##
+    # Convert SFA fields to PLC fields for use when registering up updating
+    # registry record in the PLC database
+    #
+    # @param type type of record (user, slice, ...)
+    # @param hrn human readable name
+    # @param sfa_fields dictionary of SFA fields
+    # @param pl_fields dictionary of PLC fields (output)
+
+    def sfa_fields_to_pl_fields(self, type, hrn, record):
+
+        def convert_ints(tmpdict, int_fields):
+            for field in int_fields:
+                if field in tmpdict:
+                    tmpdict[field] = int(tmpdict[field])
+
+        pl_record = {}
+        #for field in record:
+        #    pl_record[field] = record[field]
+        if type == "slice":
+            if not "instantiation" in pl_record:
+                pl_record["instantiation"] = "plc-instantiated"
+            pl_record["name"] = hrn_to_pl_slicename(hrn)
+           if "url" in record:
+               pl_record["url"] = record["url"]
+           if "description" in record:
+               pl_record["description"] = record["description"]
+           if "expires" in record:
+               pl_record["expires"] = int(record["expires"])
+
+        elif type == "node":
+            if not "hostname" in pl_record:
+                if not "hostname" in record:
+                    raise MissingSfaInfo("hostname")
+                pl_record["hostname"] = record["hostname"]
+            if not "model" in pl_record:
+                pl_record["model"] = "geni"
+
+        elif type == "authority":
+            pl_record["login_base"] = hrn_to_pl_login_base(hrn)
+
+            if not "name" in pl_record:
+                pl_record["name"] = hrn
+
+            if not "abbreviated_name" in pl_record:
+                pl_record["abbreviated_name"] = hrn
+
+            if not "enabled" in pl_record:
+                pl_record["enabled"] = True
+
+            if not "is_public" in pl_record:
+                pl_record["is_public"] = True
+
+        return pl_record
+
+  
+                 
+                 
+    def AddSliceToNodes(self,  slice_name, added_nodes, slice_user=None):
+        print>>sys.stderr, "\r\n \r\n AddSliceToNodes  slice_name %s added_nodes %s username %s" %(slice_name,added_nodes,slice_user )
+        site_list = []
+        nodeid_list =[]
+        resource = ""
+        reqdict = {}
+        reqdict['property'] ="network_address in ("
+        for node in added_nodes:
+            #Get the ID of the node : remove the root auth and put the site in a separate list
+            tmp = node.strip(self.root_auth+".")
+            l = tmp.split("_")
+             
+            nodeid= (l[len(l)-1]) 
+            reqdict['property'] += "'"+ nodeid +"', "
+            nodeid_list.append(nodeid)
+            site_list.append( l[0] )
+            
+        reqdict['property'] =  reqdict['property'][0: len( reqdict['property'])-2] +")"
+        reqdict['resource'] ="network_address="+ str(len(nodeid_list))
+        reqdict['resource']+= ",walltime=" + str(00) + ":" + str(05) + ":" + str(00)
+        reqdict['script_path'] = "/bin/sleep "
+
+        print>>sys.stderr, "\r\n \r\n AddSliceToNodes reqdict   %s \r\n site_list   %s"  %(reqdict,site_list)   
+        #OAR = OARrestapi()
+        #answer = OAR.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
+        #print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid   %s "  %(answer)
+        #self.db.update('slice',['oar_job_id'], [answer['id']], 'slice_hrn', slice_name)
+        return 
+    
+
+        
+        
    def DeleteSliceFromNodes(self, slice_name, deleted_nodes):
        # Placeholder: releasing nodes (deleting the OAR job) is not
        # implemented yet; deliberately a no-op.
        return   
+    
+
+    def fill_record_sfa_info(self, records):
+
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        # get person ids
+        person_ids = []
+        site_ids = []
+        for record in records:
+            person_ids.extend(record.get("person_ids", []))
+            site_ids.extend(record.get("site_ids", [])) 
+            if 'site_id' in record:
+                site_ids.append(record['site_id']) 
+               
+       #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___person_ids %s \r\n \t\t site_ids %s " %(person_ids, site_ids)
+       
+        # get all pis from the sites we've encountered
+        # and store them in a dictionary keyed on site_id 
+        site_pis = {}
+        if site_ids:
+            pi_filter = {'|roles': ['pi'], '|site_ids': site_ids} 
+            pi_list = self.GetPersons( pi_filter, ['person_id', 'site_ids'])
+           #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ GetPersons ['person_id', 'site_ids'] pi_ilist %s" %(pi_list)
+
+            for pi in pi_list:
+                # we will need the pi's hrns also
+                person_ids.append(pi['person_id'])
+                
+                # we also need to keep track of the sites these pis
+                # belong to
+                for site_id in pi['site_ids']:
+                    if site_id in site_pis:
+                        site_pis[site_id].append(pi)
+                    else:
+                        site_pis[site_id] = [pi]
+                 
+        # get sfa records for all records associated with these records.   
+        # we'll replace pl ids (person_ids) with hrns from the sfa records
+        # we obtain
+        
+        # get the sfa records
+        table = SfaTable()
+        person_list, persons = [], {}
+        person_list = table.find({'type': 'user', 'pointer': person_ids})
+        # create a hrns keyed on the sfa record's pointer.
+        # Its possible for  multiple records to have the same pointer so
+        # the dict's value will be a list of hrns.
+        persons = defaultdict(list)
+        for person in person_list:
+            persons[person['pointer']].append(person)
+
+        # get the pl records
+        pl_person_list, pl_persons = [], {}
+        pl_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
+        pl_persons = list_to_dict(pl_person_list, 'person_id')
+        #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___  _list %s \r\n \t\t SenslabUsers.GetPersons ['person_id', 'roles'] pl_persons %s \r\n records %s" %(pl_person_list, pl_persons,records) 
+        # fill sfa info
+       
+        for record in records:
+            # skip records with no pl info (top level authorities)
+           #Sandrine 24 oct 11 2 lines
+            #if record['pointer'] == -1:
+                #continue 
+            sfa_info = {}
+            type = record['type']
+            if (type == "slice"):
+                # all slice users are researchers
+               #record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')  ? besoin ou pas ?
+                record['PI'] = []
+                record['researcher'] = []
+               for person_id in record.get('person_ids', []):
+                        #Sandrine 24 oct 11 line
+                #for person_id in record['person_ids']:
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['researcher'].extend(hrns)                
+
+                # pis at the slice's site
+                pl_pis = site_pis[record['site_id']]
+                pi_ids = [pi['person_id'] for pi in pl_pis]
+                for person_id in pi_ids:
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['PI'].extend(hrns)
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                record['geni_creator'] = record['PI'] 
+                
+            elif (type == "authority"):
+                record['PI'] = []
+                record['operator'] = []
+                record['owner'] = []
+                for pointer in record['person_ids']:
+                    if pointer not in persons or pointer not in pl_persons:
+                        # this means there is not sfa or pl record for this user
+                        continue   
+                    hrns = [person['hrn'] for person in persons[pointer]] 
+                    roles = pl_persons[pointer]['roles']   
+                    if 'pi' in roles:
+                        record['PI'].extend(hrns)
+                    if 'tech' in roles:
+                        record['operator'].extend(hrns)
+                    if 'admin' in roles:
+                        record['owner'].extend(hrns)
+                    # xxx TODO: OrganizationName
+            elif (type == "node"):
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+    
+            elif (type == "user"):
+                 sfa_info['email'] = record.get("email", "")
+                 sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                 sfa_info['geni_certificate'] = record['gid'] 
+                # xxx TODO: PostalAddress, Phone
+               
+            #print>>sys.stderr, "\r\n \r\rn \t\t \t <<<<<<<<<<<<<<<<<<<<<<<<  fill_record_sfa_info sfa_info %s  \r\n record %s : "%(sfa_info,record)  
+            record.update(sfa_info)
+            
    def augment_records_with_testbed_info (self, sfa_records):
        # Registry hook: delegates to fill_record_info(), which mutates
        # the records in place (its return value is currently None).
        return self.fill_record_info (sfa_records)
+    
+    def fill_record_info(self, records):
+        """
+        Given a SFA record, fill in the senslab specific and SFA specific
+        fields in the record. 
+        """
+       print >>sys.stderr, "\r\n \t\t BEFORE fill_record_pl_info %s" %(records)        
+        if isinstance(records, list):
+            records = records[0]
+       #print >>sys.stderr, "\r\n \t\t BEFORE fill_record_pl_info %s" %(records)       
+        
+       
+        if records['type'] == 'slice':
+
+            sfatable = SfaTable()
+            recslice = self.db.find('slice',str(records['hrn']))
+            if isinstance(recslice,list) and len(recslice) == 1:
+                recslice = recslice[0]
+            recuser = sfatable.find(  recslice['record_id_user'], ['hrn'])
+            
+            print >>sys.stderr, "\r\n \t\t  SLABDRIVER.PY fill_record_info %s" %(recuser)
+            records['type']
+            if isinstance(recuser,list) and len(recuser) == 1:
+                recuser = recuser[0]             
+            records.update({'PI':[recuser['hrn']],
+            'researcher': [recuser['hrn']],
+            'name':records['hrn'], 'oar_job_id':recslice['oar_job_id'],
+            
+            'node_ids': [],
+            'person_ids':[recslice['record_id_user']]})
+
+        #self.fill_record_pl_info(records)
+       ##print >>sys.stderr, "\r\n \t\t after fill_record_pl_info %s" %(records)       
+        #self.fill_record_sfa_info(records)
+       #print >>sys.stderr, "\r\n \t\t after fill_record_sfa_info"
+       
    def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
        """Diff the membership list *listName* between old and new record and
        apply addFunc/delFunc for each person added/removed.

        NOTE(review): this references self.plauth, which __init__ never
        sets — copied from the PLC driver; confirm what credential the
        add/del callbacks actually expect here.
        """
        # get a list of the HRNs tht are members of the old and new records
        if oldRecord:
            oldList = oldRecord.get(listName, [])
        else:
            oldList = []     
        newList = record.get(listName, [])

        # if the lists are the same, then we don't have to update anything
        if (oldList == newList):
            return

        # build a list of the new person ids, by looking up each person to get
        # their pointer
        newIdList = []
        table = SfaTable()
        records = table.find({'type': 'user', 'hrn': newList})
        for rec in records:
            newIdList.append(rec['pointer'])

        # build a list of the old person ids from the person_ids field 
        if oldRecord:
            oldIdList = oldRecord.get("person_ids", [])
            containerId = oldRecord.get_pointer()
        else:
            # if oldRecord==None, then we are doing a Register, instead of an
            # update.
            oldIdList = []
            containerId = record.get_pointer()

    # add people who are in the new list, but not the oldList
        for personId in newIdList:
            if not (personId in oldIdList):
                addFunc(self.plauth, personId, containerId)

        # remove people who are in the old list, but not the new list
        for personId in oldIdList:
            if not (personId in newIdList):
                delFunc(self.plauth, personId, containerId)
+
    def update_membership(self, oldRecord, record):
        """Sync a record's membership lists after a registry update.

        Only slice researcher lists are handled so far; authority
        membership is still TODO.
        """
        print >>sys.stderr, " \r\n \r\n ***SLABDRIVER.PY update_membership record ", record
        if record.type == "slice":
            self.update_membership_list(oldRecord, record, 'researcher',
                                        self.users.AddPersonToSlice,
                                        self.users.DeletePersonFromSlice)
        elif record.type == "authority":
            # xxx TODO
            pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
diff --git a/sfa/senslab/slabpostgres.py b/sfa/senslab/slabpostgres.py
new file mode 100644 (file)
index 0000000..ac9f994
--- /dev/null
@@ -0,0 +1,276 @@
+import psycopg2
+import psycopg2.extensions
+psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+# UNICODEARRAY not exported yet
+psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY)
+from sfa.util.config import Config
+from sfa.util.table import SfaTable
+from sfa.util.sfalogging import logger
+# allow to run sfa2wsdl if this is missing (for mac)
+import sys
+try: import pgdb
+except: print >> sys.stderr, "WARNING, could not import pgdb"
+
#Dict holding the columns names of the table as keys
#and their type, used for creation of the table
# NOTE(review): 'record_id_user' is both the primary key and a foreign key
# into the sfa table; 'oar_job_id' defaults to -1, meaning "no OAR job yet".
slice_table = {'record_id_user':'integer PRIMARY KEY references sfa ON DELETE CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1',  'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}

#Dict with all the specific senslab tables
tablenames_dict = {'slice': slice_table}
+
+class SlabDB:
    def __init__(self):
        # Lazy connection: actually opened on the first cursor() call.
        self.config = Config()
        self.connection = None
+
    def cursor(self):
        """Return a cursor, opening the database connection on first use.

        Connection strategy: try a psycopg2 UNIX-socket connection with
        the hardcoded sfa/sfa/sfa credentials first, then fall back to
        TCP using the SFA_PLC_DB_* settings from the config.

        NOTE(review): psycopg2 is imported unconditionally at module
        level, so the ``if psycopg2:`` test is always true and the pgdb
        branch is dead code here. The hardcoded credentials shadow the
        config-driven ones (commented out below) — confirm which is
        intended for deployment.
        """
        if self.connection is None:
            # (Re)initialize database connection
            if psycopg2:
                try:
                    # Try UNIX socket first                    
                    self.connection = psycopg2.connect(user = 'sfa',
                                                       password = 'sfa',
                                                       database = 'sfa')
                    #self.connection = psycopg2.connect(user = self.config.SFA_PLC_DB_USER,
                                                       #password = self.config.SFA_PLC_DB_PASSWORD,
                                                       #database = self.config.SFA_PLC_DB_NAME)
                except psycopg2.OperationalError:
                    # Fall back on TCP
                    self.connection = psycopg2.connect(user = self.config.SFA_PLC_DB_USER,
                                                       password = self.config.SFA_PLC_DB_PASSWORD,
                                                       database = self.config.SFA_PLC_DB_NAME,
                                                       host = self.config.SFA_PLC_DB_HOST,
                                                       port = self.config.SFA_PLC_DB_PORT)
                self.connection.set_client_encoding("UNICODE")
            else:
                self.connection = pgdb.connect(user = self.config.SFA_PLC_DB_USER,
                                               password = self.config.SFA_PLC_DB_PASSWORD,
                                               host = "%s:%d" % (self.config.SFA_PLC_DB_HOST, self.config.SFA_PLC_DB_PORT),
                                               database = self.config.SFA_PLC_DB_NAME)

        return self.connection.cursor()
+        
+    #Close connection to database
+    def close(self):
+        if self.connection is not None:
+            self.connection.close()
+            self.connection = None
+            
    def selectall(self, query,  hashref = True, key_field = None):
        """
        Return each row as a dictionary keyed on field name (like DBI
        selectrow_hashref()). If key_field is specified, return rows
        as a dictionary keyed on the specified field (like DBI
        selectall_hashref()).

        NOTE(review): cursor.description is read AFTER cursor.close() —
        psycopg2 keeps it available, but this is driver-specific
        behaviour; confirm before switching DB drivers.
        """
        cursor = self.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        cursor.close()
        self.connection.commit()

        if hashref or key_field is not None:
            # Return each row as a dictionary keyed on field name
            # (like DBI selectrow_hashref()).
            labels = [column[0] for column in cursor.description]
            rows = [dict(zip(labels, row)) for row in rows]

        if key_field is not None and key_field in labels:
            # Return rows as a dictionary keyed on the specified field
            # (like DBI selectall_hashref()).
            return dict([(row[key_field], row) for row in rows])
        else:
            return rows
+        
+        
+    def exists(self, tablename):
+        """
+        Checks if the table specified as tablename exists.
+    
+        """
+        #mark = self.cursor()
+        sql = "SELECT * from pg_tables"
+        #mark.execute(sql)
+        #rows = mark.fetchall()
+        #mark.close()
+        #labels = [column[0] for column in mark.description]
+        #rows = [dict(zip(labels, row)) for row in rows]
+        rows = self.selectall(sql)
+        rows = filter(lambda row: row['tablename'].startswith(tablename), rows)
+        if rows:
+            return True
+        return False
+    
+    def createtable(self, tablename ):
+        """
+        Creates the specifed table. Uses the global dictionnary holding the tablenames and
+        the table schema.
+    
+        """
+        mark = self.cursor()
+        tablelist =[]
+        if tablename not in tablenames_dict:
+            logger.error("Tablename unknown - creation failed")
+            return
+            
+        T  = tablenames_dict[tablename]
+        
+        for k in T.keys(): 
+            tmp = str(k) +' ' + T[k]
+            tablelist.append(tmp)
+            
+        end_of_statement = ",".join(tablelist)
+        
+        statement = "CREATE TABLE " + tablename + " ("+ end_of_statement +");"
+     
+        #template = "CREATE INDEX %s_%s_idx ON %s (%s);"
+        #indexes = [template % ( self.tablename, field, self.tablename, field) \
+                    #for field in ['hrn', 'type', 'authority', 'peer_authority', 'pointer']]
+        # IF EXISTS doenst exist in postgres < 8.2
+        try:
+            mark.execute('DROP TABLE IF EXISTS ' + tablename +';')
+        except:
+            try:
+                mark.execute('DROP TABLE' + tablename +';')
+            except:
+                pass
+            
+        mark.execute(statement)
+        #for index in indexes:
+            #self.db.do(index)
+        self.connection.commit()
+        mark.close()
+        self.close()
+        return
+    
+
+
+
    def insert(self, table, columns,values):
        """
        Inserts data (values) into the columns of the specified table. 

        NOTE(review): the statement is built by string concatenation and
        callers are expected to pre-quote string values themselves (see
        insert_slab_slice) — unsafe for untrusted input; a parameterized
        execute() would be preferable but changes the caller contract.
        """
        mark = self.cursor()
        statement = "INSERT INTO " + table + \
                    "(" + ",".join(columns) + ") " + \
                    "VALUES(" + ", ".join(values) + ");"

        mark.execute(statement) 
        self.connection.commit()
        mark.close()
        self.close()
        return
+    
+    def insert_slab_slice(self, person_rec):
+        """
+        Inserts information about a user and his slice into the slice table. 
+    
+        """
+        sfatable = SfaTable()
+        keys = slice_table.keys()
+        
+        #returns a list of records from the sfa table (dicts)
+        #the filters specified will return only one matching record, into a list of dicts
+        #Finds the slice associated with the user (Senslabs slices  hrns contains the user hrn)
+
+        userrecord = sfatable.find({'hrn': person_rec['hrn'], 'type':'user'})
+        slicerec =  sfatable.find({'hrn': person_rec['hrn']+'_slice', 'type':'slice'})
+        if slicerec :
+            if (isinstance (userrecord, list)):
+                userrecord = userrecord[0]
+            if (isinstance (slicerec, list)):
+                slicerec = slicerec[0]
+                
+            oar_dflt_jobid = -1
+            values = [ str(oar_dflt_jobid), ' \''+ str(slicerec['hrn']) + '\'', str(userrecord['record_id']), str( slicerec['record_id'])]
+    
+            self.insert('slice', keys, values)
+        else :
+            logger.error("Trying to import a not senslab slice")
+        return
+        
+        
+    def update(self, table, column_names, values, whereclause, valueclause):
+        """
+        Updates a record in a given table. 
+    
+        """
+        #Creates the values string for the update SQL command
+        vclause = valueclause
+        if len(column_names) is not len(values):
+            return
+        else:
+            valueslist = []
+            valuesdict = dict(zip(column_names,values))
+            for k in valuesdict.keys():
+                valuesdict[k] = str(valuesdict[k])
+                #v = ' \''+ str(k) + '\''+ '='+' \''+ valuesdict[k]+'\''
+                v = str(k) + '=' + valuesdict[k]
+                valueslist.append(v)
+        if isinstance(vclause,str):
+            vclause = '\''+ vclause + '\''
+        statement = "UPDATE %s SET %s WHERE %s = %s" % \
+                    (table, ", ".join(valueslist), whereclause, vclause)
+        print>>sys.stderr,"\r\n \r\n SLABPOSTGRES.PY update statement %s valuesdict %s valueslist %s" %(statement,valuesdict,valueslist)
+        mark = self.cursor()
+        mark.execute(statement) 
+        self.connection.commit()
+        mark.close()
+        self.close()
+
+        return
+
+    def update_senslab_slice(self, slice_rec):
+        sfatable = SfaTable()
+        userhrn = slice_rec['hrn'].strip('_slice')
+        userrecord = sfatable.find({'hrn': userhrn, 'type':'user'})
+        if (isinstance (userrecord, list)):
+                userrecord = userrecord[0]
+        columns = [ 'record_user_id', 'oar_job_id']
+        values = [slice_rec['record_user_id'],slice_rec['oar_job_id']]
+        self.update('slice',columns, values,'record_slice_id', slice_rec['record_slice_id'])
+        return 
+        
+       
    def find(self, tablename,record_filter = None, columns=None):
        """SELECT *columns* from *tablename*, optionally filtered.

        :param record_filter: dict -> ANDed quoted equality tests on each
            key; str -> matched against slice_hrn; None -> no filtering.
        :param columns: list of column names, or None for '*'.
        :returns: list of row dicts (a bare dict result is wrapped).

        NOTE(review): filter keys/values are concatenated straight into
        the SQL text — vulnerable to injection if a filter ever carries
        untrusted input, and the generated "AND 'key'='value'" form
        quotes the column name as a string literal, which postgres
        compares as text, not as a column — confirm this matches the
        intended queries.
        """
        if not columns:
            columns = "*"
        else:
            columns = ",".join(columns)
        sql = "SELECT %s FROM %s WHERE True " % (columns, tablename)
        
        #if isinstance(record_filter, (list, tuple, set)):
            #ints = filter(lambda x: isinstance(x, (int, long)), record_filter)
            #strs = filter(lambda x: isinstance(x, StringTypes), record_filter)
            #record_filter = Filter(SfaRecord.all_fields, {'record_id': ints, 'hrn': strs})
            #sql += "AND (%s) %s " % record_filter.sql("OR") 
        #elif isinstance(record_filter, dict):
            #record_filter = Filter(SfaRecord.all_fields, record_filter)        
            #sql += " AND (%s) %s" % record_filter.sql("AND")
        #elif isinstance(record_filter, StringTypes):
            #record_filter = Filter(SfaRecord.all_fields, {'hrn':[record_filter]})    
            #sql += " AND (%s) %s" % record_filter.sql("AND")
        #elif isinstance(record_filter, int):
            #record_filter = Filter(SfaRecord.all_fields, {'record_id':[record_filter]})    
            #sql += " AND (%s) %s" % record_filter.sql("AND")
       
        if isinstance(record_filter, dict):
            for k in record_filter.keys():
                sql += "AND "+' \''+ str(k) + '\''+ '='+' \''+ str(record_filter[k])+'\''
            
        elif isinstance(record_filter, str):
            sql += "AND slice_hrn ="+ ' \''+record_filter+'\''

        #elif isinstance(record_filter, int):
            #record_filter = Filter(SfaRecord.all_fields, {'record_id':[record_filter]})    
            #sql += " AND (%s) %s" % record_filter.sql("AND")
        sql +=  ";"
        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES.PY find : sql %s record_filter  %s %s" %(sql, record_filter , type(record_filter))
        results = self.selectall(sql)
        if isinstance(results, dict):
            results = [results]
        return results
+       
diff --git a/sfa/senslab/slabslices.py b/sfa/senslab/slabslices.py
new file mode 100644 (file)
index 0000000..9e547e4
--- /dev/null
@@ -0,0 +1,667 @@
+from types import StringTypes
+from collections import defaultdict
+import sys
+from sfa.util.xrn import get_leaf, get_authority, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_slicename
+from sfa.util.policy import Policy
+from sfa.rspecs.rspec import RSpec
+from sfa.plc.vlink import VLink
+from sfa.util.xrn import Xrn
+
+# Largest 32-bit signed integer (Python 2 long literal); used to clamp 'expires' timestamps.
+MAXINT =  2L**31-1
+
+class SlabSlices:
+    """Reconciles SFA slice requests (nodes, persons, keys, tags) with the
+    Senslab testbed through self.api.driver; adapted from the PLC slices code."""
+
+    # maps RSpec attribute names to their PLC slice-tag equivalents
+    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+    def __init__(self, api, ttl = .5, origin_hrn=None):
+        # api: SFA API instance; ttl is accepted for interface compatibility but unused here.
+        self.api = api
+        #filepath = path + os.sep + filename
+        self.policy = Policy(self.api)    
+        self.origin_hrn = origin_hrn
+        self.registry = api.registries[api.hrn]
+        self.credential = api.getCredential()
+        self.nodes = []
+        self.persons = []
+
+    #def get_slivers(self, xrn, node=None):
+        #hrn, type = urn_to_hrn(xrn)
+         
+        #slice_name = hrn_to_pl_slicename(hrn)
+        ## XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        ## of doing all of this?
+        ##return self.api.driver.GetSliceTicket(self.auth, slice_name) 
+        
+        ## from PLCAPI.GetSlivers.get_slivers()
+        #slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+        #slices = self.api.driver.GetSlices(slice_name, slice_fields)
+        ## Build up list of users and slice attributes
+        #person_ids = set()
+        #all_slice_tag_ids = set()
+        #for slice in slices:
+            #person_ids.update(slice['person_ids'])
+            #all_slice_tag_ids.update(slice['slice_tag_ids'])
+        #person_ids = list(person_ids)
+        #all_slice_tag_ids = list(all_slice_tag_ids)
+        ## Get user information
+        #all_persons_list = self.api.driver.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        #all_persons = {}
+        #for person in all_persons_list:
+            #all_persons[person['person_id']] = person        
+
+        ## Build up list of keys
+        #key_ids = set()
+        #for person in all_persons.values():
+            #key_ids.update(person['key_ids'])
+        #key_ids = list(key_ids)
+        ## Get user account keys
+        #all_keys_list = self.api.driver.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
+        #all_keys = {}
+        #for key in all_keys_list:
+            #all_keys[key['key_id']] = key
+        ## Get slice attributes
+        #all_slice_tags_list = self.api.driver.GetSliceTags(all_slice_tag_ids)
+        #all_slice_tags = {}
+        #for slice_tag in all_slice_tags_list:
+            #all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+           
+        #slivers = []
+        #for slice in slices:
+            #keys = []
+            #for person_id in slice['person_ids']:
+                #if person_id in all_persons:
+                    #person = all_persons[person_id]
+                    #if not person['enabled']:
+                        #continue
+                    #for key_id in person['key_ids']:
+                        #if key_id in all_keys:
+                            #key = all_keys[key_id]
+                            #keys += [{'key_type': key['key_type'],
+                                    #'key': key['key']}]
+            #attributes = []
+            ## All (per-node and global) attributes for this slice
+            #slice_tags = []
+            #for slice_tag_id in slice['slice_tag_ids']:
+                #if slice_tag_id in all_slice_tags:
+                    #slice_tags.append(all_slice_tags[slice_tag_id]) 
+            ## Per-node sliver attributes take precedence over global
+            ## slice attributes, so set them first.
+            ## Then comes nodegroup slice attributes
+            ## Followed by global slice attributes
+            #sliver_attributes = []
+
+            #if node is not None:
+                #for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    #sliver_attributes.append(sliver_attribute['tagname'])
+                    #attributes.append({'tagname': sliver_attribute['tagname'],
+                                    #'value': sliver_attribute['value']})
+
+            ## set nodegroup slice attributes
+            #for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                ## Do not set any nodegroup slice attributes for
+                ## which there is at least one sliver attribute
+                ## already set.
+                #if slice_tag not in slice_tags:
+                    #attributes.append({'tagname': slice_tag['tagname'],
+                        #'value': slice_tag['value']})
+
+            #for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                ## Do not set any global slice attributes for
+                ## which there is at least one sliver attribute
+                ## already set.
+                #if slice_tag['tagname'] not in sliver_attributes:
+                    #attributes.append({'tagname': slice_tag['tagname'],
+                                   #'value': slice_tag['value']})
+
+            ## XXX Sanity check; though technically this should be a system invariant
+            ## checked with an assertion
+            #if slice['expires'] > MAXINT:  slice['expires']= MAXINT
+            
+            #slivers.append({
+                #'hrn': hrn,
+                #'name': slice['name'],
+                #'slice_id': slice['slice_id'],
+                #'instantiation': slice['instantiation'],
+                #'expires': slice['expires'],
+                #'keys': keys,
+                #'attributes': attributes
+            #})
+
+        #return slivers
+    def get_peer(self, xrn):
+        """Return the myplc peer record for *xrn*'s site authority, or None.
+
+        The actual GetPeers lookup is commented out, so this currently
+        always returns None, i.e. every slice is treated as local.
+        """
+        hrn, type = urn_to_hrn(xrn)
+        # Because of myplc federation, we first need to determine if this
+        # slice belongs to our local plc or a myplc peer. We will assume it
+        # is a local site, unless we find out otherwise.
+        peer = None
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer slice_authority  "
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+
+        # get this site's authority (sfa root authority or sub authority)
+        site_authority = get_authority(slice_authority).lower()
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer slice_authority  %s site_authority %s" %(slice_authority,site_authority) 
+        # check if we are already peered with this site_authority, if so
+        #peers = self.api.driver.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+        #for peer_record in peers:
+            #names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
+            #if site_authority in names:
+                #peer = peer_record
+
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        """Return the site authority hrn for *xrn* when it differs from our
+        own api.hrn (i.e. the record belongs to a remote SFA peer), else None."""
+        hrn, type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.api.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def verify_slice_nodes(self, slice, requested_slivers, peer):
+        current_slivers = []
+        deleted_nodes = []
+        if slice['node_ids']:
+            nodes = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
+            current_slivers = [node['hostname'] for node in nodes]
+    
+            # remove nodes not in rspec
+            deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+    
+        # add nodes from rspec
+        added_nodes = list(set(requested_slivers).difference(current_slivers))        
+        print>>sys.stderr , "\r\n \r\n \t slices.py  verify_slice_nodes added_nodes %s slice %s" %( added_nodes,slice)
+        try:
+            if peer:
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            #PI is a list, get the only username in this list
+            #so that the OAR/LDAP knows the user: remove the authority from the name
+            tmp=  slice['PI'][0].split(".")
+            username = tmp[(len(tmp)-1)]
+            self.api.driver.AddSliceToNodes(slice['name'], added_nodes, username)
+            if deleted_nodes:
+                self.api.driver.DeleteSliceFromNodes(slice['name'], deleted_nodes)
+
+        except: 
+            self.api.logger.log_exc('Failed to add/remove slice from nodes')
+
+    def free_egre_key(self):
+        """Return the first unused EGRE key in [1, 255] as a string.
+
+        Raises KeyError (via the for/else clause) when all keys are taken.
+        """
+        used = set()
+        for tag in self.api.driver.GetSliceTags({'tagname': 'egre_key'}):
+                used.add(int(tag['value']))
+
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+    def verify_slice_links(self, slice, links, aggregate):
+        # Stub: link provisioning is not implemented for Senslab; all
+        # arguments are ignored and the method is a no-op.
+
+            return
+
+       
+                        
+        
+
+    def handle_peer(self, site, slice, persons, peer):
+        """Bind site, slice and person records to *peer*; on a bind failure
+        the just-created local object is deleted and the error re-raised
+        (manual rollback).  No-op when peer is falsy; returns *slice*."""
+        if peer:
+            # bind site
+            try:
+                if site:
+                    self.api.driver.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
+            except Exception,e:
+                self.api.driver.DeleteSite(site['site_id'])
+                raise e
+            
+            # bind slice
+            try:
+                if slice:
+                    self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
+            except Exception,e:
+                self.api.driver.DeleteSlice(slice['slice_id'])
+                raise e 
+
+            # bind persons
+            for person in persons:
+                try:
+                    self.api.driver.BindObjectToPeer('person', 
+                                                     person['person_id'], peer['shortname'], person['peer_person_id'])
+
+                    for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
+                        try:
+                            self.api.driver.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
+                        except:
+                            self.api.driver.DeleteKey(key['key_id'])
+                            # NOTE(review): self.api.logger is invoked as a function here;
+                            # probably meant self.api.logger.warn(...) -- confirm.
+                            self.api.logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
+                except Exception,e:
+                    self.api.driver.DeletePerson(person['person_id'])
+                    raise e       
+
+        return slice
+
+    # NOTE(review): slice_record={} is a mutable default argument -- harmless
+    # while it is only read from, but fragile if ever mutated.
+    def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None):
+        """Return the site record for *slice_xrn*'s authority, creating it
+        (and optionally registering it with an SFA peer) if it does not
+        exist yet.  Existing peer-bound sites are unbound for modification."""
+        (slice_hrn, type) = urn_to_hrn(slice_xrn)
+        site_hrn = get_authority(slice_hrn)
+        # login base can't be longer than 20 characters
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        authority_name = slicename.split('_')[0]
+        login_base = authority_name[:20]
+        sites = self.api.driver.GetSites(login_base)
+        if not sites:
+            # create new site record
+            site = {'name': 'geni.%s' % authority_name,
+                    'abbreviated_name': authority_name,
+                    'login_base': login_base,
+                    'max_slices': 100,
+                    'max_slivers': 1000,
+                    'enabled': True,
+                    'peer_site_id': None}
+            if peer:
+                site['peer_site_id'] = slice_record.get('site_id', None)
+            site['site_id'] = self.api.driver.AddSite(site)
+            # exempt federated sites from monitor policies
+            self.api.driver.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
+            
+            # is this still necessary?
+            # add record to the local registry 
+            if sfa_peer and slice_record:
+                peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+                             'peer_authority': sfa_peer, 'pointer': site['site_id']}
+                self.registry.register_peer_object(self.credential, peer_dict)
+        else:
+            site =  sites[0]
+            if peer:
+                # unbind from peer so we can modify if necessary. Will bind back later
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer['shortname']) 
+        
+        return site        
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer):
+        """Return the slice record for *slice_hrn*, creating it through the
+        driver if absent.  Note: unlike stock PLC, the lookup key here is the
+        full hrn, not the PL slicename."""
+        #slicename = hrn_to_pl_slicename(slice_hrn)
+        parts = hrn_to_pl_slicename(slice_hrn).split("_")
+        login_base = parts[0]
+        slicename = slice_hrn
+        slices = self.api.driver.GetSlices([slicename]) 
+        print>>sys.stderr, " \r\n \r\rn Slices.py verify_slice slicename %s slices %s slice_record %s"%(slicename ,slices, slice_record)
+        if not slices:
+            slice = {'name': slicename,
+                     'url': slice_record.get('url', slice_hrn), 
+                     #'description': slice_record.get('description', slice_hrn)
+                     }
+            # add the slice                          
+            slice['slice_id'] = self.api.driver.AddSlice(slice)
+            slice['node_ids'] = []
+            slice['person_ids'] = []
+            if peer:
+                slice['peer_slice_id'] = slice_record.get('slice_id', None) 
+            # mark this slice as an sfa peer record
+            if sfa_peer:
+                peer_dict = {'type': 'slice', 'hrn': slice_hrn, 
+                             'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
+                self.registry.register_peer_object(self.credential, peer_dict)
+        else:
+            slice = slices[0]
+            slice.update(slice_record)
+            # NOTE(review): unconditional del raises KeyError if the driver's
+            # record lacks these fields -- confirm GetSlices always returns them.
+            del slice['last_updated']
+            del slice['date_created']
+            if peer:
+                slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                # unbind from peer so we can modify if necessary. Will bind back later
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+               #Update existing record (e.g. expires field) it with the latest info.
+            #if slice_record and slice['expires'] != slice_record['expires']:
+                #self.api.driver.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
+       
+        return slice
+
+    #def get_existing_persons(self, users):
+    def verify_persons(self, slice_hrn, slice_record, users, append=True):
+        """Reconcile the slice's member list with *users*.
+
+        Each entry of *users* is expected to carry 'person_id' and 'hrn';
+        users are indexed under both keys.  Missing users are added (and
+        enabled) through the driver; stale members are removed only when
+        append is False.  Returns the list of newly added person records.
+        """
+        users_by_id = {}
+        users_by_hrn = {}
+        users_dict = {}
+      
+        for user in users:
+            if 'person_id' in user and 'hrn' in user:
+                users_by_id[user['person_id']] = user
+                users_dict[user['person_id']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+           
+                #hrn, type = urn_to_hrn(user['urn'])
+                #username = get_leaf(hrn) 
+                #login_base = get_leaf(get_authority(user['urn']))
+                #user['username'] = username 
+                #users_by_site[login_base].append(user)
+                users_by_hrn[user['hrn']] = user
+                users_dict[user['hrn']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+       
+        existing_user_ids = []
+        existing_users= []
+        if users_by_hrn:
+            # get existing users by email 
+           
+            existing_users = self.api.driver.GetPersons({'hrn': users_by_hrn.keys()}, 
+                                                        ['hrn'])
+            #print>>sys.stderr, " \r\n \r\n \t slices.py HEEEEEEEEY===========verify_person  existing_users %s users_dict %s  " %(existing_users, users_dict) 
+            #existing_user_ids = [(users_dict[user['hrn']]['hrn'],users_dict[user['hrn']]['person_id'] ) for user in existing_users]
+            # NOTE(review): this appends BOTH the person_id and hrn values for
+            # each existing user (dict-key iteration), so existing_user_ids
+            # mixes ids and hrns -- confirm that is intended.
+            for user in existing_users :
+                for  k in users_dict[user['hrn']] :
+                    existing_user_ids.append (users_dict[user['hrn']][k])
+
+            #print>>sys.stderr, " \r\n \r\n slices.py verify_person   existing_user_ids %s " %(existing_user_ids)
+        #if users_by_id:
+            #existing_user_ids.extend([user for user in users_by_id])
+        #if users_by_site:
+            ## get a list of user sites (based on requeste user urns
+            #site_list = self.api.driver.GetSites(users_by_site.keys(), \
+                #['site_id', 'login_base', 'person_ids'])
+            #sites = {}
+            #site_user_ids = []
+            
+            ## get all existing users at these sites
+            #for site in site_list:
+                #sites[site['site_id']] = site
+                #site_user_ids.extend(site['person_ids'])
+
+            #existing_site_persons_list = self.api.driver.GetPersons(site_user_ids,  
+                                                                    #['person_id', 'key_ids', 'email', 'site_ids'])
+
+            ## all requested users are either existing users or new (added) users      
+            #for login_base in users_by_site:
+                #requested_site_users = users_by_site[login_base]
+                #for requested_user in requested_site_users:
+                    #user_found = False
+                    #for existing_user in existing_site_persons_list:
+                        #for site_id in existing_user['site_ids']:
+                            #site = sites[site_id]
+                            #if login_base == site['login_base'] and \
+                               #existing_user['email'].startswith(requested_user['username']):
+                                #existing_user_ids.append(existing_user['email'])
+                                #users_dict[existing_user['email']] = requested_user
+                                #user_found = True
+                                #break
+                        #if user_found:
+                            #break
+      
+                    #if user_found == False:
+                        #fake_email = requested_user['username'] + '@geni.net'
+                        #users_dict[fake_email] = requested_user
+                
+
+        # requested slice users        
+        requested_user_ids = users_dict.keys()
+        # existing slice users
+        existing_slice_users_filter = {'hrn': slice_record.get('PI', [])}
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_person requested_user_ids %s existing_slice_users_filter %s slice_record %s" %(requested_user_ids,existing_slice_users_filter,slice_record)
+        
+        existing_slice_users = self.api.driver.GetPersons(existing_slice_users_filter,['hrn'])
+        existing_slice_user_ids = []
+        # same id/hrn mixing as above, for the slice's current members
+        for user in existing_slice_users :
+            for  k in users_dict[user['hrn']] :
+                    existing_slice_user_ids.append (users_dict[user['hrn']][k])
+                    #existing_slice_user_ids = [user['hrn'] for user in existing_slice_users]
+                    
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_person requested_user_ids %s  existing_slice_user_ids%s " %(requested_user_ids,existing_slice_user_ids)
+        # users to be added, removed or updated
+        added_user_ids = set(requested_user_ids).difference(set(existing_user_ids))
+        added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
+        removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_persons  existing_slice_user_ids %s  requested_user_ids %s " %(existing_slice_user_ids,requested_user_ids)
+        updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_persons  added_user_ids %s added_slice_user_ids %s " %(added_user_ids,added_slice_user_ids)
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_persons  removed_user_ids %s updated_user_ids %s " %(removed_user_ids,updated_user_ids)
+        # Remove stale users (only if we are not appending).
+        if append == False:
+            for removed_user_id in removed_user_ids:
+                self.api.driver.DeletePersonFromSlice(removed_user_id, slice_record['name'])
+        # update_existing users
+        updated_users_list = [user for user in existing_slice_users if user['hrn'] in \
+          updated_user_ids]
+        #self.verify_keys(existing_slice_users, updated_users_list, peer, append)
+
+        added_persons = []
+        # add new users
+        for added_user_id in added_user_ids:
+            added_user = users_dict[added_user_id]
+            #hrn, type = urn_to_hrn(added_user['urn'])  
+            person = {
+                #'first_name': added_user.get('first_name', hrn),
+                #'last_name': added_user.get('last_name', hrn),
+                'person_id': added_user_id,
+                #'peer_person_id': None,
+                #'keys': [],
+                #'key_ids': added_user.get('key_ids', []),
+                
+            } 
+            #print>>sys.stderr, " \r\n \r\n slices.py verify_persons   added_user_ids %s " %(added_user_ids)
+            person['person_id'] = self.api.driver.AddPerson(person)
+            # NOTE(review): 'peer' is not a parameter of this method; this
+            # branch would raise NameError if ever reached -- confirm.
+            if peer:
+                person['peer_person_id'] = added_user['person_id']
+            added_persons.append(person)
+           
+            # enable the account 
+            self.api.driver.UpdatePerson(person['person_id'], {'enabled': True})
+            
+            # add person to site
+            #self.api.driver.AddPersonToSite(added_user_id, login_base)
+
+            #for key_string in added_user.get('keys', []):
+                #key = {'key':key_string, 'key_type':'ssh'}
+                #key['key_id'] = self.api.driver.AddPersonKey(person['person_id'], key)
+                #person['keys'].append(key)
+
+            # add the registry record
+            #if sfa_peer:
+                #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
+                    #'pointer': person['person_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+    
+        for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
+            # add person to the slice 
+            self.api.driver.AddPersonToSlice(added_slice_user_id, slice_record['name'])
+            # if this is a peer record then it should already be bound to a peer.
+            # no need to return worry about it getting bound later 
+
+        return added_persons
+            
+            
+
+    def verify_keys(self, persons, users, peer, append=True):
+        """Reconcile SSH keys: add each user key not already present, and
+        (when append is False) delete keys no longer requested.  Peer-bound
+        persons/keys are unbound before and re-bound after modification."""
+        # existing keys 
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.api.driver.GetKeys(key_ids, ['key_id', 'key'])
+        # keydict maps key string -> key_id
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']     
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person    
+    
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.api.driver.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.api.driver.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+                            
+                    finally:
+                        if peer:
+                            self.api.driver.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+        
+        # remove old keys (only if we are not appending)
+        if append == False: 
+            removed_keys = set(existing_keys).difference(requested_keys)
+            # NOTE(review): existing_key_id iterates keydict's KEYS, i.e. key
+            # strings, yet keydict[existing_key_id] (a key_id) is tested against
+            # removed_keys (key strings) and the string itself is passed to
+            # UnBind/DeleteKey -- ids and strings look swapped; confirm.
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+                    try:
+                        if peer:
+                            self.api.driver.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.api.driver.DeleteKey(existing_key_id)
+                    except:
+                        # deliberate best-effort: deletion failures are ignored
+                        pass   
+
+    def verify_slice_attributes(self, slice, requested_slice_attributes, append=False, admin=False):
+        # get list of attributes users ar able to manage
+        filter = {'category': '*slice*'}
+        if not admin:
+            filter['|roles'] = ['user']
+        slice_attributes = self.api.driver.GetTagTypes(filter)
+        valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
+
+        # get sliver attributes
+        added_slice_attributes = []
+        removed_slice_attributes = []
+        ignored_slice_attribute_names = []
+        existing_slice_attributes = self.api.driver.GetSliceTags({'slice_id': slice['slice_id']})
+
+        # get attributes that should be removed
+        for slice_tag in existing_slice_attributes:
+            if slice_tag['tagname'] in ignored_slice_attribute_names:
+                # If a slice already has a admin only role it was probably given to them by an
+                # admin, so we should ignore it.
+                ignored_slice_attribute_names.append(slice_tag['tagname'])
+            else:
+                # If an existing slice attribute was not found in the request it should
+                # be removed
+                attribute_found=False
+                for requested_attribute in requested_slice_attributes:
+                    if requested_attribute['name'] == slice_tag['tagname'] and \
+                       requested_attribute['value'] == slice_tag['value']:
+                        attribute_found=True
+                        break
+
+            if not attribute_found and not append:
+                removed_slice_attributes.append(slice_tag)
+        
+        # get attributes that should be added:
+        for requested_attribute in requested_slice_attributes:
+            # if the requested attribute wasn't found  we should add it
+            if requested_attribute['name'] in valid_slice_attribute_names:
+                attribute_found = False
+                for existing_attribute in existing_slice_attributes:
+                    if requested_attribute['name'] == existing_attribute['tagname'] and \
+                       requested_attribute['value'] == existing_attribute['value']:
+                        attribute_found=True
+                        break
+                if not attribute_found:
+                    added_slice_attributes.append(requested_attribute)
+
+
+        # remove stale attributes
+        for attribute in removed_slice_attributes:
+            try:
+                self.api.driver.DeleteSliceTag(attribute['slice_tag_id'])
+            except Exception, e:
+                self.api.logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+                                % (name, value,  node_id, str(e)))
+
+        # add requested_attributes
+        for attribute in added_slice_attributes:
+            try:
+                self.api.driver.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
+            except Exception, e:
+                self.api.logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+                                % (name, value,  node_id, str(e)))
+
+    def create_slice_aggregate(self, xrn, rspec):
+        """Legacy entry point: create/update a slice from an old-style RSpec
+        (NodeSpec tags), attaching nodes and recognized slice tags."""
+        hrn, type = urn_to_hrn(xrn)
+        # Determine if this is a peer slice
+        peer = self.get_peer(hrn)
+        sfa_peer = self.get_sfa_peer(hrn)
+
+        spec = RSpec(rspec)
+        # Get the slice record from sfa
+        slicename = hrn_to_pl_slicename(hrn) 
+        slice = {}
+        slice_record = None
+        registry = self.api.registries[self.api.hrn]
+        credential = self.api.getCredential()
+
+        # NOTE(review): these calls do NOT match the signatures of verify_site /
+        # verify_slice defined above (argument lists and return shapes differ);
+        # this legacy path would fail if exercised -- confirm it is unused.
+        site_id, remote_site_id = self.verify_site(registry, credential, hrn, peer, sfa_peer)
+        slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
+
+        # find out where this slice is currently running
+        nodelist = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
+        hostnames = [node['hostname'] for node in nodelist]
+
+        # get netspec details
+        nodespecs = spec.getDictsByTagName('NodeSpec')
+
+        # dict in which to store slice attributes to set for the nodes
+        nodes = {}
+        for nodespec in nodespecs:
+            if isinstance(nodespec['name'], list):
+                for nodename in nodespec['name']:
+                    nodes[nodename] = {}
+                    for k in nodespec.keys():
+                        rspec_attribute_value = nodespec[k]
+                        if (self.rspec_to_slice_tag.has_key(k)):
+                            slice_tag_name = self.rspec_to_slice_tag[k]
+                            nodes[nodename][slice_tag_name] = rspec_attribute_value
+            elif isinstance(nodespec['name'], StringTypes):
+                nodename = nodespec['name']
+                nodes[nodename] = {}
+                for k in nodespec.keys():
+                    rspec_attribute_value = nodespec[k]
+                    if (self.rspec_to_slice_tag.has_key(k)):
+                        slice_tag_name = self.rspec_to_slice_tag[k]
+                        nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+                # NOTE(review): this second loop duplicates the one just above
+                # verbatim -- looks like an accidental copy; confirm.
+                for k in nodespec.keys():
+                    rspec_attribute_value = nodespec[k]
+                    if (self.rspec_to_slice_tag.has_key(k)):
+                        slice_tag_name = self.rspec_to_slice_tag[k]
+                        nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+        node_names = nodes.keys()
+        # remove nodes not in rspec
+        deleted_nodes = list(set(hostnames).difference(node_names))
+        # add nodes from rspec
+        added_nodes = list(set(node_names).difference(hostnames))
+
+        try:
+            if peer:
+                # NOTE(review): elsewhere in this class the peer RECORD's
+                # 'shortname' is passed, not the record itself -- confirm.
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+
+            self.api.driver.AddSliceToNodes(slicename, added_nodes) 
+
+            # Add recognized slice tags
+            for node_name in node_names:
+                node = nodes[node_name]
+                for slice_tag in node.keys():
+                    value = node[slice_tag]
+                    if (isinstance(value, list)):
+                        value = value[0]
+
+                    self.api.driver.AddSliceTag(slicename, slice_tag, value, node_name)
+
+            self.api.driver.DeleteSliceFromNodes(slicename, deleted_nodes)
+        finally:
+            if peer:
+                self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+        return 1
+
diff --git a/sfa/senslab/table_slab.py b/sfa/senslab/table_slab.py
new file mode 100644 (file)
index 0000000..9ace414
--- /dev/null
@@ -0,0 +1,182 @@
+#
+# implements support for SFA records stored in db tables
+#
+# TODO: Use existing PLC database methods? or keep this separate?
+
+
+from sfa.trust.gid import *
+from sfa.util.record import *
+from sfa.util.config import *
+from sfa.util.filter import *
+from sfa.trust.hierarchy import *
+from sfa.trust.certificate import *
+from sfa.trust.auth import *
+from sfa.senslab.OARrestapi import *
+from sfa.senslab.LDAPapi import *
+
+class SfaTable(list):
+    authname=""
+    def __init__(self, record_filter = None):
+       self.oar = OARapi()
+       self.ldap = LDAPapi()
+       self.senslabauth=Hierarchy()
+       config=Config()
+       self.authname=config.SFA_REGISTRY_ROOT_AUTH
+       authinfo=self.senslabauth.get_auth_info(self.authname)
+       
+       self.auth=Auth()
+       gid=authinfo.get_gid_object()
+
+    def exists(self):
+        return True
+
+    def db_fields(self, obj=None):
+        return dict( [ ] )
+
+    @staticmethod
+    def is_writable (key,value,dict):
+        # if not mentioned, assume it's writable (e.g. deleted ...)
+        if key not in dict: return True
+        # if mentioned but not linked to a Parameter object, idem
+        if not isinstance(dict[key], Parameter): return True
+        # if not marked ro, it's writable
+        if not dict[key].ro: return True
+
+        return False
+
+
+    def create(self):
+        return True
+    
+    def remove(self, record):
+        return 0
+
+    def insert(self, record):
+        return 0
+
+    def update(self, record):
+        return 0
+
+    def quote_string(self, value):
+        return str(self.db.quote(value))
+
+    def quote(self, value):
+        return self.db.quote(value)
+    
+    def oarFind(self, record_filter = None, columns=None):
+       results=[]
+       node_ids=[]
+
+       if 'authority' in record_filter:
+               # ask for authority
+               if record_filter['authority']== self.authname :
+                       # which is senslab
+                       print>> sys.stderr , "ET MERDE !!!!"
+                       node_ids=""
+               else:
+                       # which is NOT senslab
+                       return []
+       else :
+               if not 'hrn' in record_filter:
+                       print >>sys.stderr,"find : don't know how to handle filter ",record_filter
+                       return []
+               else:
+                       hrns=[]
+                       h=record_filter['hrn']
+                       if  isinstance(h,list):
+                               hrns=h
+                       else : 
+                               hrns.append(h)
+       
+                       for hrn in hrns:
+                               head,sep,tail=hrn.partition(".")
+                               if head != self.authname :
+                                       print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", splited_hrn[0]
+                               else :
+                                       node_ids.append(tail)
+
+       node_list = self.oar.GetNodes( node_ids)
+
+       for node in node_list:
+               hrn=self.authname+"."+node['hostname']
+               results.append(  {      
+                       'type': 'node',
+#                      'email': ldapentry[1]['mail'][0],
+#                      'first_name': ldapentry[1]['givenName'][0],
+#                      'last_name': ldapentry[1]['sn'][0],
+#                      'phone': 'none',
+#                      'gid': gid.save_to_string(),
+#                      'serial': 'none',
+                       'authority': self.authname,
+                       'peer_authority': '',
+                       'pointer' : '',
+                       'hrn': hrn,
+                       'date_created' : 'none',
+                       'last_updated': 'none'
+                       } )     
+       
+       return results
+    
+    def find(self, record_filter = None, columns=None):
+       # senslab stores its users in an ldap dictionnary
+        # and nodes in a oar scheduller database
+        # both should be interrogated.
+       print >>sys.stderr,"find : ",record_filter
+       if not isinstance(record_filter,dict):
+               print >>sys.stderr,"find : record_filter is not a dict"
+               print >>sys.stderr,record_filter.__class__
+               return []
+       allResults=[]
+       if 'type' in record_filter:
+               if record_filter['type'] == 'slice':
+                       print >>sys.stderr,"find : don't know how to handle slices yet"
+                       return []
+               if record_filter['type'] == 'authority':
+                       if  'hrn' in  record_filter and record_filter['hrn']==self.authname:
+                               return []
+                       else:
+                               print >>sys.stderr,"find which authority ?"
+                               return []
+               if record_filter['type'] == 'user':
+                       return self.ldap.ldapFind(record_filter, columns)
+               if record_filter['type'] == 'node':
+                       return self.ldap.ldapFind(record_filter, columns)
+               else:
+                       print >>sys.stderr,"unknown type to find : ", record_filter['type']
+                       return []
+       else:
+               allResults = self.ldap.ldapFind(record_filter, columns)
+               allResults+= self.oarFind(record_filter, columns)
+       
+       return allResults
+    
+    def findObjects(self, record_filter = None, columns=None):
+       print >>sys.stderr,"find : ",record_filter
+        #        print record_filter['type']
+        #        if record_filter['type'] in  ['authority']:
+        #            print "findObjectAuthority"
+        results = self.find(record_filter, columns) 
+        result_rec_list = []
+       for result in results:
+               if result['type'] in ['authority']:
+                       result_rec_list.append(AuthorityRecord(dict=result))
+               elif result['type'] in ['node']:
+                       result_rec_list.append(NodeRecord(dict=result))
+               elif result['type'] in ['slice']:
+                       result_rec_list.append(SliceRecord(dict=result))
+               elif result['type'] in ['user']:
+                       result_rec_list.append(UserRecord(dict=result))
+               else:
+                       result_rec_list.append(SfaRecord(dict=result))
+       
+       return result_rec_list
+
+
+    def drop(self):
+        return 0
+    
+    def sfa_records_purge(self):
+        return 0
+        
index f6269b3..8e86eb4 100644 (file)
@@ -40,6 +40,7 @@ class Auth:
         valid = []
         if not isinstance(creds, list):
             creds = [creds]
+        #print>>sys.stderr, "\r\n \r\n \t AUTH.PY checkCredentials hrn %s" %(hrn)
         logger.debug("Auth.checkCredentials with %d creds"%len(creds))
         for cred in creds:
             try:
@@ -68,7 +69,7 @@ class Auth:
         self.client_cred = Credential(string = cred)
         self.client_gid = self.client_cred.get_gid_caller()
         self.object_gid = self.client_cred.get_gid_object()
-        
+        #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check client_gid %s  hrn %s object_gid %s" %(self.client_gid.get_hrn(),hrn, self.object_gid.get_hrn())
         # make sure the client_gid is not blank
         if not self.client_gid:
             raise MissingCallerGID(self.client_cred.get_subject())
@@ -78,19 +79,25 @@ class Auth:
             self.verifyPeerCert(self.peer_cert, self.client_gid)                   
 
         # make sure the client is allowed to perform the operation
-        if operation:
+        if operation:    
+            #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check operation %s trusted_cert_list %s " %(operation,self.trusted_cert_list)
             if not self.client_cred.can_perform(operation):
+                #print>>sys.stderr, " \r\n \r\n \t AUTH.PY InsufficientRights(operation)"
                 raise InsufficientRights(operation)
 
         if self.trusted_cert_list:
             self.client_cred.verify(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+            #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check  trusted_cert_file_list %s  self.config.SFA_CREDENTIAL_SCHEMA %s" %(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+            
         else:
            raise MissingTrustedRoots(self.config.get_trustedroots_dir())
        
         # Make sure the credential's target matches the specified hrn. 
         # This check does not apply to trusted peers 
         trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
+        #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check trusted_peers ", trusted_peers
         if hrn and self.client_gid.get_hrn() not in trusted_peers:
+            
             target_hrn = self.object_gid.get_hrn()
             if not hrn == target_hrn:
                 raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
@@ -225,13 +232,15 @@ class Auth:
         @param name human readable name to test  
         """
         object_hrn = self.object_gid.get_hrn()
-        if object_hrn == name:
+       strname = str(name).strip("['']")
+       
+        if object_hrn == strname:
             return
-        if name.startswith(object_hrn + "."):
+        if strname.startswith((object_hrn + ".")) is True:
             return
         #if name.startswith(get_authority(name)):
             #return
-    
+       #print>>sys.stderr, " \r\n \t AUTH.PY  verify_object_permission GROSECHECDELENFER "
         raise PermissionError(name)
 
     def determine_user_rights(self, caller_hrn, record):
index 8fd11e8..7f34757 100644 (file)
@@ -26,7 +26,7 @@
 # Credentials are signed XML files that assign a subject gid privileges to an object gid
 ##
 
-import os
+import os,sys
 from types import StringTypes
 import datetime
 from StringIO import StringIO
@@ -160,8 +160,10 @@ class Signature(object):
 
 
     def get_refid(self):
+        #print>>sys.stderr," \r\n \r\n credential.py Signature get_refid\ self.refid %s " %(self.refid)
         if not self.refid:
             self.decode()
+            #print>>sys.stderr," \r\n \r\n credential.py Signature get_refid self.refid %s " %(self.refid)
         return self.refid
 
     def get_xml(self):
@@ -588,18 +590,23 @@ class Credential(object):
     
     def updateRefID(self):
         if not self.parent:
-            self.set_refid('ref0')
+            self.set_refid('ref0') 
+            #print>>sys.stderr, " \r\n \r\n updateRefID next_cred ref0 "
             return []
         
         refs = []
 
         next_cred = self.parent
+       
         while next_cred:
+          
             refs.append(next_cred.get_refid())
             if next_cred.parent:
                 next_cred = next_cred.parent
+                #print>>sys.stderr, " \r\n \r\n updateRefID next_cred "
             else:
                 next_cred = None
+                #print>>sys.stderr, " \r\n \r\n updateRefID next_cred NONE"
 
         
         # Find a unique refid for this credential
@@ -804,10 +811,12 @@ class Credential(object):
                     # Failures here include unreadable files
                     # or non PEM files
                     trusted_cert_objects.append(GID(filename=f))
+                    #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs %s" %(GID(filename=f).get_hrn())
                     ok_trusted_certs.append(f)
                 except Exception, exc:
                     logger.error("Failed to load trusted cert from %s: %r", f, exc)
             trusted_certs = ok_trusted_certs
+            #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs elemnebts %s" %(len(trusted_certs))
 
         # Use legacy verification if this is a legacy credential
         if self.legacy:
@@ -833,7 +842,8 @@ class Credential(object):
             # Verify the gids of this cred and of its parents
             for cur_cred in self.get_credential_list():
                 cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
-                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)        
+                #print>>sys.stderr, " \r\n \t\t\t credential.py verify cur_cred get_gid_object hrn %s get_gid_caller %s" %(cur_cred.get_gid_object().get_hrn(),cur_cred.get_gid_caller().get_hrn()) 
 
         refs = []
         refs.append("Sig_%s" % self.get_refid())
@@ -841,7 +851,7 @@ class Credential(object):
         parentRefs = self.updateRefID()
         for ref in parentRefs:
             refs.append("Sig_%s" % ref)
-
+            #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs refs",  ref 
         for ref in refs:
             # If caller explicitly passed in None that means skip xmlsec1 validation.
             # Strange and not typical
@@ -852,6 +862,7 @@ class Credential(object):
 #                (self.xmlsec_path, ref, cert_args, filename)
             verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \
                             % (self.xmlsec_path, ref, cert_args, filename)).read()
+            #print>>sys.stderr, " \r\n \t\t\t credential.py verify filename %s verified %s " %(filename,verified)             
             if not verified.strip().startswith("OK"):
                 # xmlsec errors have a msg= which is the interesting bit.
                 mstart = verified.find("msg=")
@@ -862,11 +873,12 @@ class Credential(object):
                     msg = verified[mstart:mend]
                 raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
         os.remove(filename)
-
+        
+        #print>>sys.stderr, " \r\n \t\t\t credential.py HUMMM parents %s", self.parent
         # Verify the parents (delegation)
         if self.parent:
             self.verify_parent(self.parent)
-
+        #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs parents" 
         # Make sure the issuer is the target's authority, and is
         # itself a valid GID
         self.verify_issuer(trusted_cert_objects)
@@ -969,6 +981,7 @@ class Credential(object):
     # . The expiry time on the child must be no later than the parent
     # . The signer of the child must be the owner of the parent        
     def verify_parent(self, parent_cred):
+        #print>>sys.stderr, " \r\n\r\n \t verify_parent parent_cred.get_gid_caller().save_to_string(False) %s  self.get_signature().get_issuer_gid().save_to_string(False) %s" %(parent_cred.get_gid_caller().get_hrn(),self.get_signature().get_issuer_gid().get_hrn())
         # make sure the rights given to the child are a subset of the
         # parents rights (and check delegate bits)
         if not parent_cred.get_privileges().is_superset(self.get_privileges()):
index 2443777..6f3668f 100644 (file)
@@ -1,6 +1,6 @@
 # sfa should not depend on sfatables
 # if the sfatables.runtime import fails, just define run_sfatables as identity
-
+import sys
 try:
     from sfatables.runtime import SFATablesRules
 
@@ -27,9 +27,10 @@ try:
         """
         if not context_callback:
             context_callback = fetch_context
-
+    
         chain = chain.upper()
         rules = SFATablesRules(chain)
+        print>>sys.stderr, " \r\n \r\n \t\t \t sfaTablesRuntime.py run_sfatables context_callback %s  chain %s rules %s " %(context_callback,chain, rules )
         if rules.sorted_rule_list:
             contexts = rules.contexts
             request_context = context_callback(hrn, origin_hrn, contexts)
index 1f50628..2679512 100644 (file)
@@ -22,7 +22,7 @@
 #----------------------------------------------------------------------
 
 import re
-
+import sys
 from sfa.util.faults import SfaAPIError
 
 # for convenience and smoother translation - we should get rid of these functions eventually 
@@ -116,16 +116,19 @@ class Xrn:
     # provide either urn, or (hrn + type)
     def __init__ (self, xrn, type=None):
         if not xrn: xrn = ""
+       
         # user has specified xrn : guess if urn or hrn
         if xrn.startswith(Xrn.URN_PREFIX):
             self.hrn=None
             self.urn=xrn
             self.urn_to_hrn()
+            #print>>sys.stderr, " \r\n \r\n \t XRN.PY init  xrn.startswith(Xrn.URN_PREFIX) hrn %s urn %s type %s" %(  self.hrn,  self.urn, self.type)
         else:
             self.urn=None
             self.hrn=xrn
             self.type=type
             self.hrn_to_urn()
+            #print>>sys.stderr, " \r\n \r\n \t XRN.PY init ELSE hrn %s urn %s type %s" %(  self.hrn,  self.urn, self.type)
 # happens all the time ..
 #        if not type:
 #            debug_logger.debug("type-less Xrn's are not safe")
@@ -136,13 +139,16 @@ class Xrn:
     def get_hrn_type(self): return (self.hrn, self.type)
 
     def _normalize(self):
+        #print>>sys.stderr, " \r\n \r\n \t XRN.PY _normalize self.hrn %s ",self.hrn
         if self.hrn is None: raise SfaAPIError, "Xrn._normalize"
         if not hasattr(self,'leaf'): 
             self.leaf=Xrn.hrn_split(self.hrn)[-1]
         # self.authority keeps a list
         if not hasattr(self,'authority'): 
             self.authority=Xrn.hrn_auth_list(self.hrn)
-
+        #print>>sys.stderr, " \r\n \r\n \t XRN.PY _normalize self.hrn %s leaf %s authority %s"%(self.hrn, self.leaf,  self.authority)
+       
+       
     def get_leaf(self):
         self._normalize()
         return self.leaf
index 709ce6b..4b29787 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+# just checking write access on repo
 import sys
 import unittest