Merge branch 'master' into senslab2
author    Sandrine Avakian <sandrine.avakian@inria.fr>
Thu, 30 Aug 2012 12:23:36 +0000 (14:23 +0200)
committer Sandrine Avakian <sandrine.avakian@inria.fr>
Thu, 30 Aug 2012 12:23:36 +0000 (14:23 +0200)
Conflicts:
sfa/trust/credential.py
sfa/util/xrn.py

41 files changed:
setup.py
sfa/client/client_helper.py
sfa/client/sfi.py
sfa/generic/slab.py [new file with mode: 0644]
sfa/importer/slabimporter.py [new file with mode: 0644]
sfa/managers/driver.py
sfa/managers/registry_manager.py
sfa/managers/senslab/sl.rng [new file with mode: 0644]
sfa/methods/CreateSliver.py
sfa/methods/ListResources.py
sfa/planetlab/plslices.py
sfa/rspecs/elements/versions/sfav1Lease.py
sfa/rspecs/elements/versions/slabv1Lease.py [new file with mode: 0644]
sfa/rspecs/elements/versions/slabv1Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/slabv1Sliver.py [new file with mode: 0644]
sfa/rspecs/pl_rspec_version.py [new file with mode: 0644]
sfa/rspecs/versions/slabv1.py [new file with mode: 0644]
sfa/senslab/LDAPapi.py [new file with mode: 0644]
sfa/senslab/OARrestapi.py [new file with mode: 0644]
sfa/senslab/TestSuite.py [new file with mode: 0644]
sfa/senslab/__init__.py [new file with mode: 0644]
sfa/senslab/config/bash_nukem [new file with mode: 0755]
sfa/senslab/config/senslab/default_config.xml [new file with mode: 0644]
sfa/senslab/config/senslab/sfa_config [new file with mode: 0644]
sfa/senslab/config/senslab/sfa_config.xml [new file with mode: 0644]
sfa/senslab/config/senslab/site.xml [new file with mode: 0644]
sfa/senslab/config/senslab2/default_config.xml [new file with mode: 0644]
sfa/senslab/config/senslab2/sfa_config [new file with mode: 0644]
sfa/senslab/config/senslab2/sfa_config.xml [new file with mode: 0644]
sfa/senslab/config/senslab2/site.xml [new file with mode: 0644]
sfa/senslab/sfa-bare [new file with mode: 0755]
sfa/senslab/slabaggregate.py [new file with mode: 0644]
sfa/senslab/slabdriver.py [new file with mode: 0644]
sfa/senslab/slabpostgres.py [new file with mode: 0644]
sfa/senslab/slabslices.py [new file with mode: 0644]
sfa/server/sfa-start.py
sfa/trust/auth.py
sfa/trust/credential.py
sfa/util/sfatablesRuntime.py
sfa/util/xrn.py
tests/testXrn.py

index 606e1ef..69a9e3c 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -31,6 +31,15 @@ packages = [
     'sfa/generic',
     'sfa/managers',
     'sfa/importer',
+
+
+
+    'sfa/senslab',
+
+
+
+
+
     'sfa/rspecs',
     'sfa/rspecs/elements',
     'sfa/rspecs/elements/versions',
index 32e21a1..e1edfb8 100644 (file)
@@ -1,4 +1,4 @@
-
+import sys
 def pg_users_arg(records):
     users = []  
     for record in records:
@@ -11,19 +11,25 @@ def pg_users_arg(records):
 
 def sfa_users_arg(records, slice_record):
     users = []
+    print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg slice_record %s \r\n records %s"%(slice_record,records)
     for record in records:
         if record['type'] != 'user': 
             continue
-        user = {'urn': record['geni_urn'], #
+        user = {'urn': record['geni_urn'], 
                 'keys': record['keys'],
                 'email': record['email'], # needed for MyPLC
-                'person_id': record['person_id'], # needed for MyPLC
+                'person_id': record['record_id'], 
+                'hrn': record['hrn'],
+                'type': record['type'],
+                'authority' : record['authority'],
+                'gid' : record['gid'],
                 'first_name': record['first_name'], # needed for MyPLC
                 'last_name': record['last_name'], # needed for MyPLC
                 'slice_record': slice_record, # needed for legacy refresh peer
                 'key_ids': record['key_ids'] # needed for legacy refresh peer
                 }         
-        users.append(user)
+        users.append(user)   
+        print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg user %s",user
     return users        
 
 def sfa_to_pg_users_arg(users):
index 3695f9a..e837270 100644 (file)
@@ -1110,6 +1110,7 @@ or with an slice hrn, shows currently provisioned resources
                 rspec.filter({'component_manager_id': server_version['urn']})
                 rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
             else:
+                print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
                 users = sfa_users_arg(user_records, slice_record)
 
         # do not append users, keys, or slice tags. Anything
diff --git a/sfa/generic/slab.py b/sfa/generic/slab.py
new file mode 100644 (file)
index 0000000..7923af0
--- /dev/null
@@ -0,0 +1,44 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+
+
+
+class slab (Generic):
+    
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+    
+    # the importer class
+    def importer_class (self): 
+        import sfa.importer.slabimporter
+        return sfa.importer.slabimporter.SlabImporter
+    
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager 
+        return sfa.managers.registry_manager.RegistryManager
+    
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager 
+        return sfa.managers.slice_manager.SliceManager
+    
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.senslab.slabdriver
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
diff --git a/sfa/importer/slabimporter.py b/sfa/importer/slabimporter.py
new file mode 100644 (file)
index 0000000..325e526
--- /dev/null
@@ -0,0 +1,312 @@
+import sys
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
+
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.senslab.slabpostgres import SliceSenslab, slab_dbsession
+
+from sfa.trust.certificate import Keypair,convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
+                                                    RegUser, RegKey
+
+
+from sqlalchemy.exc import SQLAlchemyError
+
+
+def _get_site_hrn(site):
+    hrn = site['name'] 
+    return hrn
+
+class SlabImporter:
+    
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+    def hostname_to_hrn_escaped(self, root_auth, hostname):
+        return '.'.join( [root_auth,Xrn.escape(hostname)] )
+
+
+    
+    def slicename_to_hrn(self, person_hrn):
+        return  (person_hrn +'_slice')
+    
+    def add_options (self, parser):
+        # we don't have any options for now
+        pass
+    
+    def find_record_by_type_hrn(self,type,hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+    
+    def locate_by_type_pointer (self, type, pointer):
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer  .........................." 
+        ret = self.records_by_type_pointer.get ( (type, pointer), None)
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer  " 
+        return ret
+    
+    def update_just_added_records_dict (self, record):
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("SlabImporter.update_just_added_records_dict: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+        
+    def run (self, options):
+        config = Config()
+
+        slabdriver = SlabDriver(config)
+        
+        #Create special slice table for senslab 
+        
+        if not slabdriver.db.exists('slice_senslab'):
+            slabdriver.db.createtable('slice_senslab')
+            self.logger.info ("SlabImporter.run:  slice_senslab table created ")
+
+        #retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+      
+        #create hash by (type,hrn) 
+        #used  to know if a given record is already known to SFA 
+       
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
+            
+        # create hash by (type,pointer) 
+        self.records_by_type_pointer = \
+            dict ( [ ( (str(record.type),record.pointer) , record ) for record in all_records  if record.pointer != -1] )
+
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records: 
+            record.stale=True
+        
+        nodes_listdict  = slabdriver.GetNodes()
+        nodes_by_id = dict([(node['node_id'],node) for node in nodes_listdict])
+        sites_listdict  = slabdriver.GetSites()
+        
+        ldap_person_listdict = slabdriver.GetPersons()
+        slices_listdict = slabdriver.GetSlices()
+        try:
+            slices_by_userid = dict ( [ (one_slice['record_id_user'], one_slice ) for one_slice in slices_listdict ] )
+        except TypeError:
+             self.logger.log_exc("SlabImporter: failed to create list of slices by user id.") 
+             pass
+        for site in sites_listdict:
+            site_hrn = _get_site_hrn(site) 
+            site_record = self.find_record_by_type_hrn ('authority', site_hrn)
+            if not site_record:
+                try:
+                    urn = hrn_to_urn(site_hrn, 'authority') 
+                    if not self.auth_hierarchy.auth_exists(urn):
+                        self.auth_hierarchy.create_auth(urn)
+                    auth_info = self.auth_hierarchy.get_auth_info(urn)
+                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                               pointer='-1',
+                                               authority=get_authority(site_hrn))
+                    site_record.just_created()
+                    dbsession.add(site_record)
+                    dbsession.commit()
+                    self.logger.info("SlabImporter: imported authority (site) : %s" % site_record) 
+                    self.update_just_added_records_dict(site_record)
+                except SQLAlchemyError:
+                    # if the site import fails then there is no point in trying to import the
+                    # site's child records (node, slices, persons), so skip them.
+                    self.logger.log_exc("SlabImporter: failed to import site. Skipping child records") 
+                    continue
+            else:
+                # xxx update the record ...
+                pass
+            site_record.stale=False 
+            
+         # import node records in site
+            for node_id in site['node_ids']:
+                try:
+                    node = nodes_by_id[node_id]
+                except:
+                    self.logger.warning ("SlabImporter: cannot find node_id %s - ignored"%node_id)
+                    continue 
+                site_auth = get_authority(site_hrn)
+                site_name = site['name']                
+                escaped_hrn =  self.hostname_to_hrn_escaped(slabdriver.root_auth, node['hostname'])
+                print>>sys.stderr, "\r\n \r\n SLABIMPORTER node %s " %(node)               
+                hrn =  node['hrn']
+
+
+                # xxx this sounds suspicious
+                if len(hrn) > 64: hrn = hrn[:64]
+                node_record = self.find_record_by_type_hrn( 'node', hrn )
+                if not node_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(escaped_hrn, 'node') 
+                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        node_record = RegNode (hrn=hrn, gid=node_gid, 
+                                                pointer = '-1',
+                                                authority=get_authority(hrn)) 
+                        node_record.just_created()
+                        dbsession.add(node_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported node: %s" % node_record)  
+                        self.update_just_added_records_dict(node_record)
+                    except:
+                        self.logger.log_exc("SlabImporter: failed to import node") 
+                else:
+                    # xxx update the record ...
+                    pass
+                node_record.stale=False
+                    
+                    
+            # import persons
+            for person in ldap_person_listdict : 
+            
+                person_hrn = person['hrn']
+                slice_hrn = self.slicename_to_hrn(person['hrn'])
+               
+                # xxx suspicious again
+                if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+                person_urn = hrn_to_urn(person_hrn, 'user')
+    
+                user_record = self.find_record_by_type_hrn( 'user', person_hrn)
+                slice_record = self.find_record_by_type_hrn ('slice', slice_hrn)
+                
+                # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+                def init_person_key (person, slab_key):
+                    pubkey=None
+                    if  person['pkey']:
+                        # randomly pick first key in set
+                        pubkey = slab_key
+                        try:
+                            pkey = convert_public_key(pubkey)
+                        except:
+                            self.logger.warn('SlabImporter: unable to convert public key for %s' % person_hrn)
+                            pkey = Keypair(create=True)
+                    else:
+                        # the user has no keys. Creating a random keypair for the user's gid
+                        self.logger.warn("SlabImporter: person %s does not have a PL public key"%person_hrn)
+                        pkey = Keypair(create=True)
+                    return (pubkey, pkey)
+                                
+                 
+                try:
+                    slab_key = person['pkey']
+                    # new person
+                    if not user_record:
+                        (pubkey,pkey) = init_person_key (person, slab_key )
+                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                        if person['email']:
+                            print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON EMAIL OK email %s " %(person['email'])
+                            person_gid.set_email(person['email'])
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid, 
+                                                    pointer='-1', 
+                                                    authority=get_authority(person_hrn),
+                                                    email=person['email'])
+                        else:
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid, 
+                                                    pointer='-1', 
+                                                    authority=get_authority(person_hrn))
+                            
+                        if pubkey: 
+                            user_record.reg_keys=[RegKey (pubkey)]
+                        else:
+                            self.logger.warning("No key found for user %s"%user_record)
+                        user_record.just_created()
+                        dbsession.add (user_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported person: %s" % user_record)
+                        print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON IMPORT NOTuser_record %s " %(user_record)
+                        self.update_just_added_records_dict( user_record )
+                    else:
+                        # update the record ?
+                        # if user's primary key has changed then we need to update the 
+                        # users gid by forcing an update here
+                        sfa_keys = user_record.reg_keys
+                       
+                        new_key=False
+                        if slab_key is not sfa_keys : 
+                            new_key = True
+                        if new_key:
+                            (pubkey,pkey) = init_person_key (person, slab_key)
+                            person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                            if not pubkey:
+                                user_record.reg_keys=[]
+                            else:
+                                user_record.reg_keys=[ RegKey (pubkey)]
+                            self.logger.info("SlabImporter: updated person: %s" % user_record)
+                    if person['email']:
+                        user_record.email = person['email']
+                    dbsession.commit()
+                    user_record.stale=False
+                except:
+                    self.logger.log_exc("SlabImporter: failed to import person  %s"%(person) )       
+                
+                try:
+                    slice = slices_by_userid[user_record.record_id]
+                except:
+                    self.logger.warning ("SlabImporter: cannot locate slices_by_userid[user_record.record_id] %s - ignored"%user_record.record_id )    
+                if not slice_record:
+                   
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(slice_hrn, 'slice')
+                        slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid, 
+                                                    pointer='-1',
+                                                    authority=get_authority(slice_hrn))
+                     
+                        slice_record.just_created()
+                        dbsession.add(slice_record)
+                        dbsession.commit()
+                        
+                        #Serial id created after commit
+                        #Get it
+                        sl_rec = dbsession.query(RegSlice).filter(RegSlice.hrn.match(slice_hrn)).all()
+                        
+                        slab_slice = SliceSenslab( slice_hrn = slice_hrn, record_id_slice=sl_rec[0].record_id, record_id_user= user_record.record_id)
+                        print>>sys.stderr, "\r\n \r\n SLAB IMPORTER SLICE IMPORT NOTslice_record %s \r\n slab_slice %s" %(sl_rec,slab_slice)
+                        slab_dbsession.add(slab_slice)
+                        slab_dbsession.commit()
+                        self.logger.info("SlabImporter: imported slice: %s" % slice_record)  
+                        self.update_just_added_records_dict ( slice_record )
+                    except:
+                        self.logger.log_exc("SlabImporter: failed to import slice")
+                        
+                #No slice update upon import in senslab 
+                else:
+                    # xxx update the record ...
+                    self.logger.warning ("Slice update not yet implemented")
+                    pass
+                # record current users affiliated with the slice
+
+                slice_record.reg_researchers =  [user_record]
+                dbsession.commit()
+                slice_record.stale=False 
+                       
+  
+                 
+         ### remove stale records
+        # special records must be preserved
+        system_hrns = [slabdriver.hrn, slabdriver.root_auth,  slabdriver.hrn+ '.slicemanager']
+        for record in all_records: 
+            if record.hrn in system_hrns: 
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+          
+
+        for record in all_records:
+            try:        
+                stale=record.stale
+            except:     
+                stale=True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("SlabImporter: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()         
+                 
+
+  
index d6a81be..6b2681c 100644 (file)
@@ -2,7 +2,7 @@
 # an attempt to document what a driver class should provide, 
 # and implement reasonable defaults
 #
-
+import sys
 class Driver:
     
     def __init__ (self, config): 
@@ -26,6 +26,7 @@ class Driver:
     # this constraint, based on the principle that SFA should not rely on the
     # testbed database to perform such a core operation (i.e. getting rights right)
     def augment_records_with_testbed_info (self, sfa_records):
+        print >>sys.stderr, "  \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
         return sfa_records
 
     # incoming record, as provided by the client to the Register API call
index f3f75f7..2f53090 100644 (file)
@@ -155,7 +155,7 @@ class RegistryManager:
         local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
         if type:
             local_records = local_records.filter_by(type=type)
-        local_records=local_records.all()
+        local_records=local_records.all()                
         logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
         local_dicts = [ record.__dict__ for record in local_records ]
         
@@ -163,9 +163,11 @@ class RegistryManager:
             # in full mode we get as much info as we can, which involves contacting the 
             # testbed for getting implementation details about the record
             self.driver.augment_records_with_testbed_info(local_dicts)
+            #logger.debug("Resolve: local_dicts =%s "%(local_dicts))
             # also we fill the 'url' field for known authorities
             # used to be in the driver code, sounds like a poorman thing though
             def solve_neighbour_url (record):
+                logger.debug("\r\n \t\t solve_neighbour_url: record = %s "%(record))
                 if not record.type.startswith('authority'): return 
                 hrn=record.hrn
                 for neighbour_dict in [ api.aggregates, api.registries ]:
@@ -173,12 +175,13 @@ class RegistryManager:
                         record.url=neighbour_dict[hrn].get_url()
                         return 
             for record in local_records: solve_neighbour_url (record)
-        
+            #logger.debug("\solve_neighbour_url = OK ")
         # convert local record objects to dicts for xmlrpc
         # xxx somehow here calling dict(record) issues a weird error
         # however record.todict() seems to work fine
         # records.extend( [ dict(record) for record in local_records ] )
-        records.extend( [ record.todict() for record in local_records ] )    
+        records.extend( [ record.todict() for record in local_records ] ) 
+        #logger.debug("\RESOLVE = records %s " %(records))   
         if not records:
             raise RecordNotFound(str(hrns))
     
diff --git a/sfa/managers/senslab/sl.rng b/sfa/managers/senslab/sl.rng
new file mode 100644 (file)
index 0000000..627b6fd
--- /dev/null
@@ -0,0 +1,134 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+  <start>
+    <ref name="RSpec"/>
+  </start>
+  <define name="RSpec">
+    <element name="RSpec">
+      <attribute name="type">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <choice>
+        <ref name="network"/>
+        <ref name="request"/>
+      </choice>
+    </element>
+  </define>
+  <define name="network">
+    <element name="network">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </optional>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="site"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver_defaults">
+    <element name="sliver_defaults">
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="site">
+    <element name="site">
+      <attribute name="id">
+        <data type="ID"/>
+      </attribute>
+      <element name="name">
+        <text/>
+      </element>
+      <zeroOrMore>
+        <ref name="node"/>
+      </zeroOrMore>
+    </element>
+  </define>
+  <define name="node">
+    <element name="node">
+      <attribute name="node_id">
+        <data type="ID"/>
+      </attribute>
+      <element name="hostname">
+        <text/>
+      </element> 
+      <attribute name="reservable">
+        <data type="boolean"/>
+      </attribute>
+      <element name="ip_address">
+        <text/>
+      </element>
+      <optional>
+        <element name="urn">
+            <text/>
+        </element>
+      </optional>
+      <optional>
+        <ref name="leases"/>
+       </optional>
+      <optional>
+        <ref name="sliver"/>
+       </optional>
+    </element>
+  </define>
+  <define name="request">
+    <element name="request">
+      <attribute name="name">
+        <data type="NMTOKEN"/>
+      </attribute>
+      <optional>
+        <ref name="sliver_defaults"/>
+      </optional>
+      <oneOrMore>
+        <ref name="sliver"/>
+      </oneOrMore>
+    </element>
+  </define>
+  <define name="sliver">
+    <element name="sliver">
+      <optional>
+        <attribute name="nodeid">
+          <data type="ID"/>
+        </attribute>
+      </optional>
+      <ref name="sliver_elements"/>
+    </element>
+  </define>
+  <define name="sliver_elements">
+    <interleave>
+      <optional>
+        <element name="capabilities">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="delegations">
+          <text/>
+        </element>
+      </optional>
+      <optional>
+        <element name="program">
+          <text/>
+        </element>
+      </optional>     
+      </interleave>
+  </define>
+  <define name="leases">
+    <element name="leases">
+      <zeroOrMore>
+        <group>
+          <attribute name="slot">
+            <data type="dateTime"/>
+          </attribute>
+          <attribute name="slice">
+            <data type="NMTOKEN"/>
+          </attribute>
+        </group>
+      </zeroOrMore>
+    </element>
+  </define>
+</grammar>
index 2797489..b898c9d 100644 (file)
@@ -2,6 +2,7 @@ from sfa.util.faults import SfaInvalidArgument, InvalidRSpec
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
+import sys
 from sfa.trust.credential import Credential
 from sfa.storage.parameter import Parameter, Mixed
 from sfa.rspecs.rspec import RSpec
@@ -33,7 +34,7 @@ class CreateSliver(Method):
         hrn, type = urn_to_hrn(slice_xrn)
 
         self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
-
+        print >>sys.stderr, " \r\n \r\n Createsliver.py call %s\ttarget-hrn: %s\tmethod-name: %s "%(self.api.interface, hrn, self.name)
         # Find the valid credentials
         valid_creds = self.api.auth.checkCredentials(creds, 'createsliver', hrn)
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
index 04359a0..996adab 100644 (file)
@@ -1,5 +1,5 @@
 import zlib
-
+import sys
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.sfatablesRuntime import run_sfatables
@@ -36,14 +36,16 @@ class ListResources(Method):
         # get slice's hrn from options    
         xrn = options.get('geni_slice_urn', '')
         (hrn, _) = urn_to_hrn(xrn)
-
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources.pyeuuuuuu call : hrn %s options %s" %( hrn,options ) 
         # Find the valid credentials
         valid_creds = self.api.auth.checkCredentials(creds, 'listnodes', hrn)
 
         # get hrn of the original caller 
         origin_hrn = options.get('origin_hrn', None)
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources  :origin_hrn %s sansvqalid credss %s " %(origin_hrn, Credential(string=creds[0]).get_gid_caller().get_hrn()) 
         if not origin_hrn:
             origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
+        print >>sys.stderr, " \r\n \r\n \t Lsitresources.py000 call : hrn %s self.api.interface %s  origin_hrn %s   \r\n \r\n \r\n " %(hrn ,self.api.interface,origin_hrn)          
         rspec = self.api.manager.ListResources(self.api, creds, options)
 
         # filter rspec through sfatables 
@@ -51,7 +53,8 @@ class ListResources(Method):
             chain_name = 'OUTGOING'
         elif self.api.interface in ['slicemgr']: 
             chain_name = 'FORWARD-OUTGOING'
-        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+        self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)  
+        print >>sys.stderr, " \r\n \r\n \t Listresources.py001 call : chain_name %s hrn %s origine_hrn %s " %(chain_name, hrn, origin_hrn)
         filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec) 
  
         if options.has_key('geni_compressed') and options['geni_compressed'] == True:
index eb60066..58ee4f4 100644 (file)
@@ -1,5 +1,6 @@
 from types import StringTypes
 from collections import defaultdict
+import sys
 
 from sfa.util.sfatime import utcparse, datetime_to_epoch
 from sfa.util.sfalogging import logger
@@ -130,13 +131,11 @@ class PlSlices:
         # slice belongs to out local plc or a myplc peer. We will assume it 
         # is a local site, unless we find out otherwise  
         peer = None
-
         # get this slice's authority (site)
         slice_authority = get_authority(hrn)
 
         # get this site's authority (sfa root authority or sub authority)
         site_authority = get_authority(slice_authority).lower()
-
         # check if we are already peered with this site_authority, if so
         peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
         for peer_record in peers:
index 8dffdfd..f36418d 100644 (file)
@@ -22,7 +22,7 @@ class SFAv1Lease:
 
     @staticmethod
     def add_leases(xml, leases):
-        
+        logger.debug("SFAV1LEASE \t add_lease ")
         network_elems = xml.xpath('//network')
         if len(network_elems) > 0:
             network_elem = network_elems[0]
@@ -37,6 +37,7 @@ class SFAv1Lease:
             lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
             lease_elem = network_elem.add_instance('lease', lease, lease_fields)
             lease_elems.append(lease_elem)
+            logger.debug("SFAV1LEASE \t add_lease lease %s" %(lease))
 
 
     @staticmethod
diff --git a/sfa/rspecs/elements/versions/slabv1Lease.py b/sfa/rspecs/elements/versions/slabv1Lease.py
new file mode 100644 (file)
index 0000000..24689c7
--- /dev/null
@@ -0,0 +1,53 @@
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+
+
+#from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
+#from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+
+
+
+class Slabv1Lease:
+    """Lease (reservation) handling for the Senslab v1 RSpec format.
+
+    Mirrors SFAv1Lease: serializes Lease objects under a <network>
+    element and parses <lease> elements back into Lease objects.
+    """
+
+    @staticmethod
+    def add_leases(xml, leases):
+        # Attach each lease to the (single) <network> element; create one
+        # named after the first lease's authority if none exists yet.
+        
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+         
+        lease_elems = []       
+        for lease in leases:
+            lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+            lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+            lease_elems.append(lease_elem)
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # NOTE(review): mutable default argument; harmless while never
+        # mutated, but a shared-default hazard if that ever changes.
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return Slabv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        # Build Lease objects from <lease> XML elements, copying the
+        # expected attributes; raises KeyError if component_id, slice_id,
+        # start_time or duration is missing from the element.
+        leases = []    
+        for lease_elem in lease_elems:
+            lease = Lease(lease_elem.attrib, lease_elem)
+            if lease.get('lease_id'):
+               lease['lease_id'] = lease_elem.attrib['lease_id']
+            lease['component_id'] = lease_elem.attrib['component_id']
+            lease['slice_id'] = lease_elem.attrib['slice_id']
+            lease['start_time'] = lease_elem.attrib['start_time']
+            lease['duration'] = lease_elem.attrib['duration']
+
+            leases.append(lease)
+        return leases      
\ No newline at end of file
diff --git a/sfa/rspecs/elements/versions/slabv1Node.py b/sfa/rspecs/elements/versions/slabv1Node.py
new file mode 100644 (file)
index 0000000..ce5a215
--- /dev/null
@@ -0,0 +1,223 @@
+
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+from sfa.util.sfalogging import logger
+
+class SlabNode(Node):
+    """RSpec node extended with Senslab-specific fields."""
+    #First get the fields already defined in the class Node
+    fields = list(Node.fields)
+    #Extend it with senslab's specific fields
+    fields.extend (['archi', 'radio', 'mobile','position'])
+    
+
+class SlabPosition(Element):
+    """3D position of a Senslab node (coordinates in the testbed room)."""
+    fields = ['posx', 'posy','posz']
+
+
+
+class Slabv1Node:
+    
+    @staticmethod
+    def add_connection_information(xml, ldap_username):
+        """ Adds login and ssh connection info in the network item in 
+        the xml. Does not create the network element, therefore 
+        should be used after add_nodes, which creates the network item.
+        
+        """
+        logger.debug(" add_connection_information xml %s" %(xml))
+        #Get network item in the xml
+        network_elems = xml.xpath('//network')  
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+
+        slab_network_dict = {}
+        slab_network_dict['login'] = ldap_username
+        slab_network_dict['vm'] = 'ssh ' + ldap_username + \
+                                        '@grenoble.senslab.info'
+        network_elem.set('vm', unicode(slab_network_dict['vm']))
+        network_elem.set('login', unicode( slab_network_dict['login']))
+
+        
+    @staticmethod
+    def add_nodes(xml, nodes):
+        #Add network item in the xml
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_urn = nodes[0]['component_manager_id']
+            network_elem = xml.add_element('network', \
+                                        name = Xrn(network_urn).get_hrn())
+        else:
+            network_elem = xml
+       
+        logger.debug("slabv1Node \t add_nodes  nodes %s \r\n "%(nodes))
+        node_elems = []
+        #Then add nodes items to the network item in the xml
+        for node in nodes:
+            #Attach this node to the network element
+            node_fields = ['component_manager_id', 'component_id', 'exclusive',\
+                                                    'boot_state', 'mobile']
+            node_elem = network_elem.add_instance('node', node, node_fields)
+            node_elems.append(node_elem)
+            
+            #Set the attibutes of this node element
+            for attribute in node: 
+            # set component name
+                if attribute is 'component_id':
+                    component_name = node['component_name']
+                    node_elem.set('component_name', component_name)
+                    
+            # set hardware types, extend fields to add Senslab's architecture
+            #and radio type
+                
+                if attribute is 'hardware_types':
+                    for hardware_type in node.get('hardware_types', []): 
+                        fields = HardwareType.fields
+                        fields.extend(['archi','radio'])
+                        node_elem.add_instance('hardware_types', node, fields)
+
+            # set location
+                if attribute is 'location':
+                    node_elem.add_instance('location', node['location'], \
+                                                        Location.fields)
+             # add granularity of the reservation system
+             #TODO put the granularity in network instead SA 18/07/12
+                if attribute is 'granularity' :
+                    granularity = node['granularity']
+                    if granularity:
+                        node_elem.add_instance('granularity', \
+                                    granularity, granularity.fields)
+                
+          
+            # set available element
+                if attribute is 'boot_state':
+                    if node.get('boot_state').lower() == 'alive':
+                        available_elem = node_elem.add_element('available', \
+                                                                    now='true')
+                    else:
+                        available_elem = node_elem.add_element('available', \
+                                                                now='false')
+
+            #set position 
+                if attribute is 'position':
+                    node_elem.add_instance('position', node['position'], \
+                                                        SlabPosition.fields)
+            ## add services
+            #PGv2Services.add_services(node_elem, node.get('services', [])) 
+            # add slivers
+                if attribute is 'slivers':
+                    slivers = node.get('slivers', [])
+                    if not slivers:
+                    # we must still advertise the available sliver types
+                        slivers = Sliver({'type': 'slab-node'})
+                    # we must also advertise the available initscripts
+                    #slivers['tags'] = []
+                    #if node.get('pl_initscripts'): 
+                        #for initscript in node.get('pl_initscripts', []):
+                            #slivers['tags'].append({'name': 'initscript', \
+                                                    #'value': initscript['name']})
+           
+                    Slabv1Sliver.add_slivers(node_elem, slivers)
+        return node_elems
+                    
+
+            
+    @staticmethod
+    def get_nodes(xml, filter={}):
+        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), \
+                                                    XpathFilter.xpath(filter))
+        node_elems = xml.xpath(xpath)  
+        return Slabv1Node.get_node_objs(node_elems)
+
+    @staticmethod 
+    def get_nodes_with_slivers(xml, sliver_filter={}):
+
+        xpath = '//node[count(sliver)>0] | \
+                                //default:node[count(default:sliver) > 0]' 
+        node_elems = xml.xpath(xpath)    
+        logger.debug("SLABV1NODE \tget_nodes_with_slivers  \
+                                node_elems %s"%(node_elems))
+        return Slabv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_node_objs(node_elems):
+        nodes = []
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            nodes.append(node) 
+            if 'component_id' in node_elem.attrib:
+                node['authority_id'] = \
+                    Xrn(node_elem.attrib['component_id']).get_authority_urn()
+            
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | \
+                                                            ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) \
+                                            for hw_type in hardware_type_elems]
+            
+            # get location
+            location_elems = node_elem.xpath('./default:location | ./location')
+            locations = [location_elem.get_instance(Location) \
+                                            for location_elem in location_elems]
+            if len(locations) > 0:
+                node['location'] = locations[0]
+
+            # get interfaces
+            iface_elems = node_elem.xpath('./default:interface | ./interface')
+            node['interfaces'] = [iface_elem.get_instance(Interface) \
+                                            for iface_elem in iface_elems]
+
+            # get services
+            #node['services'] = PGv2Services.get_services(node_elem)
+
+            # get slivers
+            node['slivers'] = Slabv1Sliver.get_slivers(node_elem)    
+            available_elems = node_elem.xpath('./default:available | \
+                                                                ./available')
+            if len(available_elems) > 0 and 'name' in available_elems[0].attrib:
+                if available_elems[0].attrib.get('now', '').lower() == 'true': 
+                    node['boot_state'] = 'boot'
+                else: 
+                    node['boot_state'] = 'disabled' 
+        return nodes
+
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        logger.debug("SLABv1NODE \tadd_slivers ")
+        component_ids = []
+        for sliver in slivers:
+            filter_sliver = {}
+            if isinstance(sliver, str):
+                filter_sliver['component_id'] = '*%s*' % sliver
+                sliver = {}
+            elif 'component_id' in sliver and sliver['component_id']:
+                filter_sliver['component_id'] = '*%s*' % sliver['component_id']
+            if not filter_sliver: 
+                continue
+            nodes = Slabv1Node.get_nodes(xml, filter_sliver)
+            if not nodes:
+                continue
+            node = nodes[0]
+            Slabv1Sliver.add_slivers(node, sliver)
+
+    @staticmethod
+    def remove_slivers(xml, hostnames):
+        for hostname in hostnames:
+            nodes = Slabv1Node.get_nodes(xml, \
+                                    {'component_id': '*%s*' % hostname})
+            for node in nodes:
+                slivers = Slabv1Sliver.get_slivers(node.element)
+                for sliver in slivers:
+                    node.element.remove(sliver.element) 
+
+        
+                                    
diff --git a/sfa/rspecs/elements/versions/slabv1Sliver.py b/sfa/rspecs/elements/versions/slabv1Sliver.py
new file mode 100644 (file)
index 0000000..370f55f
--- /dev/null
@@ -0,0 +1,58 @@
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+
+#from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
+import sys
+class Slabv1Sliver:
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        if not slivers:
+            return 
+        if not isinstance(slivers, list):
+            slivers = [slivers]
+        for sliver in slivers: 
+            #sliver_elem = xml.add_element('sliver_type')
+            sliver_elem = xml.add_element('sliver')
+            if sliver.get('type'):
+                sliver_elem.set('name', sliver['type'])
+            if sliver.get('client_id'):
+                sliver_elem.set('client_id', sliver['client_id'])
+            #images = sliver.get('disk_images')
+            #if images and isinstance(images, list):
+                #Slabv1DiskImage.add_images(sliver_elem, images)      
+            Slabv1Sliver.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
+    
+    @staticmethod
+    def add_sliver_attributes(xml, attributes):
+        if attributes: 
+            for attribute in attributes:
+                if attribute['name'] == 'initscript':
+                    xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
+                elif tag['tagname'] == 'flack_info':
+                    attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
+                    attrib_dict = eval(tag['value'])
+                    for (key, value) in attrib_dict.items():
+                        attrib_elem.set(key, value)                
+    @staticmethod
+    def get_slivers(xml, filter={}):
+        xpath = './default:sliver | ./sliver'
+     
+        sliver_elems = xml.xpath(xpath)
+        slivers = []
+        for sliver_elem in sliver_elems: 
+            sliver = Sliver(sliver_elem.attrib,sliver_elem)
+
+            if 'component_id' in xml.attrib:     
+                sliver['component_id'] = xml.attrib['component_id']
+            if 'name' in sliver_elem.attrib:
+                sliver['type'] = sliver_elem.attrib['name']
+            #sliver['images'] = Slabv1DiskImage.get_images(sliver_elem)
+                
+            print>>sys.stderr, "\r\n \r\n SLABV1SLIVER.PY  \t\t\t  get_slivers sliver %s " %( sliver)
+            slivers.append(sliver)
+        return slivers
+
+    @staticmethod
+    def get_sliver_attributes(xml, filter={}):
+        return []             
\ No newline at end of file
diff --git a/sfa/rspecs/pl_rspec_version.py b/sfa/rspecs/pl_rspec_version.py
new file mode 100644 (file)
index 0000000..eb4f9a6
--- /dev/null
@@ -0,0 +1,16 @@
+from sfa.rspecs.sfa_rspec import sfa_rspec_version
+from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version 
+
+# RSpec versions advertised by the PL aggregate: both the ProtoGENI ad
+# flavor and the SFA flavor.
+ad_rspec_versions = [
+    pg_rspec_ad_version,
+    sfa_rspec_version
+    ]
+
+# Requests accept the same set of versions as advertisements.
+request_rspec_versions = ad_rspec_versions
+
+# Fallback when the caller does not specify a version.
+default_rspec_version = { 'type': 'SFA', 'version': '1' }
+
+supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
+                    'request_rspec_versions': request_rspec_versions,
+                    'default_ad_rspec': default_rspec_version}
+
diff --git a/sfa/rspecs/versions/slabv1.py b/sfa/rspecs/versions/slabv1.py
new file mode 100644 (file)
index 0000000..32872ac
--- /dev/null
@@ -0,0 +1,281 @@
+from copy import deepcopy
+
+
+from sfa.rspecs.version import RSpecVersion
+import sys
+from sfa.rspecs.elements.versions.slabv1Lease import Slabv1Lease
+from sfa.rspecs.elements.versions.slabv1Node import Slabv1Node
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+
+
+from sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
+
+from sfa.util.sfalogging import logger
+class Slabv1(RSpecVersion):
+    """Senslab (slab) v1 RSpec version handler; specialised below as
+    Slabv1Ad / Slabv1Request / Slabv1Manifest.
+    """
+    #enabled = True
+    type = 'Slab'
+    content_type = 'ad'
+    version = '1'
+    #template = '<RSpec type="%s"></RSpec>' % type
+
+    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
+    namespace = 'http://www.geni.net/resources/rspec/3'
+    extensions = {
+        'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
+        'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
+    }
+    namespaces = dict(extensions.items() + [('default', namespace)])
+    elements = []
+    
+    # Network 
+    def get_networks(self):
+        # Return instances (name, slice) for every <network> element.
+        #WARNING Added //default:network to the xpath 
+        #otherwise network element not detected 16/07/12 SA
+        
+        network_elems = self.xml.xpath('//network | //default:network') 
+        networks = [network_elem.get_instance(fields=['name', 'slice']) for \
+                    network_elem in network_elems]
+        return networks    
+
+
+    def add_network(self, network):
+        # Return the existing <network name=...> element, creating it on
+        # first use.
+        network_tags = self.xml.xpath('//network[@name="%s"]' % network)
+        if not network_tags:
+            network_tag = self.xml.add_element('network', name=network)
+        else:
+            network_tag = network_tags[0]
+        return network_tag
+
+   
+    # Nodes
+
+    def get_nodes(self, filter=None):
+        return Slabv1Node.get_nodes(self.xml, filter)
+
+    def get_nodes_with_slivers(self):
+        return Slabv1Node.get_nodes_with_slivers(self.xml)
+    
+    def get_slice_timeslot(self ):
+        # NOTE(review): Slabv1Timeslot is neither defined nor imported in
+        # this module -- calling this raises NameError.
+        return Slabv1Timeslot.get_slice_timeslot(self.xml)
+    
+    def add_connection_information(self, ldap_username):
+        return Slabv1Node.add_connection_information(self.xml,ldap_username)
+    
+    def add_nodes(self, nodes, check_for_dupes=False):
+        # check_for_dupes is accepted for interface parity but ignored.
+        return Slabv1Node.add_nodes(self.xml,nodes )
+    
+    def merge_node(self, source_node_tag, network, no_dupes = False):
+        # Deep-copy a node element from another rspec under our
+        # <network> element; no_dupes is currently ignored.
+        logger.debug("SLABV1 merge_node")
+        #if no_dupes and self.get_node_element(node['hostname']):
+            ## node already exists
+            #return
+        network_tag = self.add_network(network)
+        network_tag.append(deepcopy(source_node_tag))
+
+    # Slivers
+    
+    def get_sliver_attributes(self, hostname, node, network=None): 
+        # Return the first sliver of the first node whose component_id
+        # matches hostname, or [] when none matches. The 'node' parameter
+        # is shadowed by the lookup result below.
+        print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY  get_sliver_attributes hostname %s " %(hostname)
+        nodes = self.get_nodes({'component_id': '*%s*' %hostname})
+        attribs = [] 
+        print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY  get_sliver_attributes-----------------nodes %s  " %(nodes)
+        if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+            node = nodes[0]
+        #if node : 
+            #sliver = node.xpath('./default:sliver | ./sliver')
+            #sliver = node.xpath('./default:sliver', namespaces=self.namespaces)
+            sliver = node['slivers']
+            
+            if sliver is not None and isinstance(sliver, list) and len(sliver) > 0:
+                sliver = sliver[0]
+                attribs = sliver
+                #attribs = self.attributes_list(sliver)
+                print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY  get_sliver_attributes----------NN------- sliver %s self.namespaces %s attribs %s " %(sliver, self.namespaces,attribs)
+        return attribs
+
+    def get_slice_attributes(self, network=None):
+        # Collect initscript attributes from every node that has slivers.
+        
+        slice_attributes = []
+
+        nodes_with_slivers = self.get_nodes_with_slivers()
+
+        # TODO: default sliver attributes in the PG rspec?
+        default_ns_prefix = self.namespaces['default']
+        for node in nodes_with_slivers:
+            sliver_attributes = self.get_sliver_attributes(node['component_id'],node, network)
+            for sliver_attribute in sliver_attributes:
+                name = str(sliver_attribute[0])
+                text = str(sliver_attribute[1])
+                attribs = sliver_attribute[2]
+                # we currently only suppor the <initscript> and <flack> attributes
+                #if  'info' in name:
+                    #attribute = {'name': 'flack_info', 'value': str(attribs), 'node_id': node}
+                    #slice_attributes.append(attribute)
+                #elif 'initscript' in name:
+                if 'initscript' in name:
+                    if attribs is not None and 'name' in attribs:
+                        value = attribs['name']
+                    else:
+                        value = text
+                    attribute = {'name': 'initscript', 'value': value, 'node_id': node}
+                    slice_attributes.append(attribute)
+          
+
+        return slice_attributes
+
+    def attributes_list(self, elem):
+        # Flatten an XML element's children to (tag, text, attrib) tuples.
+        opts = []
+        if elem is not None:
+            for e in elem:
+                opts.append((e.tag, str(e.text).strip(), e.attrib))
+        return opts
+
+    def get_default_sliver_attributes(self, network=None):
+        # No defaults for Senslab.
+        return []
+
+    def add_default_sliver_attribute(self, name, value, network=None):
+        # Defaults are not supported; intentionally a no-op.
+        pass
+
+    def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
+        # Turn an ad rspec into a request: tag matching nodes with a
+        # sliver of a valid type; unless append, drop nodes that end up
+        # without a client_id.
+        # all nodes hould already be present in the rspec. Remove all
+        # nodes that done have slivers
+        print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers  ----->get_node "
+        for hostname in hostnames:
+            node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
+            if not node_elems:
+                continue
+            node_elem = node_elems[0]
+            
+            # determine sliver types for this node
+            #TODO : add_slivers valid type of sliver needs to be changed 13/07/12 SA
+            valid_sliver_types = ['slab-node', 'emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
+            #valid_sliver_types = ['emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
+            requested_sliver_type = None
+            for sliver_type in node_elem.get('slivers', []):
+                if sliver_type.get('type') in valid_sliver_types:
+                    requested_sliver_type = sliver_type['type']
+            
+            if not requested_sliver_type:
+                continue
+            sliver = {'type': requested_sliver_type,
+                     'pl_tags': attributes}
+            print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers  node_elem %s sliver_type %s \r\n \r\n " %(node_elem, sliver_type)
+            # remove available element
+            for available_elem in node_elem.xpath('./default:available | ./available'):
+                node_elem.remove(available_elem)
+            
+            # remove interface elements
+            for interface_elem in node_elem.xpath('./default:interface | ./interface'):
+                node_elem.remove(interface_elem)
+        
+            # remove existing sliver_type elements
+            for sliver_type in node_elem.get('slivers', []):
+                node_elem.element.remove(sliver_type.element)
+
+            # set the client id
+            node_elem.element.set('client_id', hostname)
+            if sliver_urn:
+                pass
+                # TODO
+                # set the sliver id
+                #slice_id = sliver_info.get('slice_id', -1)
+                #node_id = sliver_info.get('node_id', -1)
+                #sliver_id = urn_to_sliver_id(sliver_urn, slice_id, node_id)
+                #node_elem.set('sliver_id', sliver_id)
+
+            # add the sliver type elemnt
+            Slabv1Sliver.add_slivers(node_elem.element, sliver)  
+            #Slabv1SliverType.add_slivers(node_elem.element, sliver)         
+
+        # remove all nodes without slivers
+        if not append:
+            for node_elem in self.get_nodes():
+                if not node_elem['client_id']:
+                    parent = node_elem.element.getparent()
+                    parent.remove(node_elem.element)
+
+    def remove_slivers(self, slivers, network=None, no_dupes=False):
+        # network / no_dupes are accepted for interface parity but unused.
+        Slabv1Node.remove_slivers(self.xml, slivers) 
+        
+        
+    # Utility
+    def merge(self, in_rspec):
+        """
+        Merge contents for specified rspec with current rspec
+        """
+
+        if not in_rspec:
+            return
+        
+        from sfa.rspecs.rspec import RSpec
+       
+        if isinstance(in_rspec, RSpec):
+            rspec = in_rspec
+        else:
+            rspec = RSpec(in_rspec)
+        if rspec.version.type.lower() == 'protogeni':
+            from sfa.rspecs.rspec_converter import RSpecConverter
+            in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
+            rspec = RSpec(in_rspec)
+        # just copy over all networks
+        #Attention special get_networks using //default:network xpath
+        current_networks = self.get_networks() 
+        networks = rspec.version.get_networks()
+        for network in networks:
+            current_network = network.get('name')
+            if current_network and current_network not in current_networks:
+                self.xml.append(network.element)
+                current_networks.append(current_network)
+
+
+
+
+        
+    # Leases
+
+    def get_leases(self, lease_filter=None):
+        # Delegates to SFAv1Lease; the slab-specific implementation is
+        # kept commented out below.
+        return SFAv1Lease.get_leases(self.xml, lease_filter)
+        #return Slabv1Lease.get_leases(self.xml, lease_filter)
+
+    def add_leases(self, leases, network = None, no_dupes=False):
+        # network / no_dupes are accepted for interface parity but unused.
+        SFAv1Lease.add_leases(self.xml, leases)
+        #Slabv1Lease.add_leases(self.xml, leases)    
+
+    def cleanup(self):
+        # remove unncecessary elements, attributes
+        if self.type in ['request', 'manifest']:
+            # remove 'available' element from remaining node elements
+            self.xml.remove_element('//default:available | //available')
+            
+            
+class Slabv1Ad(Slabv1):
+    # Advertisement flavor: same behavior as Slabv1, with the ad schema
+    # and an empty advertisement-rspec template.
+    enabled = True
+    content_type = 'ad'
+    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
+    #http://www.geni.net/resources/rspec/3/ad.xsd'
+    template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+class Slabv1Request(Slabv1):
+    # Request flavor: request schema and empty request-rspec template.
+    enabled = True
+    content_type = 'request'
+    schema = 'http://senslab.info/resources/rspec/1/request.xsd'
+    #http://www.geni.net/resources/rspec/3/request.xsd
+    template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+class Slabv1Manifest(Slabv1):
+    # Manifest flavor: manifest schema and empty manifest-rspec template.
+    enabled = True
+    content_type = 'manifest'
+    schema = 'http://senslab.info/resources/rspec/1/manifest.xsd'
+    #http://www.geni.net/resources/rspec/3/manifest.xsd
+    template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+
+# Ad-hoc smoke test: parse /tmp/slab.rspec and dump its nodes.
+# (Python 2 print statement -- this module is Python 2 only.)
+if __name__ == '__main__':
+    from sfa.rspecs.rspec import RSpec
+    from sfa.rspecs.rspec_elements import *
+    r = RSpec('/tmp/slab.rspec')
+    r.load_rspec_elements(Slabv1.elements)
+    r.namespaces = Slabv1.namespaces
+    print r.get(RSpecElements.NODE)
diff --git a/sfa/senslab/LDAPapi.py b/sfa/senslab/LDAPapi.py
new file mode 100644 (file)
index 0000000..f0ecb58
--- /dev/null
@@ -0,0 +1,688 @@
+import random
+from passlib.hash import ldap_salted_sha1 as lssha
+from sfa.util.xrn import get_authority 
+import ldap
+from sfa.util.config import Config
+from sfa.trust.hierarchy import Hierarchy
+#from sfa.trust.certificate import *
+import ldap.modlist as modlist
+from sfa.util.sfalogging import logger
+import os.path
+
+#API for OpenLDAP
+
+
+class ldap_config():
+    """Holds the LDAP settings loaded from a python configuration file
+    (default /etc/sfa/ldap_config.py).
+
+    Every name assigned in the config file (e.g. LDAP_IP_ADDRESS,
+    LDAP_PEOPLE_DN, ...) becomes an attribute of this object.
+    """
+    def __init__(self, config_file =  '/etc/sfa/ldap_config.py'):
+        self.load(config_file)
+
+    def load(self, config_file):
+        """Execute the config file, importing its variables as attributes.
+
+        Raises IOError when the file cannot be found or loaded.
+        """
+        try:
+            # execfile runs the config file with self.__dict__ as its
+            # globals, so each assignment lands directly on this instance.
+            execfile(config_file, self.__dict__)
+            self.config_file = config_file
+            # path to configuration data
+            self.config_path = os.path.dirname(config_file)
+        except IOError, error:
+            raise IOError, "Could not find or load the configuration file: %s" % config_file
+        
+class ldap_co:
+    """ Set admin login and server configuration variables."""
+    
+    def __init__(self):
+        #Senslab PROD LDAP parameters 
+        LdapConfig = ldap_config()
+        self.config = LdapConfig
+        self.ldapHost = LdapConfig.LDAP_IP_ADDRESS 
+        self.ldapPeopleDN = LdapConfig.LDAP_PEOPLE_DN
+        self.ldapGroupDN = LdapConfig.LDAP_GROUP_DN
+        # Credentials with write access, used for bound operations
+        self.ldapAdminDN = LdapConfig.LDAP_WEB_DN
+        self.ldapAdminPassword = LdapConfig.LDAP_WEB_PASSWORD
+
+
+        self.ldapPort = ldap.PORT
+        self.ldapVersion  = ldap.VERSION3
+        self.ldapSearchScope = ldap.SCOPE_SUBTREE
+
+
+    def connect(self, bind = True):
+        """Enables connection to the LDAP server.
+        Set the bind parameter to True if a bind is needed
+        (for add/modify/delete operations).
+        Set to False otherwise.
+        
+        Returns a dict {'bool': True} on success or
+        {'bool': False, 'message': <ldap error>} on failure.
+        """
+        try:
+            self.ldapserv = ldap.open(self.ldapHost)
+        except ldap.LDAPError, error:
+            return {'bool' : False, 'message' : error }
+        
+        # Bind with authentification
+        if(bind): 
+            return self.bind()
+        
+        else:     
+            return {'bool': True} 
+    
+    def bind(self):
+        """ Binding method. """
+        try:
+            # Opens a connection after a call to ldap.open in connect:
+            # NOTE(review): this replaces the handle created by ldap.open
+            # above with a fresh ldap.initialize connection.
+            self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
+                
+            # Bind/authenticate with a user with apropriate rights to add objects
+            self.ldapserv.simple_bind_s(self.ldapAdminDN, self.ldapAdminPassword)
+
+        except ldap.LDAPError, error:
+            return {'bool' : False, 'message' : error }
+
+        return {'bool': True}
+    
+    def close(self):
+        """ Close the LDAP connection.
+
+        Returns a failure dict on LDAP error; returns None on success.
+        """
+        try:
+            self.ldapserv.unbind_s()
+        except ldap.LDAPError, error:
+            return {'bool' : False, 'message' : error }
+            
+        
+            
+        
+class LDAPapi :
+    def __init__(self):
+        """Load the SFA and LDAP configuration needed to manage Senslab
+        user accounts (connection object, posix account defaults, the
+        password alphabet and the people base DN).
+        """
+        logger.setLevelDebug() 
+        #SFA related config
+        self.senslabauth = Hierarchy()
+        config = Config()
+        
+        # Root authority name, used to build/parse user hrns
+        self.authname = config.SFA_REGISTRY_ROOT_AUTH
+
+        # Connection helper + posix account defaults from ldap_config
+        self.conn =  ldap_co() 
+        self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS 
+        self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN 
+        self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER 
+        self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH 
+        
+        # Generated passwords are 8 characters long
+        self.lengthPassword = 8
+        # Users live under the people DN
+        self.baseDN = self.conn.ldapPeopleDN
+        #authinfo=self.senslabauth.get_auth_info(self.authname)
+        
+        
+        # Alphabet used by generate_password
+        self.charsPassword = [ '!','$','(',')','*','+',',','-','.',\
+                                '0','1','2','3','4','5','6','7','8','9',\
+                                'A','B','C','D','E','F','G','H','I','J',\
+                                'K','L','M','N','O','P','Q','R','S','T',\
+                                'U','V','W','X','Y','Z','_','a','b','c',\
+                                'd','e','f','g','h','i','j','k','l','m',\
+                                'n','o','p','q','r','s','t','u','v','w',\
+                                'x','y','z','\'']
+        
+        self.ldapShell = '/bin/bash'
+
+    
+    def generate_login(self, record):
+        """Generate login for adding a new user in LDAP Directory 
+        (four characters minimum length)
+        Record contains first name and last name.
+        
+        """ 
+        #Remove all special characters from first_name/last name
+        lower_first_name = record['first_name'].replace('-','')\
+                                        .replace('_','').replace('[','')\
+                                        .replace(']','').replace(' ','')\
+                                        .lower()
+        lower_last_name = record['last_name'].replace('-','')\
+                                        .replace('_','').replace('[','')\
+                                        .replace(']','').replace(' ','')\
+                                        .lower()  
+        length_last_name = len(lower_last_name)
+        login_max_length = 8
+        
+        #Try generating a unique login based on first name and last name
+        getAttrs = ['uid']
+        if length_last_name >= login_max_length :
+            login = lower_last_name[0:login_max_length]
+            index = 0
+            logger.debug("login : %s index : %s" %(login,index))
+        elif length_last_name >= 4 :
+            login = lower_last_name
+            index = 0
+            logger.debug("login : %s index : %s" %(login,index))
+        elif length_last_name == 3 :
+            login = lower_first_name[0:1] + lower_last_name
+            index = 1
+            logger.debug("login : %s index : %s" %(login,index))
+        elif length_last_name == 2:
+            if len ( lower_first_name) >=2:
+                login = lower_first_name[0:2] + lower_last_name
+                index = 2
+                logger.debug("login : %s index : %s" %(login,index))
+            else:
+                logger.error("LoginException : \
+                            Generation login error with \
+                            minimum four characters")
+            
+                
+        else :
+            logger.error("LDAP generate_login failed : \
+                            impossible to generate unique login for %s %s" \
+                            %(lower_first_name,lower_last_name))
+            
+        login_filter = '(uid=' + login + ')'
+        
+        try :
+            #Check if login already in use
+            while (len(self.LdapSearch(login_filter, getAttrs)) is not 0 ):
+            
+                index += 1
+                if index >= 9:
+                    logger.error("LoginException : Generation login error \
+                                    with minimum four characters")
+                else:
+                    try:
+                        login = lower_first_name[0:index] + \
+                                    lower_last_name[0:login_max_length-index]
+                        login_filter = '(uid='+ login+ ')'
+                    except KeyError:
+                        print "lower_first_name - lower_last_name too short"
+                        
+            logger.debug("LDAP.API \t generate_login login %s" %(login))
+            return login
+                    
+        except  ldap.LDAPError,error :
+            logger.log_exc("LDAP generate_login Error %s" %error)
+            return None
+
+        
+
+    def generate_password(self):
+    
+        """Generate password for adding a new user in LDAP Directory 
+        (8 characters length) return password
+        
+        """
+        password = str()
+        length = len(self.charsPassword)
+        for index in range(self.lengthPassword):
+            char_index = random.randint(0,length-1)
+            password += self.charsPassword[char_index]
+
+        return password
+
+    def encrypt_password(self, password):
+        """ Use passlib library to make a RFC2307 LDAP encrypted password
+        salt size = 8, use sha-1 algorithm. Returns encrypted password.
+        
+        """
+        #Keep consistency with Java Senslab's LDAP API 
+        #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytres
+        # lssha is passlib.hash.ldap_salted_sha1 (imported at file top)
+        return lssha.encrypt(password,salt_size = 8)
+    
+
+
+    def find_max_uidNumber(self):
+            
+        """Find the LDAP max uidNumber (POSIX uid attribute) .
+        Used when adding a new user in LDAP Directory 
+        returns string  max uidNumber + 1
+        
+        """
+        #First, get all the users in the LDAP
+        getAttrs = "(uidNumber=*)"
+        login_filter = ['uidNumber']
+
+        result_data = self.LdapSearch(getAttrs, login_filter) 
+        #It there is no user in LDAP yet, First LDAP user
+        if result_data == []:
+            max_uidnumber = self.ldapUserUidNumberMin
+        #Otherwise, get the highest uidNumber
+        else:
+            
+            uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data ]
+            logger.debug("LDAPapi.py \tfind_max_uidNumber  \
+                                    uidNumberList %s " %(uidNumberList))
+            max_uidnumber = max(uidNumberList) + 1
+            
+        return str(max_uidnumber)
+         
+         
+    def get_ssh_pkey(self, record):
+        """TODO ; Get ssh public key from sfa record  
+        To be filled by N. Turro ? or using GID pl way?
+        
+        NOTE(review): currently a stub -- always returns the placeholder
+        string 'A REMPLIR ' regardless of the record.
+        """
+        return 'A REMPLIR '
+
+    def make_ldap_filters_from_record(self, record=None):
+        """TODO Handle OR filtering in the ldap query when 
+        dealing with a list of records instead of doing a for loop in GetPersons   
+        Helper function to make LDAP filter requests out of SFA records.
+        """
+        req_ldap = ''
+        req_ldapdict = {}
+        if record :
+            if 'first_name' in record  and 'last_name' in record:
+                req_ldapdict['cn'] = str(record['first_name'])+" "\
+                                        + str(record['last_name'])
+            if 'email' in record :
+                req_ldapdict['mail'] = record['email']
+            if 'mail' in record:
+                req_ldapdict['mail'] = record['mail']
+            if 'enabled' in record:
+                if record['enabled'] == True :
+                    req_ldapdict['shadowExpire'] = '-1'
+                else:
+                    req_ldapdict['shadowExpire'] = '0'
+                
+            #Hrn should not be part of the filter because the hrn 
+            #presented by a certificate of a SFA user not imported in 
+            #Senslab  does not include the senslab login in it 
+            #Plus, the SFA user may already have an account with senslab
+            #using another login.
+                
+            #if 'hrn' in record :
+                #splited_hrn = record['hrn'].split(".")
+                #if splited_hrn[0] != self.authname :
+                    #logger.warning(" \r\n LDAP.PY \
+                        #make_ldap_filters_from_record I know nothing \
+                        #about %s my authname is %s not %s" \
+                        #%(record['hrn'], self.authname, splited_hrn[0]) )
+                        
+                #login=splited_hrn[1]
+                #req_ldapdict['uid'] = login
+            
+
+            logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
+                                record %s req_ldapdict %s" \
+                                %(record, req_ldapdict))
+            
+            for k in req_ldapdict:
+                req_ldap += '('+ str(k)+ '=' + str(req_ldapdict[k]) + ')'
+            if  len(req_ldapdict.keys()) >1 :
+                req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
+                size = len(req_ldap)
+                req_ldap = req_ldap[:(size-1)] +')'+ req_ldap[(size-1):]
+        else:
+            req_ldap = "(cn=*)"
+        
+        return req_ldap
+        
+    def make_ldap_attributes_from_record(self, record):
+        """When addind a new user to Senslab's LDAP, creates an attributes 
+        dictionnary from the SFA record.
+        
+        """
+
+        attrs = {}
+        attrs['objectClass'] = ["top", "person", "inetOrgPerson",\
+                                    "organizationalPerson", "posixAccount",\
+                                    "shadowAccount", "systemQuotas",\
+                                    "ldapPublicKey"]
+        
+        attrs['givenName'] = str(record['first_name']).lower().capitalize()
+        attrs['sn'] = str(record['last_name']).lower().capitalize()
+        attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
+        attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
+        attrs['uid'] = self.generate_login(record)   
+                    
+        attrs['quota'] = self.ldapUserQuotaNFS 
+        attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
+        attrs['loginShell'] = self.ldapShell
+        attrs['gidNumber'] = self.ldapUserGidNumber
+        attrs['uidNumber'] = self.find_max_uidNumber()
+        attrs['mail'] = record['mail'].lower()
+        try:
+            attrs['sshPublicKey'] = record['pkey']
+        except KeyError:
+            attrs['sshPublicKey'] = self.get_ssh_pkey(record) 
+        
+
+        #Password is automatically generated because SFA user don't go 
+        #through the Senslab website  used to register new users, 
+        #There is no place in SFA where users can enter such information
+        #yet.
+        #If the user wants to set his own password , he must go to the Senslab 
+        #website.
+        password = self.generate_password()
+        attrs['userPassword']= self.encrypt_password(password)
+        
+        #Account automatically validated (no mail request to admins)
+        #Set to 0 to disable the account, -1 to enable it,
+        attrs['shadowExpire'] = '-1'
+
+        #Motivation field in Senslab
+        attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
+
+        attrs['ou'] = 'SFA'         #Optional: organizational unit
+        #No info about those here:
+        attrs['l'] = 'To be defined'#Optional: Locality. 
+        attrs['st'] = 'To be defined' #Optional: state or province (country).
+
+        return attrs
+
+
+
+    def LdapAddUser(self, record) :
+        """Add SFA user to LDAP if it is not in LDAP  yet.
+
+        :param record: SFA user record (first_name, last_name, mail, ...)
+        :returns: {'bool': True} on success, {'bool': False} when the user
+            already exists, {'bool': False, 'message': ...} on LDAP error,
+            or the failed connection result dict.
+        """
+        
+        user_ldap_attrs = self.make_ldap_attributes_from_record(record)
+
+        
+        #Check if user already in LDAP wih email, first name and last name
+        # NOTE(review): the filter is built from the LDAP attributes (cn,
+        # mail) rather than the raw record, so in practice only the mail
+        # criterion applies -- confirm this is intended.
+        filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
+        user_exist = self.LdapSearch(filter_by)
+        if user_exist:
+            logger.warning(" \r\n \t LDAP LdapAddUser user %s %s already exists" \
+                            %(user_ldap_attrs['sn'],user_ldap_attrs['mail'])) 
+            return {'bool': False}
+        
+        #Bind to the server
+        result = self.conn.connect()
+        
+        if(result['bool']):
+            
+            # A dict to help build the "body" of the object
+            
+            logger.debug(" \r\n \t LDAP LdapAddUser attrs %s " %user_ldap_attrs)
+
+            # The dn of our new entry/object
+            dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN 
+
+            try:
+                # Convert the attribute dict to an add-modlist for add_s
+                ldif = modlist.addModlist(user_ldap_attrs)
+                logger.debug("LDAPapi.py add attrs %s \r\n  ldif %s"\
+                                %(user_ldap_attrs,ldif) )
+                self.conn.ldapserv.add_s(dn,ldif)
+                
+                logger.info("Adding user %s login %s in LDAP" \
+                        %(user_ldap_attrs['cn'] ,user_ldap_attrs['uid']))
+                        
+                        
+            except ldap.LDAPError, error:
+                logger.log_exc("LDAP Add Error %s" %error)
+                return {'bool' : False, 'message' : error }
+        
+            self.conn.close()
+            return {'bool': True}  
+        else: 
+            # Connection/bind failed: propagate the failure result
+            return result
+
+        
+    def LdapDelete(self, person_dn):
+        """
+        Deletes a person in LDAP. Uses the dn of the user.
+        """
+        #Connect and bind   
+        result =  self.conn.connect()
+        if(result['bool']):
+            try:
+                self.conn.ldapserv.delete_s(person_dn)
+                self.conn.close()
+                return {'bool': True}
+            
+            except ldap.LDAPError, error:
+                logger.log_exc("LDAP Delete Error %s" %error)
+                return {'bool': False}
+        
+    
+    def LdapDeleteUser(self, record_filter): 
+        """
+        Deletes a SFA person in LDAP, based on the user's hrn.
+
+        :param record_filter: record used to locate the user (see
+            make_ldap_filters_from_record).
+        :returns: the LdapDelete result dict, or {'bool': False} when the
+            user was not found.
+        """
+        #Find uid of the  person 
+        # NOTE(review): the [] here lands on the 'is_user_enabled'
+        # positional parameter (falsy, so no enabled filter is applied);
+        # presumably expected_fields was intended -- confirm.
+        person = self.LdapFindUser(record_filter,[])
+        logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s" \
+        %(record_filter, person))
+
+        if person:
+            dn = 'uid=' + person['uid'] + "," +self.baseDN 
+        else:
+            return {'bool': False}
+        
+        result = self.LdapDelete(dn)
+        return result
+        
+
+    def LdapModify(self, dn, old_attributes_dict, new_attributes_dict): 
+        """ Modifies a LDAP entry """
+         
+        ldif = modlist.modifyModlist(old_attributes_dict,new_attributes_dict)
+        # Connect and bind/authenticate    
+        result = self.conn.connect() 
+        if (result['bool']): 
+            try:
+                self.conn.ldapserv.modify_s(dn,ldif)
+                self.conn.close()
+                return {'bool' : True }
+            except ldap.LDAPError, error:
+                logger.log_exc("LDAP LdapModify Error %s" %error)
+                return {'bool' : False }
+    
+        
+    def LdapModifyUser(self, user_record, new_attributes_dict):
+        """
+        Gets the record from one user_uid_login based on record_filter 
+        and changes the attributes according to the specified new_attributes.
+        Does not use this if we need to modify the uid. Use a ModRDN 
+        #operation instead ( modify relative DN )
+        """
+        if user_record is None:
+            logger.error("LDAP \t LdapModifyUser Need user record  ")
+            return {'bool': False} 
+        
+        #Get all the attributes of the user_uid_login 
+        #person = self.LdapFindUser(record_filter,[])
+        req_ldap = self.make_ldap_filters_from_record(user_record)
+        person_list = self.LdapSearch(req_ldap,[])
+        logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s" %(person_list))
+        if person_list and len(person_list) > 1 :
+            logger.error("LDAP \t LdapModifyUser Too many users returned")
+            return {'bool': False}
+        if person_list is None :
+            logger.error("LDAP \t LdapModifyUser  User %s doesn't exist "\
+                        %(user_record))
+            return {'bool': False} 
+        
+        # The dn of our existing entry/object
+        #One result only from ldapSearch
+        person = person_list[0][1]
+        dn  = 'uid=' + person['uid'][0] + "," +self.baseDN  
+       
+        if new_attributes_dict:
+            old = {}
+            for k in new_attributes_dict:
+                if k not in person:
+                    old[k] =  ''
+                else :
+                    old[k] = person[k]
+            logger.debug(" LDAPapi.py \t LdapModifyUser  new_attributes %s"\
+                                %( new_attributes_dict))  
+            result = self.LdapModify(dn, old,new_attributes_dict)
+            return result
+        else:
+            logger.error("LDAP \t LdapModifyUser  No new attributes given. ")
+            return {'bool': False} 
+            
+            
+            
+            
+    def LdapMarkUserAsDeleted(self, record): 
+
+        
+        new_attrs = {}
+        #Disable account
+        new_attrs['shadowExpire'] = '0'
+        logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
+        ret = self.LdapModifyUser(record, new_attrs)
+        return ret
+
+            
+    def LdapResetPassword(self,record):
+        """
+        Resets password for the user whose record is the parameter and changes
+        the corresponding entry in the LDAP.
+        
+        """
+        password = self.generate_password()
+        attrs = {}
+        attrs['userPassword'] = self.encrypt_password(password)
+        logger.debug("LDAP LdapResetPassword encrypt_password %s"\
+                    %(attrs['userPassword']))
+        result = self.LdapModifyUser(record, attrs)
+        return result
+        
+        
+    def LdapSearch (self, req_ldap = None, expected_fields = None ):
+        """
+        Used to search directly in LDAP, by using ldap filters and
+        return fields. 
+        When req_ldap is None, returns all the entries in the LDAP.
+      
+        """
+        result = self.conn.connect(bind = False)
+        if (result['bool']) :
+            
+            return_fields_list = []
+            if expected_fields == None : 
+                return_fields_list = ['mail','givenName', 'sn', 'uid', \
+                                        'sshPublicKey', 'shadowExpire']
+            else : 
+                return_fields_list = expected_fields
+            #No specifc request specified, get the whole LDAP    
+            if req_ldap == None:
+                req_ldap = '(cn=*)'
+               
+            logger.debug("LDAP.PY \t LdapSearch  req_ldap %s \
+                                    return_fields_list %s" \
+                                    %(req_ldap, return_fields_list))
+
+            try:
+                msg_id = self.conn.ldapserv.search(
+                                            self.baseDN,ldap.SCOPE_SUBTREE,\
+                                            req_ldap, return_fields_list)     
+                #Get all the results matching the search from ldap in one 
+                #shot (1 value)
+                result_type, result_data = \
+                                        self.conn.ldapserv.result(msg_id,1)
+
+                self.conn.close()
+
+                logger.debug("LDAP.PY \t LdapSearch  result_data %s"\
+                            %(result_data))
+
+                return result_data
+            
+            except  ldap.LDAPError,error :
+                logger.log_exc("LDAP LdapSearch Error %s" %error)
+                return []
+            
+            else:
+                logger.error("LDAP.PY \t Connection Failed" )
+                return 
+        
+    def LdapFindUser(self, record = None, is_user_enabled=None, \
+            expected_fields = None):
+        """
+        Search a SFA user with a hrn. User should be already registered 
+        in Senslab LDAP. 
+        Returns one matching entry 
+        """   
+        custom_record = {}
+        if is_user_enabled: 
+          
+            custom_record['enabled'] = is_user_enabled
+        if record:  
+            custom_record.update(record)
+
+
+        req_ldap = self.make_ldap_filters_from_record(custom_record)     
+        return_fields_list = []
+        if expected_fields == None : 
+            return_fields_list = ['mail','givenName', 'sn', 'uid', \
+                                    'sshPublicKey']
+        else : 
+            return_fields_list = expected_fields
+            
+        result_data = self.LdapSearch(req_ldap, return_fields_list )
+        logger.debug("LDAP.PY \t LdapFindUser  result_data %s" %(result_data))
+           
+        if len(result_data) is 0:
+            return None
+        #Asked for a specific user
+        if record :
+            #try:
+            ldapentry = result_data[0][1]
+            logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" %(ldapentry))
+            tmpname = ldapentry['uid'][0]
+
+            tmpemail = ldapentry['mail'][0]
+            if ldapentry['mail'][0] == "unknown":
+                tmpemail = None
+                    
+            #except IndexError: 
+                #logger.error("LDAP ldapFindHRn : no entry for record %s found"\
+                            #%(record))
+                #return None
+                
+            try:
+                hrn = record['hrn']
+                parent_hrn = get_authority(hrn)
+                peer_authority = None
+                if parent_hrn is not self.authname:
+                    peer_authority = parent_hrn
+
+                results =  {   
+                            'type': 'user',
+                            'pkey': ldapentry['sshPublicKey'][0],
+                            #'uid': ldapentry[1]['uid'][0],
+                            'uid': tmpname ,
+                            'email':tmpemail,
+                            #'email': ldapentry[1]['mail'][0],
+                            'first_name': ldapentry['givenName'][0],
+                            'last_name': ldapentry['sn'][0],
+                            #'phone': 'none',
+                            'serial': 'none',
+                            'authority': parent_hrn,
+                            'peer_authority': peer_authority,
+                            'pointer' : -1,
+                            'hrn': hrn,
+                            }
+            except KeyError,error:
+                logger.log_exc("LDAPapi \t LdaFindUser KEyError %s" \
+                                %error )
+                return
+        else:
+        #Asked for all users in ldap
+            results = []
+            for ldapentry in result_data:
+                logger.debug(" LDAP.py LdapFindUser ldapentry name : %s " \
+                                %(ldapentry[1]['uid'][0]))
+                tmpname = ldapentry[1]['uid'][0]
+                hrn=self.authname+"."+ tmpname
+                
+                tmpemail = ldapentry[1]['mail'][0]
+                if ldapentry[1]['mail'][0] == "unknown":
+                    tmpemail = None
+
+        
+                parent_hrn = get_authority(hrn)
+                parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
+                try:
+                    results.append(  { 
+                            'type': 'user',
+                            'pkey': ldapentry[1]['sshPublicKey'][0],
+                            #'uid': ldapentry[1]['uid'][0],
+                            'uid': tmpname ,
+                            'email':tmpemail,
+                            #'email': ldapentry[1]['mail'][0],
+                            'first_name': ldapentry[1]['givenName'][0],
+                            'last_name': ldapentry[1]['sn'][0],
+                            #'phone': 'none',
+                            'serial': 'none',
+                            'authority': self.authname,
+                            'peer_authority': '',
+                            'pointer' : -1,
+                            'hrn': hrn,
+                            } ) 
+                except KeyError,error:
+                    logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s" %(error))
+                    return
+        return results   
+            
diff --git a/sfa/senslab/OARrestapi.py b/sfa/senslab/OARrestapi.py
new file mode 100644 (file)
index 0000000..e4e499b
--- /dev/null
@@ -0,0 +1,547 @@
+#import sys
+from httplib import HTTPConnection, HTTPException
+import json
+#import datetime
+#from time import gmtime, strftime 
+
+#import urllib
+#import urllib2
+from sfa.util.config import Config
+#from sfa.util.xrn import hrn_to_urn, get_authority, Xrn, get_leaf
+
+from sfa.util.sfalogging import logger
+
+
# IP address of the OAR REST API server for the Senslab testbed.
OARIP = '194.199.16.161'

# POST endpoints of the OAR REST API; the literal 'id' inside a uri is
# replaced by the actual job id at request time (see
# OARrestapi.POSTRequestToOARRestAPI).
OAR_REQUEST_POST_URI_DICT = {'POST_job':{'uri': '/oarapi/jobs.json'},
                            'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json'},
                            }

# Serialization format used for POST bodies (content-type header value
# and the module used to encode the payload).
POST_FORMAT = {'json' : {'content':"application/json", 'object':json},}
+
+#OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
+                        #'workdir':"/home/", 'walltime':""}
+
+                       
+
+
+
class OARrestapi:
    """Thin HTTP client around the OAR REST API.

    Holds the OAR server connection settings and delegates answer
    parsing to an OARGETParser instance (self.parser).
    """

    def __init__(self):
        # Connection settings used for every request.
        self.oarserver = {}
        self.oarserver['ip'] = OARIP
        self.oarserver['port'] = 8800
        self.oarserver['uri'] = None
        self.oarserver['postformat'] = 'json'

        # Job states reported by OAR.
        self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch',
                          'toError', 'toAckReservation', 'Launching',
                          'Finishing', 'Running', 'Suspended', 'Resuming',
                          'Error']

        # NOTE: OARGETParser.__init__ issues a GET_version request, so
        # building an OARrestapi immediately contacts the OAR server.
        self.parser = OARGETParser(self)

    def GETRequestToOARRestAPI(self, request, strval=None, username=None):
        """Send a GET request to the OAR REST API.

        :param request: key into OARGETParser.OARrequests_uri_dict
        :param strval: optional value substituted for 'id' in the uri
            (e.g. a job id)
        :param username: optional login; restricts the request to that
            user when the uri supports it, and is passed in the
            X-REMOTE_IDENT header
        :returns: the decoded json answer as a dict, or None when the
            connection or the json decoding failed
        """
        self.oarserver['uri'] = \
                            OARGETParser.OARrequests_uri_dict[request]['uri']
        # Restrict the request to a given job owner when supported.
        if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
            self.oarserver['uri'] += \
                OARGETParser.OARrequests_uri_dict[request]['owner'] + username
        headers = {}
        data = json.dumps({})
        logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" % (request))
        if strval:
            self.oarserver['uri'] = \
                self.oarserver['uri'].replace("id", str(strval))
            logger.debug("OARrestapi: \t  GETRequestToOARRestAPI  \
                            self.oarserver['uri'] %s strval %s" \
                            % (self.oarserver['uri'], strval))
        if username:
            headers['X-REMOTE_IDENT'] = username
        # OAR seems to require a content-length header even on GET.
        headers['content-length'] = '0'
        try:
            conn = HTTPConnection(self.oarserver['ip'],
                                  self.oarserver['port'])
            conn.request("GET", self.oarserver['uri'], data, headers)
            resp = (conn.getresponse()).read()
            conn.close()
        except HTTPException as error:
            logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s "
                           % (error))
            # BUGFIX: bail out here; the original fell through and hit a
            # NameError on the unbound 'resp'.
            return None
        try:
            return json.loads(resp)
        except ValueError as error:
            # BUGFIX: log the raw response; the original referenced the
            # unbound 'js_dict' here.
            logger.log_exc("Failed to parse Server Response: %s ERROR %s"
                           % (resp, error))
            return None

    def POSTRequestToOARRestAPI(self, request, datadict, username=None):
        """Post a job request to OAR along with its data.

        :param request: key into OAR_REQUEST_POST_URI_DICT
        :param datadict: payload of the job; the special 'strval' entry,
            if present, holds the job id to substitute in the uri and is
            removed from the posted data
        :param username: login passed in the X-REMOTE_IDENT header
        :returns: the decoded json answer, or None on failure
        """
        # First check that the requested POST uri is known.
        try:
            self.oarserver['uri'] = OAR_REQUEST_POST_URI_DICT[request]['uri']
        except KeyError:
            logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
                             valid")
            return None
        if datadict and 'strval' in datadict:
            self.oarserver['uri'] = self.oarserver['uri'].replace(
                "id", str(datadict['strval']))
            del datadict['strval']

        data = json.dumps(datadict)
        headers = {'X-REMOTE_IDENT': username,
                   'content-type': POST_FORMAT['json']['content'],
                   'content-length': str(len(data))}
        try:
            conn = HTTPConnection(self.oarserver['ip'],
                                  self.oarserver['port'])
            conn.request("POST", self.oarserver['uri'], data, headers)
            resp = (conn.getresponse()).read()
            conn.close()
        # BUGFIX: 'NotConnected' was never imported (NameError when the
        # except clause triggered); catch the imported HTTPException base
        # class instead (NotConnected derives from it).
        except HTTPException:
            logger.log_exc("POSTRequestToOARRestAPI connection ERROR: \
                            data %s \r\n \t\n \t\t headers %s uri %s"
                           % (data, headers, self.oarserver['uri']))
            return None
        try:
            answer = json.loads(resp)
            logger.debug("POSTRequestToOARRestAPI : answer %s" % (answer))
            return answer
        except ValueError as error:
            # BUGFIX: log the raw response; 'answer' is unbound here.
            logger.log_exc("Failed to parse Server Response: error %s answer \
                            %s" % (error, resp))
            return None
+
+
+
def AddOarNodeId(tuplelist, value):
    """Record the OAR internal identifier among a node's attributes."""
    oar_id = int(value)
    tuplelist.append(('oar_id', oar_id))
+
+       
def AddNodeNetworkAddr(dictnode, value):
    """Insert a new node entry keyed by its network address.

    The stored value is a tuple list seeded with the node id and the
    hostname, both equal to the network address. Returns the new key.
    """
    dictnode[value] = [('node_id', value), ('hostname', value)]
    return value
+        
def AddNodeSite(tuplelist, value):
    """Record the site a node belongs to."""
    site_name = str(value)
    tuplelist.append(('site', site_name))
+
def AddNodeRadio(tuplelist, value):
    """Record the node's radio chip type."""
    radio_name = str(value)
    tuplelist.append(('radio', radio_name))
+
+
def AddMobility(tuplelist, value):
    """Record whether the node is mobile.

    OAR reports mobility as a numeric flag: 0 means fixed, anything
    else means mobile. Stored as the string 'True' / 'False'.
    """
    # BUGFIX: the original used "value is 0", an identity test that is
    # only reliable for CPython's cached small ints and is always False
    # for 0.0; compare by value instead.
    if value == 0:
        tuplelist.append(('mobile', 'False'))
    else:
        tuplelist.append(('mobile', 'True'))
+
def AddPosX(tuplelist, value):
    """Record the x coordinate of the node's position."""
    tuplelist.append(('posx', value))
+
def AddPosY(tuplelist, value):
    """Record the y coordinate of the node's position."""
    tuplelist.append(('posy', value))
+    
def AddPosZ(tuplelist, value):
    """Record the z coordinate of the node's position."""
    tuplelist.append(('posz', value))
+       
def AddBootState(tuplelist, value):
    """Record the node's boot state as a string."""
    state = str(value)
    tuplelist.append(('boot_state', state))
+            
def AddNodeId(dictnode, value):
    """Insert a new node entry keyed by its integer node id.

    The stored value is a tuple list seeded with the node id.
    Returns the new key.
    """
    key = int(value)
    dictnode[key] = [('node_id', key)]
    return key
+
def AddHardwareType(tuplelist, value):
    """Split an 'archi:radio' hardware description into two attributes."""
    # NOTE(review): assumes value contains a ':' separator — an
    # IndexError is raised otherwise; confirm against OAR output.
    parts = value.split(':')
    tuplelist.extend([('archi', parts[0]), ('radio', parts[1])])
+    
+                       
class OARGETParser:
    """Parses the json answers of the OAR REST API.

    Each supported GET request is mapped to its uri on the OAR server
    and to the method used to parse its answer (see
    OARrequests_uri_dict at the bottom of the class). Parsed node
    information accumulates in self.node_dictlist, site information in
    self.site_dict.
    """

    # Maps a node attribute name, as found in the json returned by a
    # 'get resources full' request, to the helper function recording it.
    resources_fulljson_dict = {
        'network_address' : AddNodeNetworkAddr,
        'site': AddNodeSite,
        'radio': AddNodeRadio,
        'mobile': AddMobility,
        'x': AddPosX,
        'y': AddPosY,
        'z':AddPosZ,
        'archi':AddHardwareType,
        'state':AddBootState,
        'id' : AddOarNodeId,
        }

    def __init__(self, srv) :
        # Expected keys of a GET_version answer.
        self.version_json_dict = {
            'api_version' : None , 'apilib_version' :None,\
            'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
        self.config = Config()
        self.interface_hrn = self.config.SFA_INTERFACE_HRN
        # Expected keys of a GET_timezone answer.
        self.timezone_json_dict = {
            'timezone': None, 'api_timestamp': None, }
        # Expected keys of the jobs-related answers.
        self.jobs_json_dict = {
            'total' : None, 'links' : [],\
            'offset':None , 'items' : [], }
        self.jobs_table_json_dict = self.jobs_json_dict
        self.jobs_details_json_dict = self.jobs_json_dict
        # srv is the OARrestapi instance used to issue the requests.
        self.server = srv
        self.node_dictlist = {}
        self.raw_json = None
        self.site_dict = {}
        # Fetch the API version right away: ParseResourcesFull and
        # ParseResourcesFullSites rely on version_json_dict being set.
        self.SendRequest("GET_version")

    def ParseVersion(self) :
        """Fill version_json_dict from a GET_version answer.

        The key names differ between OAR API versions, hence the two
        branches.
        """
        if 'oar_version' in self.raw_json :
            self.version_json_dict.update(api_version = \
                                                self.raw_json['api_version'],
                            apilib_version = self.raw_json['apilib_version'],
                            api_timezone = self.raw_json['api_timezone'],
                            api_timestamp = self.raw_json['api_timestamp'],
                            oar_version = self.raw_json['oar_version'] )
        else :
            self.version_json_dict.update(api_version = self.raw_json['api'] ,
                            apilib_version = self.raw_json['apilib'],
                            api_timezone = self.raw_json['api_timezone'],
                            api_timestamp = self.raw_json['api_timestamp'],
                            oar_version = self.raw_json['oar'] )
        # NOTE(review): leftover debug print.
        print self.version_json_dict['apilib_version']

    def ParseTimezone(self) :
        """Return the (api_timestamp, timezone) pair of the OAR server."""
        api_timestamp = self.raw_json['api_timestamp']
        api_tz = self.raw_json['timezone']
        return api_timestamp, api_tz

    def ParseJobs(self) :
        """Return the raw json answer of a GET_jobs request."""
        self.jobs_list = []
        print " ParseJobs "
        return self.raw_json

    def ParseJobsTable(self) :
        """Not implemented: only traces the call."""
        print "ParseJobsTable"

    def ParseJobsDetails (self):
        # This request is seldom used, so it is unclear which fields
        # would be useful to extract; the full json is returned. NT
        #logger.debug("ParseJobsDetails %s " %(self.raw_json))
        return self.raw_json

    def ParseJobsIds(self):
        """Extract the interesting fields of a job description.

        Returns a dict restricted to the keys listed in job_resources,
        or None (after logging) when a key is missing from the answer.
        """
        job_resources = ['wanted_resources', 'name', 'id', 'start_time', \
                        'state','owner','walltime','message']

        # Complete list of the fields OAR returns for a job, kept here
        # for reference only.
        job_resources_full = ['launching_directory', 'links', \
            'resubmit_job_id', 'owner', 'events', 'message', \
            'scheduled_start', 'id', 'array_id',  'exit_code', \
            'properties', 'state','array_index', 'walltime', \
            'type', 'initial_request', 'stop_time', 'project',\
            'start_time',  'dependencies','api_timestamp','submission_time', \
            'reservation', 'stdout_file', 'types', 'cpuset_name', \
            'name',  'wanted_resources','queue','stderr_file','command']

        job_info = self.raw_json
        values = []
        try:
            for k in job_resources:
                values.append(job_info[k])
            return dict(zip(job_resources, values))

        except KeyError:
            logger.log_exc("ParseJobsIds KeyError ")

    def ParseJobsIdResources(self):
        """ Parses the json produced by the request
        /oarapi/jobs/id/resources.json.
        Returns a list of oar node ids that are scheduled for the
        given job id.

        """
        job_resources = []
        for resource in self.raw_json['items']:
            job_resources.append(resource['id'])

        return job_resources

    def ParseResources(self) :
        """ Parses the json produced by a get_resources request on oar."""
        # Resources are listed inside the 'items' list of the json.
        self.raw_json = self.raw_json['items']
        self.ParseNodes()

    def ParseReservedNodes(self):
        """  Returns an array containing the list of the reserved nodes """

        # Resources are listed inside the 'items' list of the json.
        reservation_list = []
        print "ParseReservedNodes_%s" %(self.raw_json['items'])
        job = {}
        # Parse resources info.
        for json_element in  self.raw_json['items']:
            # In case it is a real reservation (not the asap case).
            if json_element['scheduled_start']:
                job['t_from'] = json_element['scheduled_start']
                job['t_until'] = int(json_element['scheduled_start']) + \
                                                       int(json_element['walltime'])
                # Get the resources id list for the job.
                job['resource_ids'] = \
                    [ node_dict['id'] for node_dict in json_element['resources'] ]
            else:
                job['t_from'] = "As soon as possible"
                job['t_until'] = "As soon as possible"
                job['resource_ids'] = ["Undefined"]

            job['state'] = json_element['state']
            job['lease_id'] = json_element['id']

            job['user'] = json_element['owner']
            reservation_list.append(job)
            # Reset the dict for the next job.
            job = {}
        return reservation_list

    def ParseRunningJobs(self):
        """ Gets the list of nodes currently in use from the attributes of the
        running jobs.

        """
        logger.debug("OARESTAPI \tParseRunningJobs__________________________ ")
        # Resources are listed inside the 'items' list of the json.
        nodes = []
        for job in  self.raw_json['items']:
            for node in job['nodes']:
                nodes.append(node['network_address'])
        return nodes

    def ParseDeleteJobs(self):
        """ No need to parse anything in this function.A POST
        is done to delete the job.

        """
        return

    def ParseResourcesFull(self) :
        """ This method is responsible for parsing all the attributes
        of all the nodes returned by OAR when issuing a get resources full.
        The information from the nodes and the sites are separated.
        Updates the node_dictlist so that the dictionnary of the platform's
        nodes is available afterwards.

        """
        logger.debug("OARRESTAPI ParseResourcesFull________________________ ")
        # Resources are listed inside the 'items' list of the json,
        # except with apilib 0.2.10.
        if self.version_json_dict['apilib_version'] != "0.2.10" :
            self.raw_json = self.raw_json['items']
        self.ParseNodes()
        self.ParseSites()
        return self.node_dictlist

    def ParseResourcesFullSites(self) :
        """ UNUSED. Originally used to get information from the sites.
        ParseResourcesFull is used instead.

        """
        if self.version_json_dict['apilib_version'] != "0.2.10" :
            self.raw_json = self.raw_json['items']
        self.ParseNodes()
        self.ParseSites()
        return self.site_dict

    def ParseNodes(self):
        """ Parse nodes properties from OAR
        Put them into a dictionary with key = node id and value is a dictionary
        of the node properties and properties'values.

        """
        node_id = None
        keys = self.resources_fulljson_dict.keys()
        keys.sort()

        for dictline in self.raw_json:
            node_id = None
            # Insert the node first: AddNodeNetworkAddr creates the
            # entry and returns its key.
            node_id = self.resources_fulljson_dict['network_address'](\
                                self.node_dictlist, dictline['network_address'])
            for k in keys:
                if k in dictline:
                    if k == 'network_address':
                        continue

                    self.resources_fulljson_dict[k](\
                                    self.node_dictlist[node_id], dictline[k])

            # All properties collected: turn the property tuple list
            # (the dict value) into a dictionary and reset node_id.
            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
            node_id = None

    def slab_hostname_to_hrn(self, root_auth,  hostname):
        """Build a node hrn from the root authority and the hostname."""
        return root_auth + '.'+ hostname

    def ParseSites(self):
        """ Returns a list of dictionnaries containing the sites' attributes."""

        nodes_per_site = {}
        config = Config()
        # Create a list of nodes per site_id.
        for node_id in self.node_dictlist:
            node  = self.node_dictlist[node_id]

            if node['site'] not in nodes_per_site:
                nodes_per_site[node['site']] = []
                nodes_per_site[node['site']].append(node['node_id'])
            else:
                if node['node_id'] not in nodes_per_site[node['site']]:
                    nodes_per_site[node['site']].append(node['node_id'])

        # Create a site dictionary whose key is the site name and whose
        # value is a dictionary of properties, including the list of
        # node_ids; also stamp each node with its hrn.
        for node_id in self.node_dictlist:
            node  = self.node_dictlist[node_id]
            node.update({'hrn':self.slab_hostname_to_hrn(self.interface_hrn, node['hostname'])})
            self.node_dictlist.update({node_id:node})

            if node['site'] not in self.site_dict:
                # NOTE(review): latitude/longitude/url are hardcoded
                # placeholders for the Senslab site.
                self.site_dict[node['site']] = {
                    'site':node['site'],
                    'node_ids':nodes_per_site[node['site']],
                    'latitude':"48.83726",
                    'longitude':"- 2.10336",'name':config.SFA_REGISTRY_ROOT_AUTH,
                    'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
                    'max_slivers':None, 'is_public':True, 'peer_site_id': None,
                    'abbreviated_name':"senslab", 'address_ids': [],
                    'url':"http,//www.senslab.info", 'person_ids':[],
                    'site_tag_ids':[], 'enabled': True,  'slice_ids':[],
                    'date_created': None, 'peer_id': None }

    # Maps each supported GET request name to its uri on the OAR server
    # and to the method used to parse its json answer. The optional
    # 'owner' entry is the query fragment used to filter by user.
    OARrequests_uri_dict = {
        'GET_version':
                {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
        'GET_timezone':
                {'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
        'GET_jobs':
                {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
        'GET_jobs_id':
                {'uri':'/oarapi/jobs/id.json','parse_func': ParseJobsIds},
        'GET_jobs_id_resources':
                {'uri':'/oarapi/jobs/id/resources.json',\
                'parse_func': ParseJobsIdResources},
        'GET_jobs_table':
                {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
        'GET_jobs_details':
                {'uri':'/oarapi/jobs/details.json',\
                'parse_func': ParseJobsDetails},
        'GET_reserved_nodes':
                {'uri':
                '/oarapi/jobs/details.json?state=Running,Waiting,Launching',\
                'owner':'&user=',
                'parse_func':ParseReservedNodes},

        'GET_running_jobs':
                {'uri':'/oarapi/jobs/details.json?state=Running',\
                'parse_func':ParseRunningJobs},
        'GET_resources_full':
                {'uri':'/oarapi/resources/full.json',\
                'parse_func': ParseResourcesFull},
        'GET_sites':
                {'uri':'/oarapi/resources/full.json',\
                'parse_func': ParseResourcesFullSites},
        'GET_resources':
                {'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
        'DELETE_jobs_id':
                {'uri':'/oarapi/jobs/id.json' ,'parse_func': ParseDeleteJobs}
        }

    def SendRequest(self, request, strval = None , username = None):
        """ Connects to OAR , sends the valid GET requests and uses
        the appropriate json parsing functions.

        """
        if request in self.OARrequests_uri_dict :
            self.raw_json = self.server.GETRequestToOARRestAPI(request, \
                                                                strval, \
                                                                username)
            return self.OARrequests_uri_dict[request]['parse_func'](self)
        else:
            # NOTE(review): the format string below has no '%s'
            # placeholder, so the '%' operator itself raises TypeError
            # instead of logging; should read
            # "... ERROR_REQUEST %s" % (request).
            logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST " \
                                                                 %(request))
+            
diff --git a/sfa/senslab/TestSuite.py b/sfa/senslab/TestSuite.py
new file mode 100644 (file)
index 0000000..4209409
--- /dev/null
@@ -0,0 +1,263 @@
+###########################################################################
+#    Copyright (C) 2012 by                                       
+#    <savakian@sfa2.grenoble.senslab.info>                                                             
+#
+# Copyright: See COPYING file that comes with this distribution
+#
+###########################################################################
+#LDAP import 
+from sfa.senslab.LDAPapi import *
+import ldap.modlist as modlist
+import ldap as L
+
+#logger sfa
+from sfa.util.sfalogging import logger
+
+#OAR imports
+from datetime import datetime
+from dateutil import tz 
+from time import strftime,gmtime
+from sfa.senslab.OARrestapi import OARrestapi
+
+#Test slabdriver
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.util.config import Config
+
+import sys
+
+
+        
def parse_options():
    """Parse the command line options of the test script.

    Each option names a test suite ('OAR', 'LDAP', 'driver', 'all' —
    see supported_options) and may be preceded by '-value' arguments,
    which are collected and passed to the test function. For example
    "-10 OAR" runs TestOAR(['10']).

    Returns a dict {option_name: [collected values]}.
    """
    # BUGFIX: initialise the result before the argument check so that
    # running the script without arguments returns an empty dict
    # instead of raising UnboundLocalError on the return statement.
    valid_options_dict = {}
    if len(sys.argv) > 1:
        value_list = []
        for option in sys.argv[1:]:
            if option in supported_options:
                # Associate the values collected so far with this option
                # and start a fresh list for the next one.
                valid_options_dict[option] = value_list
                value_list = []
            elif option[0] == '-':
                # '-xx' arguments are values for the next option.
                value_list.append(option[1:])
    return valid_options_dict
+    
def TestLdap():
    """Exercise the LDAPapi wrapper.

    Tests, in order: bound and anonymous connection, searches, password
    and login generation, then full add/modify/delete cycles for two
    throwaway users ('drake'/Robin and 'grayson'/Nightwing), and finally
    a public key and password update on an existing account.
    """
    logger.setLevelDebug()

    ldap = LDAPapi()
    ret = ldap.conn.connect(bind=True)
    ldap.conn.close()
    print "TEST ldap.conn.connect(bind=True)" , ret

    ret = ldap.conn.connect(bind=False)
    ldap.conn.close()
    print "TEST ldap.conn.connect(bind=False)", ret

    ret = ldap.LdapSearch()
    print "TEST ldap.LdapSearch ALL",ret

    ret = ldap.LdapSearch('(uid=avakian)', [])
    print "\r\n TEST ldap.LdapSearch ids = avakian",ret

    password = ldap.generate_password()
    print "\r\n TEST generate_password ",password

    maxi = ldap.find_max_uidNumber()
    print "\r\n TEST find_max_uidNumber " , maxi

    # Attributes of the 'Robin' throwaway user.
    data = {}
    data['last_name'] = "Drake"
    data['first_name']="Tim"
    data['givenName']= data['first_name']
    data['mail'] = "robin@arkham.fr"

    record={}
    record['hrn'] = 'senslab2.drake'
    record['last_name'] = "Drake"
    record['first_name']="Tim"
    record['mail'] = "robin@arkham.fr"

    # Attributes of the 'Nightwing' throwaway user.
    datanight = {}
    datanight['last_name'] = "Grayson"
    datanight['first_name']="Dick"
    datanight['givenName']= datanight['first_name']
    datanight['mail'] = "nightwing@arkham.fr"

    record_night = {}
    record_night['hrn'] = 'senslab2.grayson'
    record_night['last_name'] = datanight['last_name']
    record_night['first_name'] = datanight['first_name']
    record_night['mail'] = datanight['mail']

    login = ldap.generate_login(data)
    # NOTE(review): prints 'ret' (result of the previous call) instead
    # of 'login'.
    print "\r\n Robin \tgenerate_login  ", ret

    ret = ldap.LdapAddUser(data)
    print "\r\n Robin  \tLdapAddUser ", ret

    req_ldap = '(uid=' + login + ')'
    ret = ldap.LdapSearch(req_ldap, [])
    print "\r\n Robin \tldap.LdapSearch ids = %s %s"%(login,ret )

    password = "Thridrobin"
    enc = ldap.encrypt_password(password)
    print "\r\n Robin \tencrypt_password ", enc

    ret = ldap.LdapModifyUser(record, {'userPassword':enc})
    print "\r\n Robin \tChange password LdapModifyUser ", ret

    dn = 'uid=' + login + ',' + ldap.baseDN
    ret = ldap.LdapDelete(dn)
    print "\r\n Robin  \tLdapDelete ", ret

    ret = ldap.LdapFindUser(record_night)
    print "\r\n Nightwing \tldap.LdapFindHrn %s : %s"%(record_night,ret)

    ret = ldap.LdapSearch('(uid=grayson)', [])
    print "\r\n Nightwing \tldap.LdapSearch ids = %s %s"%('grayson',ret )

    ret = ldap.LdapAddUser(datanight)
    print "\r\n Nightwing \tLdapAddUser ", ret

    ret = ldap.LdapResetPassword(record_night)
    print "\r\n Nightwing  \tLdapResetPassword de %s : %s "%(record_night,ret)

    ret = ldap.LdapDeleteUser(record_night)
    print "\r\n Nightwing   \tLdapDeleteUser ", ret

    # Update the ssh public key and password of an existing account.
    record_avakian = {}
    record_avakian['last_name'] = 'avakian'
    record_avakian['first_name'] = 'sandrine'
    record_avakian['email'] = 'sandrine.avakian@inria.fr'
    pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwSUkJ+cr3xM47h8lFkIXJoJhg4wHakTaLJmgTXkzvUmQsQeFB2MjUZ6WAelMXj/EFz2+XkK+bcWNXwfbrLptJQ+XwGpPZlu9YV/kzO63ghVrAyEg0+p7Pn1TO9f1ZYg4R6JfP/3qwH1AsE+X3PNpIewsuEIKwd2wUCJDf5RXJTpl39GizcBFemrRqgs0bdqAN/vUT9YvtWn8fCYR5EfJHVXOK8P1KmnbuGZpk7ryz21pDMlgw13+8aYB+LPkxdv5zG54A5c6o9N3zOCblvRFWaNBqathS8y04cOYWPmyu+Q0Xccwi7vM3Ktm8RoJw+raQNwsmneJOm6KXKnjoOQeiQ== savakian@sfa2.grenoble.senslab.info"

    ret = ldap.LdapModifyUser(record_avakian, {'sshPublicKey':pubkey})
    print "\r\n Sandrine \tChange pubkey LdapModifyUser ", ret

    password = "ReptileFight"
    enc = ldap.encrypt_password(password)
    print "\r\n sandrine \tencrypt_password ", enc

    ret = ldap.LdapModifyUser(record_avakian, {'userPassword':enc})
    print "\r\n sandrine \tChange password LdapModifyUser ", ret
    return
+
+
def get_stuff(oar, uri):
    """Issue a raw GET request on the OAR server and return the decoded
    json answer.

    Bypasses OARrestapi.GETRequestToOARRestAPI so that arbitrary uris
    can be queried; the X-REMOTE_IDENT user is hardcoded to 'avakian'.
    """
    import httplib
    import json
    headers = {}
    data = json.dumps({})

    headers['X-REMOTE_IDENT'] = 'avakian'

    headers['content-length'] = '0' #seems that it does not work if we don't add this


    conn = httplib.HTTPConnection(oar.oarserver['ip'],oar.oarserver['port'])
    conn.request("GET",uri,data , headers )
    resp = ( conn.getresponse()).read()
    conn.close()


    js = json.loads(resp)
    return js
+            
+
+
+  
def TestOAR(job_id = None):
    """Exercise the OAR REST API wrapper.

    Queries reserved nodes, the job list, the details and resources of
    job_id, the server timezone and the full resource list.

    job_id may be passed as a single-element list (as produced by
    parse_options); it must denote an existing OAR job.
    """
    if isinstance(job_id,list) and len(job_id) == 1:
       job_id = job_id[0]

    oar = OARrestapi()
    jobs = oar.parser.SendRequest("GET_reserved_nodes")
    print "\r\n OAR GET_reserved_nodes ",jobs

    jobs = oar.parser.SendRequest("GET_jobs")
    print "\r\n OAR GET_jobs ",jobs

    jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
    print "\r\n OAR  GET_jobs_id ",jobs

    uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
    raw_json = get_stuff(oar,uri)
    print "\r\nOAR ", uri, raw_json, "\r\n KKK \t",raw_json.keys()

    # NOTE(review): job_id is concatenated to the uri below, so it must
    # be a string; calling TestOAR() with no argument fails here.
    uri = '/oarapi/jobs/' + job_id +'.json'
    raw_json = get_stuff(oar,uri)
    print "\r\n OAR  ",uri,raw_json, "\r\n KKK \t",raw_json.keys()

    uri = '/oarapi/jobs/' + job_id + '/resources.json'
    raw_json = get_stuff(oar,uri)
    print "\r\n OAR  ",uri, raw_json, "\r\n KKK \t",raw_json.keys()

    time_format = "%Y-%m-%d %H:%M:%S"

    server_timestamp,server_tz = oar.parser.SendRequest("GET_timezone")

    print "\r\n OAR  GetTimezone ",server_timestamp, server_tz
    print(datetime.fromtimestamp(int(server_timestamp)).strftime('%Y-%m-%d %H:%M:%S'))

    uri = '/oarapi/resources/full.json'
    raw_json = get_stuff(oar,uri)
    print "\r\n OAR  ",uri, raw_json, "\r\n KKK \t",raw_json.keys()


    return
+    
def TestSlabDriver(job_id):
    """Exercise SlabDriver.GetSlices with both supported filter types."""
    if isinstance(job_id,list) and len(job_id) == 1:
       job_id = job_id[0]
    slabdriver = SlabDriver(Config())
    l = slabdriver.GetSlices(slice_filter = 'senslab2.avakian_slice', slice_filter_type = 'slice_hrn')
    print l
    l = slabdriver.GetSlices(slice_filter = '29', slice_filter_type = 'record_id_user')
    print l
    #slabdriver.DeleteJobs(job_id,'senslab2.avakian_slice')
+   
def RunAll():
    """Run the LDAP and OAR test sequences ('all' command line option)."""
    # NOTE(review): TestOAR() is called without a job id, so the uri
    # concatenations inside it ('.../jobs/' + job_id) will fail.
    TestLdap()
    TestOAR()
+    
+   
# Maps each command line option to the test function it triggers
# (see parse_options and main).
supported_options = {
        'OAR' : TestOAR,
        'LDAP': TestLdap,
        'driver': TestSlabDriver,
        'all' : RunAll }
+        
def main():
    """Entry point: run the test suite selected by each command line
    option, passing it the values collected for that option."""
    chosen = parse_options()
    for name in chosen:
        supported_options[name](chosen[name])

if __name__ == "__main__":
    main()
\ No newline at end of file
diff --git a/sfa/senslab/__init__.py b/sfa/senslab/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/senslab/config/bash_nukem b/sfa/senslab/config/bash_nukem
new file mode 100755 (executable)
index 0000000..b1a56d5
--- /dev/null
@@ -0,0 +1,79 @@
+#!/bin/bash 
+
+# bash_nukem: wipe and rebuild a senslab SFA deployment from a local
+# git checkout.
+# Usage: bash_nukem <repository_directory> <vm>  (vm: "senslab" or "senslab2")
+# Require exactly two arguments.
+if (( ! $# == 2 ))
+then
+    echo " Usage : bash_nukem repository_directory vm (should be senslab or senslab2)"
+    echo  $#
+    exit
+fi
+
+# Check that the repository directory exists.
+# NOTE(review): if $1 is NOT a directory, git_local_repo stays unset and the
+# script carries on regardless -- it should probably exit here instead.
+if [ -d $1 ]
+then
+    git_local_repo=$1
+    # NOTE(review): "RESPOSITORY" is a typo for "REPOSITORY" in the echoed string.
+    echo "RESPOSITORY: "  $git_local_repo
+   
+fi
+
+# Select which vm we are working on (sfa-vm or sfa-vm2).
+if [[ $2 = "senslab" || $2 = "senslab2" ]]
+then
+    vm=$2
+    echo $vm
+else
+    echo "Vm options should be senslab or senslab2, not " $2
+    exit 
+fi
+
+# Nuke the SFA registry database.
+sudo sfaadmin.py registry nuke
+
+# Drop the slice table in slab_sfa
+# to avoid duplicates on re-import.
+
+psql -d slab_sfa -U sfa -W -q -c "drop table slice_senslab;"
+
+
+# ATTENTION: save the config file /etc/sfa/sfa_config before continuing --
+# the next commands delete everything under /etc/sfa and /var/lib/sfa.
+
+# Remove all the remaining gid and cred files
+# of the server.
+sudo rm -rf /var/lib/sfa
+cd /etc/sfa
+sudo rm -rf *
+sudo service sfa restart
+
+# Rebuild and reinstall SFA from the local checkout.
+cd $git_local_repo
+sudo make clean
+make
+sudo python setup.py install
+# sudo service sfa restart
+
+# Restore the per-vm configuration files kept in the repository
+# (equivalent to what sfa-config-tty would regenerate by merging
+#         /etc/sfa/default_config.xml
+# and     /etc/sfa/configs/site.xml
+# into    /etc/sfa/sfa_config.xml).
+# sudo sfa-config-tty
+sudo cp $git_local_repo/sfa/senslab/config/$vm/sfa_config /etc/sfa/sfa_config
+sudo cp $git_local_repo/sfa/senslab/config/$vm/sfa_config.xml /etc/sfa/sfa_config.xml
+sudo cp $git_local_repo/sfa/senslab/config/$vm/site.xml  /etc/sfa/site.xml
+# sudo ln -s ldap_config.py  /etc/sfa/ldap_config.py
+sudo cp $git_local_repo/sfa/senslab/config/ldap_config.py  /etc/sfa/ldap_config.py 
+sudo service sfa restart
+
+# User side: clean the cached credentials in ~/.sfi.
+cd  ~/.sfi
+rm *.sscert *.cred *.gid sfi_cache.dat
+cd ~
+
+# Re-import the registry database from LDAP.
+sudo sfaadmin.py registry import_registry
+sudo service sfa restart
+
+sudo rm -rf /var/lib/sfa/authorities/plc
\ No newline at end of file
diff --git a/sfa/senslab/config/senslab/default_config.xml b/sfa/senslab/config/senslab/default_config.xml
new file mode 100644 (file)
index 0000000..b08f738
--- /dev/null
@@ -0,0 +1,324 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Default SFA configuration file
+
+Thierry Parmentelat 
+
+-->
+
+<!DOCTYPE configuration PUBLIC "-//PlanetLab Central//DTD PLC configuration//EN" "plc_config.dtd">
+
+<configuration>
+  <variables>
+
+    <!-- ======================================== -->
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+
+      <variablelist>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>pl</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>plc</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+
+        <variable id="credential_schema" type="string">
+          <name>Credential Schema</name>
+          <value>/etc/sfa/credential.xsd</value>
+          <description>The path to the default credential schema</description>
+        </variable>
+
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>0</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+    
+        <variable id="max_slice_renew" type="int">
+          <name>Max Slice Renew</name>
+          <value>60</value>
+          <description>Maximum amount of days a user can extend/renew their slices to</description>
+        </variable>
+
+        <variable id="session_key_path" type="string">
+            <name>User Session Keys Path </name>
+            <value>/var/lib/sfa/session_keys</value>
+            <description>Some services will perform operations on behalf of a user, but make
+            it look like the user is the one performing the operation. Doing this requires a 
+            valid key pair and credential for the user. This option defines the path where 
+            key pairs and credentials are generated and stored.
+            This functionality is used by the SFA web GUI. 
+            </description> 
+        </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Registry</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as a
+         registry.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the registry is expected to
+         be found; using localhost when the local registry is enabled
+         seems reasonable.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12345</value>
+         <description>The port where the registry is to be found.</description>
+       </variable>
+
+       <variable id="root_auth" type="string">
+         <name>Root Authority</name>
+         <value>plc</value>
+         <description>The hrn of the registry's root auth.</description>
+       </variable>
+
+    </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Slice Manager</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as a
+         slice manager.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the slice manager is expected to
+         be found.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12347</value>
+         <description>The port where the slice manager is to be found.</description>
+       </variable>
+
+       <variable id="caching" type="boolean">
+         <name>Cache advertisement rspec</name>
+         <value>false</value>
+         <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+         </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Aggregate</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as an
+         aggregate manager.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the aggregate is expected to
+         be found.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12346</value>
+         <description>The port where the aggregate is to be found.</description>
+       </variable>
+
+       <variable id="caching" type="boolean">
+         <name>Cache advertisement rspec</name>
+         <value>true</value>
+         <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+         </variable>
+
+      </variablelist>
+
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_db">
+      <name></name>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enabled</name>
+         <value>true</value>
+         <description>Enable the database server on this machine.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Database host</name>
+         <value>localhost</value>
+         <description>The host where the SFA database can be reached.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Database port</name>
+         <value>5432</value>
+         <description>The port where the SFA database can be reached.</description>
+       </variable>
+
+       <variable id="user" type="string">
+         <name>Database user</name>
+         <value>sfadbuser</value>
+         <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+       </variable>
+
+       <variable id="password" type="string">
+         <name>Database password</name>
+         <value></value>
+         <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+       </variable>
+
+       <variable id="name" type="string">
+         <name>Database name</name>
+         <value>sfa</value>
+         <description>SFA database name.</description>
+       </variable>
+
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_flashpolicy">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect the flash policy server that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Flash Policy Server</name>
+          <value>false</value>
+          <description>Allows this local SFA instance to run a
+          flash policy server.</description>
+        </variable>
+        <variable id="config_file" type="string">
+          <name>Flash policy config file</name>
+          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+          <description>The path to where the flash policy config file can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Flash policy port</name>
+          <value>843</value>
+          <description>The flash policy server port.</description>
+        </variable>
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_plc">
+      <name></name>
+      <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+
+      <variablelist>
+       <variable id="user" type="string">
+         <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+         <value>root@localhost.localdomain</value>
+         <description></description>
+       </variable>
+
+       <variable id="password" type="string">
+         <name>Password</name>
+         <value>root</value>
+         <description>The PLC password for SFA_PLC_USER.</description>
+       </variable>
+
+       <variable id="url" type="string">
+         <name>URL</name>
+         <value>https://localhost:443/PLCAPI/</value>
+         <description>Full URL of PLC interface.</description>
+       </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_federica">
+      <name></name>
+      <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+
+      <variablelist>
+       <variable id="url" type="string">
+         <name>XMLRPC URL</name>
+         <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+         <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+       </variable>
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_nova">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect how SFA connects to 
+                   the Nova/EC2 API</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Sfa nova user</name>
+          <value>novaadmin</value>
+          <description>Account/context to use when performing 
+                       administrative nova operations</description>
+        </variable>
+        <variable id="api_url" type="string">
+          <name>Nova API url</name>
+          <value>127.0.0.1</value>
+          <description>The Nova/EC2 API url </description>
+        </variable>
+        <variable id="api_port" type="int">
+          <name>Nova API Port</name>
+          <value>8773</value>
+          <description>The Nova/EC2 API port.</description>
+        </variable>
+      </variablelist>
+    </category>
+
+  </variables>
+
+  <comps>
+    <!-- deprecated - not used anymore - use .lst files instead -->
+  </comps>
+
+</configuration>
diff --git a/sfa/senslab/config/senslab/sfa_config b/sfa/senslab/config/senslab/sfa_config
new file mode 100644 (file)
index 0000000..2ded2d4
--- /dev/null
@@ -0,0 +1,151 @@
+# DO NOT EDIT. This file was automatically generated at
+# Mon Jun 25 15:01:21 2012 from:
+# 
+# /etc/sfa/sfa_config.xml
+
+# XMLRPC URL
+# URL for the federica xmlrpc API; login and password need to be set
+# like in http://login:password@hostname:port/the/path
+SFA_FEDERICA_URL='https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/'
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_AGGREGATE_CACHING=1
+
+# Hostname
+# The hostname where the aggregate is expected to be found.
+SFA_AGGREGATE_HOST='localhost'
+
+# Enable Aggregate
+# Allows this local SFA instance to run as an aggregate manager.
+SFA_AGGREGATE_ENABLED=1
+
+# Port number
+# The port where the aggregate is to be found.
+SFA_AGGREGATE_PORT=12346
+
+# Database name
+# SFA database name.
+SFA_DB_NAME='sfa'
+
+# Enabled
+# Enable the database server on this machine.
+SFA_DB_ENABLED=1
+
+# Database host
+# The host where the SFA database can be reached.
+SFA_DB_HOST='localhost'
+
+# Database user
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_USER='sfa'
+
+# Database password
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_PASSWORD='sfa'
+
+# Database port
+# The port where the SFA database can be reached.
+SFA_DB_PORT=5432
+
+# Flash policy config file
+# The path to where the flash policy config file can be reached.
+SFA_FLASHPOLICY_CONFIG_FILE='/etc/sfa/sfa_flashpolicy_config.xml'
+
+# Enable Flash Policy Server
+# Allows this local SFA instance to run a flash policy server.
+SFA_FLASHPOLICY_ENABLED=0
+
+# Flash policy port
+# The flash policy server port.
+SFA_FLASHPOLICY_PORT=843
+
+# Nova API Port
+# The Nova/EC2 API port.
+SFA_NOVA_API_PORT=8773
+
+# Sfa nova user
+# Account/context to use when performing administrative nova operations
+SFA_NOVA_USER='novaadmin'
+
+# Nova API url
+# The Nova/EC2 API url
+SFA_NOVA_API_URL='127.0.0.1'
+
+# URL
+# Full URL of PLC interface.
+SFA_PLC_URL='https://localhost:443/PLCAPI/'
+
+# Password
+# The PLC password for SFA_PLC_USER.
+SFA_PLC_PASSWORD='root'
+
+# PLC login name for an admin user; SFA will carry on operations under this account.
+SFA_PLC_USER='root@localhost.localdomain'
+
+# Root Authority
+# The hrn of the registry's root auth.
+SFA_REGISTRY_ROOT_AUTH='senslab'
+
+# Hostname
+# The hostname where the registry is expected to be found; using
+# localhost when the local registry is enabled seems reasonable.
+SFA_REGISTRY_HOST='localhost'
+
+# Enable Registry
+# Allows this local SFA instance to run as a registry.
+SFA_REGISTRY_ENABLED=1
+
+# Port number
+# The port where the registry is to be found.
+SFA_REGISTRY_PORT=12345
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_SM_CACHING=0
+
+# Hostname
+# The hostname where the slice manager is expected to be found.
+SFA_SM_HOST='localhost'
+
+# Enable Slice Manager
+# Allows this local SFA instance to run as a slice manager.
+SFA_SM_ENABLED=1
+
+# Port number
+# The port where the slice manager is to be found.
+SFA_SM_PORT=12347
+
+# Human readable name
+# The human readable name for this interface.
+SFA_INTERFACE_HRN='senslab'
+
+# Generic Flavour
+# This string refers to a class located in sfa.generic that describes
+# which specific implementation needs to be used for api, manager and
+# driver objects. PlanetLab users do not need to change this setting.
+SFA_GENERIC_FLAVOUR='slab'
+
+# Credential Schema
+# The path to the default credential schema
+SFA_CREDENTIAL_SCHEMA='/etc/sfa/credential.xsd'
+
+# Debug
+# Logging level; 0=minimum, 1=info, 2=debug
+SFA_API_LOGLEVEL=0
+
+# User Session Keys Path 
+# Some services will perform operations on behalf of a user, but make it
+# look like the user is the one performing the operation. Doing this
+# requires a valid key pair and credential for the user. This option
+# defines the path where key pairs and credentials are generated and
+# stored. This functionality is used by the SFA web GUI.
+SFA_SESSION_KEY_PATH='/var/lib/sfa/session_keys'
+
+# Max Slice Renew
+# Maximum amount of days a user can extend/renew their slices to
+SFA_MAX_SLICE_RENEW=60
diff --git a/sfa/senslab/config/senslab/sfa_config.xml b/sfa/senslab/config/senslab/sfa_config.xml
new file mode 100644 (file)
index 0000000..3283e12
--- /dev/null
@@ -0,0 +1,253 @@
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <variables>
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+      <variablelist>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>slab</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>senslab</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+        <variable id="credential_schema" type="string">
+          <name>Credential Schema</name>
+          <value>/etc/sfa/credential.xsd</value>
+          <description>The path to the default credential schema</description>
+        </variable>
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>0</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+        <variable id="max_slice_renew" type="int">
+          <name>Max Slice Renew</name>
+          <value>60</value>
+          <description>Maximum amount of days a user can extend/renew their slices to</description>
+        </variable>
+        <variable id="session_key_path" type="string">
+          <name>User Session Keys Path </name>
+          <value>/var/lib/sfa/session_keys</value>
+          <description>Some services will perform operations on behalf of a user, but make
+            it look like the user is the one performing the operation. Doing this requires a 
+            valid key pair and credential for the user. This option defines the path where 
+            key pairs and credentials are generated and stored.
+            This functionality is used by the SFA web GUI. 
+            </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Registry</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as a
+         registry.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the registry is expected to
+         be found; using localhost when the local registry is enabled
+         seems reasonable.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12345</value>
+          <description>The port where the registry is to be found.</description>
+        </variable>
+        <variable id="root_auth" type="string">
+          <name>Root Authority</name>
+          <value>senslab</value>
+          <description>The hrn of the registry's root auth.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Slice Manager</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as a
+         slice manager.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the slice manager is expected to
+         be found.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12347</value>
+          <description>The port where the slice manager is to be found.</description>
+        </variable>
+        <variable id="caching" type="boolean">
+          <name>Cache advertisement rspec</name>
+          <value>false</value>
+          <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Aggregate</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as an
+         aggregate manager.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the aggregate is expected to
+         be found.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12346</value>
+          <description>The port where the aggregate is to be found.</description>
+        </variable>
+        <variable id="caching" type="boolean">
+          <name>Cache advertisement rspec</name>
+          <value>true</value>
+          <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_db">
+      <name/>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enabled</name>
+          <value>true</value>
+          <description>Enable the database server on this machine.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Database host</name>
+          <value>localhost</value>
+          <description>The host where the SFA database can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Database port</name>
+          <value>5432</value>
+          <description>The port where the SFA database can be reached.</description>
+        </variable>
+        <variable id="user" type="string">
+          <name>Database user</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="password" type="string">
+          <name>Database password</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="name" type="string">
+          <name>Database name</name>
+          <value>sfa</value>
+          <description>SFA database name.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_flashpolicy">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect the flash policy server that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Flash Policy Server</name>
+          <value>false</value>
+          <description>Allows this local SFA instance to run a
+          flash policy server.</description>
+        </variable>
+        <variable id="config_file" type="string">
+          <name>Flash policy config file</name>
+          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+          <description>The path to where the flash policy config file can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Flash policy port</name>
+          <value>843</value>
+          <description>The flash policy server port.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_plc">
+      <name/>
+      <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+          <value>root@localhost.localdomain</value>
+          <description/>
+        </variable>
+        <variable id="password" type="string">
+          <name>Password</name>
+          <value>root</value>
+          <description>The PLC password for SFA_PLC_USER.</description>
+        </variable>
+        <variable id="url" type="string">
+          <name>URL</name>
+          <value>https://localhost:443/PLCAPI/</value>
+          <description>Full URL of PLC interface.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_federica">
+      <name/>
+      <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+      <variablelist>
+        <variable id="url" type="string">
+          <name>XMLRPC URL</name>
+          <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+          <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_nova">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect how SFA connects to 
+                   the Nova/EC2 API</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Sfa nova user</name>
+          <value>novaadmin</value>
+          <description>Account/context to use when performing 
+                       administrative nova operations</description>
+        </variable>
+        <variable id="api_url" type="string">
+          <name>Nova API url</name>
+          <value>127.0.0.1</value>
+          <description>The Nova/EC2 API url </description>
+        </variable>
+        <variable id="api_port" type="int">
+          <name>Nova API Port</name>
+          <value>8773</value>
+          <description>The Nova/EC2 API port.</description>
+        </variable>
+      </variablelist>
+    </category>
+  </variables>
+</configuration>
diff --git a/sfa/senslab/config/senslab/site.xml b/sfa/senslab/config/senslab/site.xml
new file mode 100644 (file)
index 0000000..7f40ea6
--- /dev/null
@@ -0,0 +1,86 @@
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <variables>
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12346</value>
+          <description>The port where the aggregate is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_db">
+      <name/>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Database user</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="password" type="string">
+          <name>Database password</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="root_auth" type="string">
+          <name>Root Authority</name>
+          <value>senslab</value>
+          <description>The hrn of the registry's root auth.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12345</value>
+          <description>The port where the registry is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>12347</value>
+          <description>The port where the slice manager is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+      <variablelist>
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>senslab</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>slab</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>0</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+      </variablelist>
+    </category>
+  </variables>
+</configuration>
diff --git a/sfa/senslab/config/senslab2/default_config.xml b/sfa/senslab/config/senslab2/default_config.xml
new file mode 100644 (file)
index 0000000..b08f738
--- /dev/null
@@ -0,0 +1,324 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Default SFA configuration file
+
+Thierry Parmentelat 
+
+-->
+
+<!DOCTYPE configuration PUBLIC "-//PlanetLab Central//DTD PLC configuration//EN" "plc_config.dtd">
+
+<configuration>
+  <variables>
+
+    <!-- ======================================== -->
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+
+      <variablelist>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>pl</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>plc</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+
+        <variable id="credential_schema" type="string">
+          <name>Credential Schema</name>
+          <value>/etc/sfa/credential.xsd</value>
+          <description>The path to the default credential schema</description>
+        </variable>
+
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>0</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+    
+        <variable id="max_slice_renew" type="int">
+          <name>Max Slice Renew</name>
+          <value>60</value>
+          <description>Maximum amout of days a user can extend/renew their slices to</description>
+        </variable>
+
+        <variable id="session_key_path" type="string">
+            <name>User Session Keys Path </name>
+            <value>/var/lib/sfa/session_keys</value>
+            <description>Some services will perform operations on behalf of a user, but make
+            it look like the user is the one performing the operation. Doing this requires a 
+            valid key pair and credential for the user. This option defines the path where 
+            key pairs and credentials are generated and stored.
+            This functionality is used by the SFA web GUI. 
+            </description> 
+        </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Registry</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as a
+         registry.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the registry is expected to
+         be found; using localhost when the local registry is enabled
+         seems reasonable.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12345</value>
+         <description>The port where the registry is to be found.</description>
+       </variable>
+
+       <variable id="root_auth" type="string">
+         <name>Root Authority</name>
+         <value>plc</value>
+         <description>The hrn of the registry's root auth.</description>
+       </variable>
+
+    </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Slice Manager</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as a
+         slice manager.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the slice manager is expected to
+         be found.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12347</value>
+         <description>The port where the slice manager is to be found.</description>
+       </variable>
+
+       <variable id="caching" type="boolean">
+         <name>Cache advertisement rspec</name>
+         <value>false</value>
+         <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+         </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enable Aggregate</name>
+         <value>true</value>
+         <description>Allows this local SFA instance to run as an
+         aggregate manager.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Hostname</name>
+         <value>localhost</value>
+         <description>The hostname where the aggregate is expected to
+         be found.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Port number</name>
+         <value>12346</value>
+         <description>The port where the aggregate is to be found.</description>
+       </variable>
+
+       <variable id="caching" type="boolean">
+         <name>Cache advertisement rspec</name>
+         <value>true</value>
+         <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+         </variable>
+
+      </variablelist>
+
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_db">
+      <name></name>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+
+      <variablelist>
+       <variable id="enabled" type="boolean">
+         <name>Enabled</name>
+         <value>true</value>
+         <description>Enable the database server on this machine.</description>
+       </variable>
+
+       <variable id="host" type="hostname">
+         <name>Database host</name>
+         <value>localhost</value>
+         <description>The host where the SFA database can be reached.</description>
+       </variable>
+
+       <variable id="port" type="int">
+         <name>Database port</name>
+         <value>5432</value>
+         <description>The port where the SFA database can be reached.</description>
+       </variable>
+
+       <variable id="user" type="string">
+         <name>Database user</name>
+         <value>sfadbuser</value>
+         <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+       </variable>
+
+       <variable id="password" type="string">
+         <name>Database password</name>
+         <value></value>
+         <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+       </variable>
+
+       <variable id="name" type="string">
+         <name>Database name</name>
+         <value>sfa</value>
+         <description>SFA database name.</description>
+       </variable>
+
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_flashpolicy">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect the flash policy server that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Flash Policy Server</name>
+          <value>false</value>
+          <description>Allows this local SFA instance to run a
+          flash policy server.</description>
+        </variable>
+        <variable id="config_file" type="string">
+          <name>Flash policy config file</name>
+          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+          <description>The path to where the flash policy config file can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Flash policy port</name>
+          <value>843</value>
+          <description>The flash policy server port.</description>
+        </variable>
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_plc">
+      <name></name>
+      <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+
+      <variablelist>
+       <variable id="user" type="string">
+         <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+         <value>root@localhost.localdomain</value>
+         <description></description>
+       </variable>
+
+       <variable id="password" type="string">
+         <name>Password</name>
+         <value>root</value>
+         <description>The PLC password for SFA_PLC_USER.</description>
+       </variable>
+
+       <variable id="url" type="string">
+         <name>URL</name>
+         <value>https://localhost:443/PLCAPI/</value>
+         <description>Full URL of PLC interface.</description>
+       </variable>
+
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_federica">
+      <name></name>
+      <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+
+      <variablelist>
+       <variable id="url" type="string">
+         <name>XMLRPC URL</name>
+         <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+         <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+       </variable>
+      </variablelist>
+    </category>
+
+    <!-- ======================================== -->
+    <category id="sfa_nova">
+      <name>SFA Nova</name>
+      <description>The settings that affect how SFA connects to 
+                   the Nova/EC2 API</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Sfa nova user</name>
+          <value>novaadmin</value>
+          <description>Account/context to use when performing 
+                       administrative nova operations</description>
+        </variable>
+        <variable id="api_url" type="string">
+          <name>Nova API url</name>
+          <value>127.0.0.1</value>
+          <description>The Nova/EC2 API url </description>
+        </variable>
+        <variable id="api_port" type="int">
+          <name>Nova API Port</name>
+          <value>8773</value>
+          <description>The Nova/EC2 API port.</description>
+        </variable>
+      </variablelist>
+    </category>
+
+  </variables>
+
+  <comps>
+    <!-- deprecated - not used anymore - use .lst files instead -->
+  </comps>
+
+</configuration>
diff --git a/sfa/senslab/config/senslab2/sfa_config b/sfa/senslab/config/senslab2/sfa_config
new file mode 100644 (file)
index 0000000..91ba5a8
--- /dev/null
@@ -0,0 +1,151 @@
+# DO NOT EDIT. This file was automatically generated at
+# Mon Jun 25 15:01:21 2012 from:
+# 
+# /etc/sfa/sfa_config.xml
+
+# XMLRPC URL
+# URL for the federica xmlrpc API; login and password need to be set
+# like in http://login:password@hostname:port/the/path
+SFA_FEDERICA_URL='https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/'
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_AGGREGATE_CACHING=1
+
+# Hostname
+# The hostname where the aggregate is expected to be found.
+SFA_AGGREGATE_HOST='localhost'
+
+# Enable Aggregate
+# Allows this local SFA instance to run as an aggregate manager.
+SFA_AGGREGATE_ENABLED=1
+
+# Port number
+# The port where the aggregate is to be found.
+SFA_AGGREGATE_PORT=52346
+
+# Database name
+# SFA database name.
+SFA_DB_NAME='sfa'
+
+# Enabled
+# Enable the database server on this machine.
+SFA_DB_ENABLED=1
+
+# Database host
+# The host where the SFA database can be reached.
+SFA_DB_HOST='localhost'
+
+# Database user
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_USER='sfa'
+
+# Database password
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_PASSWORD='sfa'
+
+# Database port
+# The port where the SFA database can be reached.
+SFA_DB_PORT=5432
+
+# Flash policy config file
+# The path to where the flash policy config file can be reached.
+SFA_FLASHPOLICY_CONFIG_FILE='/etc/sfa/sfa_flashpolicy_config.xml'
+
+# Enable Flash Policy Server
+# Allows this local SFA instance to run a flash policy server.
+SFA_FLASHPOLICY_ENABLED=0
+
+# Flash policy port
+# The flash policy server port.
+SFA_FLASHPOLICY_PORT=843
+
+# Nova API Port
+# The Nova/EC2 API port.
+SFA_NOVA_API_PORT=8773
+
+# Sfa nova user
+# Account/context to use when performing administrative nova operations
+SFA_NOVA_USER='novaadmin'
+
+# Nova API url
+# The Nova/EC2 API url
+SFA_NOVA_API_URL='127.0.0.1'
+
+# URL
+# Full URL of PLC interface.
+SFA_PLC_URL='https://localhost:443/PLCAPI/'
+
+# Password
+# The PLC password for SFA_PLC_USER.
+SFA_PLC_PASSWORD='root'
+
+# PLC login name for an admin user; SFA will carry on operations under this account.
+SFA_PLC_USER='root@localhost.localdomain'
+
+# Root Authority
+# The hrn of the registry's root auth.
+SFA_REGISTRY_ROOT_AUTH='senslab2'
+
+# Hostname
+# The hostname where the registry is expected to be found; using
+# localhost when the local registry is enabled seems reasonable.
+SFA_REGISTRY_HOST='localhost'
+
+# Enable Registry
+# Allows this local SFA instance to run as a registry.
+SFA_REGISTRY_ENABLED=1
+
+# Port number
+# The port where the registry is to be found.
+SFA_REGISTRY_PORT=52345
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_SM_CACHING=0
+
+# Hostname
+# The hostname where the slice manager is expected to be found.
+SFA_SM_HOST='localhost'
+
+# Enable Slice Manager
+# Allows this local SFA instance to run as a slice manager.
+SFA_SM_ENABLED=1
+
+# Port number
+# The port where the slice manager is to be found.
+SFA_SM_PORT=52347
+
+# Human readable name
+# The human readable name for this interface.
+SFA_INTERFACE_HRN='senslab2'
+
+# Generic Flavour
+# This string refers to a class located in sfa.generic that describes
+# which specific implementation needs to be used for api, manager and
+# driver objects. PlanetLab users do not need to change this setting.
+SFA_GENERIC_FLAVOUR='slab'
+
+# Credential Schema
+# The path to the default credential schema
+SFA_CREDENTIAL_SCHEMA='/etc/sfa/credential.xsd'
+
+# Debug
+# Logging level; 0=minimum, 1=info, 2=debug
+SFA_API_LOGLEVEL=2
+
+# User Session Keys Path 
+# Some services will perform operations on behalf of a user, but make it
+# look like the user is the one performing the operation. Doing this
+# requires a valid key pair and credential for the user. This option
+# defines the path where key pairs and credentials are generated and
+# stored. This functionality is used by the SFA web GUI.
+SFA_SESSION_KEY_PATH='/var/lib/sfa/session_keys'
+
+# Max Slice Renew
+# Maximum amout of days a user can extend/renew their slices to
+SFA_MAX_SLICE_RENEW=60
diff --git a/sfa/senslab/config/senslab2/sfa_config.xml b/sfa/senslab/config/senslab2/sfa_config.xml
new file mode 100644 (file)
index 0000000..a8e9050
--- /dev/null
@@ -0,0 +1,253 @@
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <variables>
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+      <variablelist>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>slab</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>senslab2</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+        <variable id="credential_schema" type="string">
+          <name>Credential Schema</name>
+          <value>/etc/sfa/credential.xsd</value>
+          <description>The path to the default credential schema</description>
+        </variable>
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>2</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+        <variable id="max_slice_renew" type="int">
+          <name>Max Slice Renew</name>
+          <value>60</value>
+          <description>Maximum amount of days a user can extend/renew their slices to</description>
+        </variable>
+        <variable id="session_key_path" type="string">
+          <name>User Session Keys Path </name>
+          <value>/var/lib/sfa/session_keys</value>
+          <description>Some services will perform operations on behalf of a user, but make
+            it look like the user is the one performing the operation. Doing this requires a 
+            valid key pair and credential for the user. This option defines the path where 
+            key pairs and credentials are generated and stored.
+            This functionality is used by the SFA web GUI. 
+            </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Registry</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as a
+         registry.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the registry is expected to
+         be found; using localhost when the local registry is enabled
+         seems reasonable.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52345</value>
+          <description>The port where the registry is to be found.</description>
+        </variable>
+        <variable id="root_auth" type="string">
+          <name>Root Authority</name>
+          <value>senslab2</value>
+          <description>The hrn of the registry's root auth.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Slice Manager</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as a
+         slice manager.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the slice manager is expected to
+         be found.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52347</value>
+          <description>The port where the slice manager is to be found.</description>
+        </variable>
+        <variable id="caching" type="boolean">
+          <name>Cache advertisement rspec</name>
+          <value>false</value>
+          <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Aggregate</name>
+          <value>true</value>
+          <description>Allows this local SFA instance to run as an
+         aggregate manager.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Hostname</name>
+          <value>localhost</value>
+          <description>The hostname where the aggregate is expected to
+         be found.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52346</value>
+          <description>The port where the aggregate is to be found.</description>
+        </variable>
+        <variable id="caching" type="boolean">
+          <name>Cache advertisement rspec</name>
+          <value>true</value>
+          <description>Enable caching of the global advertisement, as
+         returned by ListResources without a slice argument. </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_db">
+      <name/>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enabled</name>
+          <value>true</value>
+          <description>Enable the database server on this machine.</description>
+        </variable>
+        <variable id="host" type="hostname">
+          <name>Database host</name>
+          <value>localhost</value>
+          <description>The host where the SFA database can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Database port</name>
+          <value>5432</value>
+          <description>The port where the SFA database can be reached.</description>
+        </variable>
+        <variable id="user" type="string">
+          <name>Database user</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="password" type="string">
+          <name>Database password</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="name" type="string">
+          <name>Database name</name>
+          <value>sfa</value>
+          <description>SFA database name.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_flashpolicy">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect the flash policy server that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Flash Policy Server</name>
+          <value>false</value>
+          <description>Allows this local SFA instance to run a
+          flash policy server.</description>
+        </variable>
+        <variable id="config_file" type="string">
+          <name>Flash policy config file</name>
+          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+          <description>The path to where the flash policy config file can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Flash policy port</name>
+          <value>843</value>
+          <description>The flash policy server port.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_plc">
+      <name/>
+      <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+          <value>root@localhost.localdomain</value>
+          <description/>
+        </variable>
+        <variable id="password" type="string">
+          <name>Password</name>
+          <value>root</value>
+          <description>The PLC password for SFA_PLC_USER.</description>
+        </variable>
+        <variable id="url" type="string">
+          <name>URL</name>
+          <value>https://localhost:443/PLCAPI/</value>
+          <description>Full URL of PLC interface.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_federica">
+      <name/>
+      <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+      <variablelist>
+        <variable id="url" type="string">
+          <name>XMLRPC URL</name>
+          <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+          <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_nova">
+      <name>SFA Nova</name>
+      <description>The settings that affect how SFA connects to 
+                   the Nova/EC2 API</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Sfa nova user</name>
+          <value>novaadmin</value>
+          <description>Account/context to use when performing 
+                       administrative nova operations</description>
+        </variable>
+        <variable id="api_url" type="string">
+          <name>Nova API url</name>
+          <value>127.0.0.1</value>
+          <description>The Nova/EC2 API url </description>
+        </variable>
+        <variable id="api_port" type="int">
+          <name>Nova API Port</name>
+          <value>8773</value>
+          <description>The Nova/EC2 API port.</description>
+        </variable>
+      </variablelist>
+    </category>
+  </variables>
+</configuration>
diff --git a/sfa/senslab/config/senslab2/site.xml b/sfa/senslab/config/senslab2/site.xml
new file mode 100644 (file)
index 0000000..04421fb
--- /dev/null
@@ -0,0 +1,86 @@
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+  <variables>
+    <category id="sfa_aggregate">
+      <name>Aggregate</name>
+      <description>The settings that affect the aggregate manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52346</value>
+          <description>The port where the aggregate is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_db">
+      <name/>
+      <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+      <variablelist>
+        <variable id="user" type="string">
+          <name>Database user</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+        <variable id="password" type="string">
+          <name>Database password</name>
+          <value>sfa</value>
+          <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_registry">
+      <name>Registry</name>
+      <description>The settings that affect the registry that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="root_auth" type="string">
+          <name>Root Authority</name>
+          <value>senslab2</value>
+          <description>The hrn of the registry's root auth.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52345</value>
+          <description>The port where the registry is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa_sm">
+      <name>Slice Manager</name>
+      <description>The settings that affect the slice manager that will run
+      as part of this SFA instance.</description>
+      <variablelist>
+        <variable id="port" type="int">
+          <name>Port number</name>
+          <value>52347</value>
+          <description>The port where the slice manager is to be found.</description>
+        </variable>
+      </variablelist>
+    </category>
+    <category id="sfa">
+      <name>General</name>
+      <description>Basic system variables.</description>
+      <variablelist>
+        <variable id="interface_hrn" type="string">
+          <name>Human readable name</name>
+          <value>senslab2</value>
+          <description>The human readable name for this interface.</description>
+        </variable>
+        <variable id="generic_flavour" type="string">
+          <name>Generic Flavour</name>
+          <value>slab</value>
+          <description>This string refers to a class located in sfa.generic that describes 
+          which specific implementation needs to be used for api, manager and driver objects.
+          PlanetLab users do not need to change this setting.
+          </description>
+        </variable>
+        <variable id="api_loglevel" type="int">
+          <name>Debug</name>
+          <value>2</value>
+          <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+        </variable>
+      </variablelist>
+    </category>
+  </variables>
+</configuration>
diff --git a/sfa/senslab/sfa-bare b/sfa/senslab/sfa-bare
new file mode 100755 (executable)
index 0000000..745955c
--- /dev/null
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# sfa  starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description:   starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
+start() {
+
+    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
+        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
+    fi
+
+    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
+        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
+    fi
+        
+    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
+        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
+    fi
+
+    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
+        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
+    fi
+
+    RETVAL=$?
+    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py
+
+}
+
+stop() {
+    action $"Shutting down SFA" killproc sfa-server.py
+    RETVAL=$?
+
+    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
+}
+
+
+case "$1" in
+    start) start ;;
+    stop) stop ;;
+    reload) reload force ;;
+    restart) stop; start ;;
+    condrestart)
+       if [ -f /var/lock/subsys/sfa-server.py ]; then
+            stop
+            start
+       fi
+       ;;
+    status)
+       status sfa-server.py
+       RETVAL=$?
+       ;;
+    *)
+       echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+       exit 1
+       ;;
+esac
+
+exit $RETVAL
+
diff --git a/sfa/senslab/slabaggregate.py b/sfa/senslab/slabaggregate.py
new file mode 100644 (file)
index 0000000..7298a33
--- /dev/null
@@ -0,0 +1,389 @@
+#import httplib
+#import json
+import time
+
+
+#from sfa.util.config import Config
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.versions.slabv1Node import SlabPosition
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+#from sfa.rspecs.elements.login import Login
+#from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+#from sfa.util.sfatime import datetime_to_epoch
+
+from sfa.rspecs.elements.versions.slabv1Node import SlabNode
+from sfa.util.sfalogging import logger
+
+from sfa.util.xrn import Xrn
+
+def slab_xrn_to_hostname(xrn):
+    return Xrn.unescape(Xrn(xrn=xrn, type='node').get_leaf())
+
+def slab_xrn_object(root_auth, hostname):
+    """Attributes are urn and hrn.
+    Get the hostname using slab_xrn_to_hostname on the urn.
+    
+    """
+    return Xrn('.'.join( [root_auth, Xrn.escape(hostname)]), type='node')
+
+class SlabAggregate:
+
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+    
+    prepared = False
+
+    user_options = {}
+    
+    def __init__(self, driver):
+        self.driver = driver
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        sfa_slice = None
+        if not slice_xrn:
+            return (sfa_slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+
+        slices = self.driver.GetSlices(slice_filter= str(slice_name), \
+                                                slice_filter_type = 'slice_hrn')
+        logger.debug("Slabaggregate api \tget_slice_and_slivers  slices %s " \
+                                                                    %(slices))
+        if not slices:
+            return (sfa_slice, slivers)
+        #if isinstance(sfa_slice, list):
+            #sfa_slice = slices[0]
+        #else:
+            #sfa_slice = slices
+
+        # sort slivers by node id , if there is a job
+        #and therefore, nodes allocated to this slice
+        for sfa_slice in slices:
+            try:
+                    
+                for node in sfa_slice['node_ids']:
+                    #node_id = self.driver.root_auth + '.' + node_id
+                    sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, \
+                                    sfa_slice['record_id_slice'], node['hostname']),
+                                    'name': sfa_slice['slice_hrn'],
+                                    'type': 'slab-node', 
+                                    'tags': []})
+                    logger.log_exc("SLABAGGREGATE \t \
+                                        get_slice_and_slivers node_id %s "%(node))
+                    slivers[node['hostname']] = sliver
+            except KeyError:
+                logger.log_exc("SLABAGGREGATE \t \
+                                        get_slice_and_slivers KeyError ")
+        
+        #Add default sliver attribute :
+        #connection information for senslab
+        tmp = sfa_slice['slice_hrn'].split('.')
+        ldap_username = tmp[1].split('_')[0]
+        vmaddr = 'ssh ' + ldap_username + '@grenoble.senslab.info'
+        slivers['default_sliver'] =  {'vm': vmaddr , 'login': ldap_username}
+        ## sort sliver attributes by node id    
+        ##tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        ##for tag in tags:
+            ### most likely a default/global sliver attribute (node_id == None)
+            ##if tag['node_id'] not in slivers:
+                ##sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 ##'name': 'slab-vm',
+                                 ##'tags': []})
+                ##slivers[tag['node_id']] = sliver
+            ##slivers[tag['node_id']]['tags'].append(tag)
+        logger.debug("SLABAGGREGATE api get_slice_and_slivers  slivers %s "\
+                                                             %(slivers))
+        return (slices, slivers)
+            
+
+        
+    def get_nodes(self, slices=None, slivers=[], options={}):
+        # NT: the semantics of this function are not clear to me :
+        # if slice is not defined, then all the nodes should be returned
+        # if slice is defined, we should return only the nodes that 
+        # are part of this slice
+        # but what is the role of the slivers parameter ?
+        # So i assume that slice['node_ids'] will be the same as slivers for us
+        #filter_dict = {}
+        #if slice_xrn:
+            #if not slices or not slices['node_ids']:
+                #return ([],[])
+        tags_filter = {}
+        
+        # get the granularity in second for the reservation system
+        grain = self.driver.GetLeaseGranularity()
+        
+        # Commenting this part since all nodes should be returned, 
+        # even if a slice is provided
+        #if slice :
+        #    if 'node_ids' in slice and slice['node_ids']:
+        #        #first case, a non empty slice was provided
+        #        filter['hostname'] = slice['node_ids']
+        #        tags_filter=filter.copy()
+        #        nodes = self.driver.GetNodes(filter['hostname'])
+        #    else :
+        #        #second case, a slice was provided, but is empty
+        #        nodes={}
+        #else :
+        #    #third case, no slice was provided
+        #    nodes = self.driver.GetNodes()
+        nodes = self.driver.GetNodes()
+        #geni_available = options.get('geni_available')    
+        #if geni_available:
+            #filter['boot_state'] = 'boot'     
+       
+        #filter.update({'peer_id': None})
+        #nodes = self.driver.GetNodes(filter['hostname'])
+        
+        #site_ids = []
+        #interface_ids = []
+        #tag_ids = []
+        nodes_dict = {}
+        for node in nodes:
+            #site_ids.append(node['site_id'])
+            #interface_ids.extend(node['interface_ids'])
+            #tag_ids.extend(node['node_tag_ids'])
+            nodes_dict[node['node_id']] = node
+        
+        # get sites
+        #sites_dict  = self.get_sites({'site_id': site_ids}) 
+        # get interfaces
+        #interfaces = self.get_interfaces({'interface_id':interface_ids}) 
+        # get tags
+        #node_tags = self.get_node_tags(tags_filter)
+        
+        #if slices, this means we got to list all the nodes given to this slice
+        # Make a list of all the nodes in the slice before getting their attributes
+        rspec_nodes = []
+        slice_nodes_list = []
+        if slices:
+            for one_slice in slices:
+                for node in one_slice['node_ids']:
+                    slice_nodes_list.append(node['hostname'])
+                   
+        reserved_nodes = self.driver.GetNodesCurrentlyInUse()
+        logger.debug("SLABAGGREGATE api get_rspec slice_nodes_list  %s "\
+                                                             %(slice_nodes_list)) 
+        for node in nodes:
+            # skip whitelisted nodes
+            #if node['slice_ids_whitelist']:
+                #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    #continue
+            #rspec_node = Node()
+            logger.debug("SLABAGGREGATE api get_rspec node  %s "\
+                                                             %(node)) 
+            if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
+                   
+                rspec_node = SlabNode()
+                # xxx how to retrieve site['login_base']
+                #site_id=node['site_id']
+                #site=sites_dict[site_id]
+                rspec_node['mobile'] = node['mobile']
+                rspec_node['archi'] = node['archi']
+                rspec_node['radio'] = node['radio']
+    
+                slab_xrn = slab_xrn_object(self.driver.root_auth, node['hostname'])
+                rspec_node['component_id'] = slab_xrn.urn
+                rspec_node['component_name'] = node['hostname']  
+                rspec_node['component_manager_id'] = \
+                                hrn_to_urn(self.driver.root_auth, 'authority+sa')
+                
+                # Senslab's nodes are federated : there is only one authority 
+                # for all Senslab sites, registered in SFA.
+                # Removing the part including the site in authority_id SA 27/07/12
+                rspec_node['authority_id'] = rspec_node['component_manager_id']  
+    
+                # do not include boot state (<available> element) in the manifest rspec
+                
+               
+                rspec_node['boot_state'] = node['boot_state']
+                if node['hostname'] in reserved_nodes:
+                    rspec_node['boot_state'] = "Reserved"
+                rspec_node['exclusive'] = 'True'
+                rspec_node['hardware_types'] = [HardwareType({'name': 'slab-node'})]
+    
+                # only doing this because protogeni rspec needs
+                # to advertise available initscripts 
+                #rspec_node['pl_initscripts'] = None
+                # add site/interface info to nodes.
+                # assumes that sites, interfaces and tags have already been prepared.
+                #site = sites_dict[node['site_id']]
+                location = Location({'country':'France'})
+                rspec_node['location'] = location
+            
+            
+                position = SlabPosition()
+                for field in position :
+                    try:
+                        position[field] = node[field]
+                    except KeyError, error :
+                        logger.log_exc("SLABAGGREGATE\t get_rspec position %s "%(error))
+    
+                rspec_node['position'] = position
+                #rspec_node['interfaces'] = []
+               
+                #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+                # Granularity
+                granularity = Granularity({'grain': grain})
+                rspec_node['granularity'] = granularity
+                rspec_node['tags'] = []
+                if node['hostname'] in slivers:
+                    # add sliver info
+                    sliver = slivers[node['hostname']]
+                    rspec_node['sliver_id'] = sliver['sliver_id']
+                    rspec_node['client_id'] = node['hostname']
+                    rspec_node['slivers'] = [sliver]
+                    
+                    # slivers always provide the ssh service
+                    #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+                    #service = Services({'login': login})
+                    #rspec_node['services'] = [service]
+                rspec_nodes.append(rspec_node)
+
+        return (rspec_nodes)       
+
+    def get_leases(self, slice_record = None, options = {}):
+    
+        now = int(time.time())
+        lease_filter = {'clip': now }
+        
+        #self.driver.synchronize_oar_and_slice_table()
+        #if slice_record:
+            #lease_filter.update({'name': slice_record['name']})
+        return_fields = ['lease_id', 'hostname', 'site_id', \
+                            'name', 'start_time', 'duration']
+        #leases = self.driver.GetLeases(lease_filter)
+        leases = self.driver.GetLeases()
+        grain = self.driver.GetLeaseGranularity()
+        site_ids = []
+        rspec_leases = []
+        for lease in leases:
+            #as many leases as there are nodes in the job 
+            for node in lease['reserved_nodes']: 
+                rspec_lease = Lease()
+                rspec_lease['lease_id'] = lease['lease_id']
+                site = node['site_id']
+                slab_xrn = slab_xrn_object(self.driver.root_auth, node['hostname'])
+                rspec_lease['component_id'] = slab_xrn.urn
+                #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+                                        #site, node['hostname'])
+                rspec_lease['slice_id'] = lease['slice_id']
+                rspec_lease['start_time'] = lease['t_from']
+                rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
+                                                                    / grain   
+                rspec_leases.append(rspec_lease)
+        return rspec_leases    
+        
+   
+        #rspec_leases = []
+        #for lease in leases:
+       
+            #rspec_lease = Lease()
+            
+            ## xxx how to retrieve site['login_base']
+
+            #rspec_lease['lease_id'] = lease['lease_id']
+            #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+                                        #site['login_base'], lease['hostname'])
+            #slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+            #slice_urn = hrn_to_urn(slice_hrn, 'slice')
+            #rspec_lease['slice_id'] = slice_urn
+            #rspec_lease['t_from'] = lease['t_from']
+            #rspec_lease['t_until'] = lease['t_until']          
+            #rspec_leases.append(rspec_lease)
+        #return rspec_leases   
+#from plc/aggregate.py 
+    def get_rspec(self, slice_xrn=None, version = None, options={}):
+
+        rspec = None
+        version_manager = VersionManager()     
+        version = version_manager.get_version(version)
+        logger.debug("SlabAggregate \t get_rspec ***version %s \
+                    version.type %s  version.version %s options %s \r\n" \
+                    %(version,version.type,version.version,options))
+
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, \
+                                                    version.version, 'ad')
+
+        else:
+            rspec_version = version_manager._get_version(version.type, \
+                                                version.version, 'manifest')
+           
+        slices, slivers = self.get_slice_and_slivers(slice_xrn)
+        #at this point sliver may be empty if no senslab job 
+        #is running for this user/slice.
+        rspec = RSpec(version=rspec_version, user_options=options)
+
+        
+        #if slice and 'expires' in slice:
+           #rspec.xml.set('expires',  datetime_to_epoch(slice['expires']))
+         # add sliver defaults
+        #nodes, links = self.get_nodes(slice, slivers)
+        logger.debug("\r\n \r\n SlabAggregate \tget_rspec ******* slice_xrn %s \r\n \r\n"\
+                                            %(slice_xrn)) 
+                                            
+        try:                                    
+            lease_option = options['list_leases']
+        except KeyError:
+            #If no options are specified, at least print the resources
+            if slice_xrn :
+                lease_option = 'all'
+            pass 
+        
+        if lease_option in ['all', 'resources']:
+        #if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+            nodes = self.get_nodes(slices, slivers) 
+            logger.debug("SlabAggregate \tget_rspec **** \
+                        nodes %s \r\n" %(nodes))
+            #In case creating a job,  slice_xrn is not set to None
+            rspec.version.add_nodes(nodes)
+            if slice_xrn :
+                #Get user associated with this slice
+                #user = dbsession.query(RegRecord).filter_by(record_id = \
+                                                #slices['record_id_user']).first()
+
+                #ldap_username = (user.hrn).split('.')[1]
+                
+                
+                #for one_slice in slices :
+                ldap_username = slices[0]['slice_hrn']
+                tmp = ldap_username.split('.')
+                ldap_username = tmp[1].split('_')[0]
+              
+                if version.type == "Slab":
+                    rspec.version.add_connection_information(ldap_username)
+
+            default_sliver = slivers.get('default_sliver', [])
+            if default_sliver:
+                #default_sliver_attribs = default_sliver.get('tags', [])
+                logger.debug("SlabAggregate \tget_rspec **** \
+                        default_sliver%s \r\n" %(default_sliver))
+                for attrib in default_sliver:
+                    rspec.version.add_default_sliver_attribute(attrib, \
+                                                               default_sliver[attrib])  
+        if lease_option in ['all','leases']:                                                         
+        #if options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+            leases = self.get_leases(slices)
+            rspec.version.add_leases(leases)
+            
+        logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
+                                            %(rspec.toxml())) 
+        return rspec.toxml()          
diff --git a/sfa/senslab/slabdriver.py b/sfa/senslab/slabdriver.py
new file mode 100644 (file)
index 0000000..f009aa4
--- /dev/null
@@ -0,0 +1,1617 @@
+import subprocess
+
+from datetime import datetime
+from time import gmtime
+
+from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
+from sfa.util.sfalogging import logger
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegUser
+
+from sfa.trust.credential import Credential
+
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id, get_leaf
+
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of elsewhere;
+# SlabDriver should really only be about talking to the senslab testbed
+
+
+from sfa.senslab.OARrestapi import  OARrestapi
+from sfa.senslab.LDAPapi import LDAPapi
+
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
+                                                                
+from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
+                                                            slab_xrn_object
+from sfa.senslab.slabslices import SlabSlices
+
+
+
+
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver(Driver):
+
+    def __init__(self, config):
+        Driver.__init__ (self, config)
+        self.config = config
+        self.hrn = config.SFA_INTERFACE_HRN
+
+        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+
+        self.oar = OARrestapi()
+        self.ldap = LDAPapi()
+        self.time_format = "%Y-%m-%d %H:%M:%S"
+        self.db = SlabDB(config,debug = True)
+        self.cache = None
+        
+    
+    def sliver_status(self, slice_urn, slice_hrn):
+        """Receive a status request for slice named urn/hrn 
+        urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+        shall return a structure as described in
+        http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+        NT : not sure if we should implement this or not, but used by sface.
+        
+        """
+        
+        #First get the slice with the slice hrn
+        slice_list =  self.GetSlices(slice_filter = slice_hrn, \
+                                    slice_filter_type = 'slice_hrn')
+        
+        if len(slice_list) is 0:
+            raise SliverDoesNotExist("%s  slice_hrn" % (slice_hrn))
+        
+        #Slice has the same slice hrn for each slice in the slice/lease list
+        #So fetch the info on the user once 
+        one_slice = slice_list[0] 
+        recuser = dbsession.query(RegRecord).filter_by(record_id = \
+                                            one_slice['record_id_user']).first()
+        
+        #Make a list of all the nodes hostnames  in use for this slice
+        slice_nodes_list = []
+        for sl in slice_list:
+            for node in sl['node_ids']:
+                slice_nodes_list.append(node['hostname'])
+            
+        #Get all the corresponding nodes details    
+        nodes_all = self.GetNodes({'hostname':slice_nodes_list},
+                                ['node_id', 'hostname','site','boot_state'])
+        nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])  
+          
+          
+          
+        for sl in slice_list:
+
+              #For compatibility
+            top_level_status = 'empty' 
+            result = {}
+            result.fromkeys(['geni_urn','pl_login','geni_status','geni_resources'],None)
+            result['pl_login'] = recuser.hrn
+            logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
+                             %s \r\n " %(slice_urn, slice_hrn, sl))
+            try:
+                nodes_in_slice = sl['node_ids']
+            except KeyError:
+                #No job in the slice
+                result['geni_status'] = top_level_status
+                result['geni_resources'] = [] 
+                return result
+           
+            top_level_status = 'ready' 
+
+            #A job is running on Senslab for this slice
+            # report about the local nodes that are in the slice only
+         
+            result['geni_urn'] = slice_urn
+            
+
+            
+            #timestamp = float(sl['startTime']) + float(sl['walltime']) 
+            #result['pl_expires'] = strftime(self.time_format, \
+                                                    #gmtime(float(timestamp)))
+            #result['slab_expires'] = strftime(self.time_format,\
+                                                    #gmtime(float(timestamp)))
+            
+            resources = []
+            for node in sl['node_ids']:
+                res = {}
+                #res['slab_hostname'] = node['hostname']
+                #res['slab_boot_state'] = node['boot_state']
+                
+                res['pl_hostname'] = node['hostname']
+                res['pl_boot_state'] = nodeall_byhostname[node['hostname']]['boot_state']
+                #res['pl_last_contact'] = strftime(self.time_format, \
+                                                    #gmtime(float(timestamp)))
+                sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], \
+                                            nodeall_byhostname[node['hostname']]['node_id']) 
+                res['geni_urn'] = sliver_id 
+                if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
+
+                    res['geni_status'] = 'ready'
+                else:
+                    res['geni_status'] = 'failed'
+                    top_level_status = 'failed' 
+                    
+                res['geni_error'] = ''
+        
+                resources.append(res)
+                
+            result['geni_status'] = top_level_status
+            result['geni_resources'] = resources 
+            logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
+                                                    %(resources,res))
+            return result        
+            
+             
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
+                                                             users, options):
+        aggregate = SlabAggregate(self)
+        
+        slices = SlabSlices(self)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record = None 
+        if not isinstance(creds, list):
+            creds = [creds]
+    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
+                                                            %(rspec.version,slice_record))
+        
+        #self.synchronize_oar_and_slice_table(slice_hrn)
+        # ensure site record exists?
+        # ensure slice record exists
+        #Removed options to verify_slice SA 14/08/12
+        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
+                                                    sfa_peer)
+                                                    
+        #requested_attributes returned by rspec.version.get_slice_attributes() 
+        #unused, removed SA 13/08/12
+        rspec.version.get_slice_attributes()
+
+        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
+        
+        # ensure person records exists
+        #verify_persons returns added persons but since the return value
+        #is not used 
+        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
+                                                    sfa_peer, options=options)
+        
+
+        
+        # add/remove slice from nodes 
+       
+        requested_slivers = [node.get('component_name') \
+                            for node in rspec.version.get_nodes_with_slivers()]
+        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
+        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
+                                    requested_slivers %s  listnodes %s" \
+                                    %(requested_slivers,l))
+        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
+        slices.verify_slice_nodes(sfa_slice, requested_slivers, peer) 
+        
+        # add/remove leases
+        requested_lease_list = []
+        kept_leases = []
+        for lease in rspec.version.get_leases():
+            single_requested_lease = {}
+            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
+            if not lease.get('lease_id'):
+                single_requested_lease['hostname'] = \
+                            slab_xrn_to_hostname(lease.get('component_id').strip())
+                single_requested_lease['start_time'] = lease.get('start_time')
+                single_requested_lease['duration'] = lease.get('duration')
+            else:
+                kept_leases.append(int(lease['lease_id']))
+            if single_requested_lease.get('hostname'):
+                requested_lease_list.append(single_requested_lease)
+                
+        #Create a dict of leases keyed by start_time, regrouping nodes
+        #reserved at the same time, for the same amount of time
+        #= one job on OAR
+        requested_job_dict = {}
+        for lease in requested_lease_list:
+            
+            #In case it is an asap experiment, start_time is empty
+            if lease['start_time'] == '':
+                lease['start_time'] = '0' 
+                
+            if lease['start_time'] not in requested_job_dict:
+                if isinstance(lease['hostname'], str):
+                    lease['hostname'] =  [lease['hostname']]
+                    
+                requested_job_dict[lease['start_time']] = lease
+                
+            else :
+                job_lease = requested_job_dict[lease['start_time']]
+                if lease['duration'] == job_lease['duration'] :
+                    job_lease['hostname'].append(lease['hostname'])
+                    
+          
+                
+                        
+        logger.debug("SLABDRIVER.PY \tcreate_sliver  requested_job_dict %s " %(requested_job_dict))    
+        #verify_slice_leases returns the leases , but the return value is unused
+        #here. Removed SA 13/08/12           
+        slices.verify_slice_leases(sfa_slice, \
+                                    requested_job_dict, kept_leases, peer)
+        
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+        
+        
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        
+        sfa_slice_list  = self.GetSlices(slice_filter = slice_hrn, \
+                                            slice_filter_type = 'slice_hrn')
+        
+        if not sfa_slice_list:
+            return 1
+        
+        #Delete all in the slice
+        for sfa_slice in sfa_slice_list:
+
+        
+            logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
+            slices = SlabSlices(self)
+            # determine if this is a peer slice
+        
+            peer = slices.get_peer(slice_hrn) 
+            #TODO delete_sliver SA : UnBindObjectFromPeer should be 
+            #used when there is another 
+            #senslab testbed, which is not the case 14/08/12 . 
+            
+            logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
+            try:
+                if peer:
+                    self.UnBindObjectFromPeer('slice', \
+                                            sfa_slice['record_id_slice'], peer,None)
+                self.DeleteSliceFromNodes(sfa_slice)
+            finally:
+                if peer:
+                    self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
+                                                peer, sfa_slice['peer_slice_id'])
+            return 1
+            
+            
+    def AddSlice(self, slice_record):
+        slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
+                        record_id_slice= slice_record['record_id_slice'] , \
+                        record_id_user= slice_record['record_id_user'], \
+                        peer_authority = slice_record['peer_authority'])
+        logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
+                                            %(slice_record,slab_slice))
+        slab_dbsession.add(slab_slice)
+        slab_dbsession.commit()
+        return
+        
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        #cached_requested = options.get('cached', True) 
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = \
+                version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_" + \
+                                        options.get('info', 'default')
+    
+        # look in cache first
+        #if cached_requested and self.cache and not slice_hrn:
+            #rspec = self.cache.get(version_string)
+            #if rspec:
+                #logger.debug("SlabDriver.ListResources: \
+                                    #returning cached advertisement")
+                #return rspec 
+    
+        #panos: passing user-defined options
+        aggregate = SlabAggregate(self)
+        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+        options.update({'origin_hrn':origin_hrn})
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, \
+                                        version=rspec_version, options=options)
+       
+        # cache the result
+        #if self.cache and not slice_hrn:
+            #logger.debug("Slab.ListResources: stores advertisement in cache")
+            #self.cache.add(version_string, rspec)
+    
+        return rspec
+        
+        
+    def list_slices (self, creds, options):
+        # look in cache first
+        #if self.cache:
+            #slices = self.cache.get('slices')
+            #if slices:
+                #logger.debug("PlDriver.list_slices returns from cache")
+                #return slices
+    
+        # get data from db 
+
+        slices = self.GetSlices()        
+        logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))        
+        slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
+        #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
+                                                    #for slab_slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
+                                                for slice_hrn in slice_hrns]
+
+        # cache the result
+        #if self.cache:
+            #logger.debug ("SlabDriver.list_slices stores value in cache")
+            #self.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+    
+    #No site or node register supported
+    def register (self, sfa_record, hrn, pub_key):
+        record_type = sfa_record['type']
+        slab_record = self.sfa_fields_to_slab_fields(record_type, hrn, \
+                                                            sfa_record)
+    
+
+        if record_type == 'slice':
+            acceptable_fields = ['url', 'instantiation', 'name', 'description']
+            for key in slab_record.keys():
+                if key not in acceptable_fields:
+                    slab_record.pop(key) 
+            logger.debug("SLABDRIVER.PY register")
+            slices = self.GetSlices(slice_filter=slab_record['hrn'], \
+                                            slice_filter_type = 'slice_hrn')
+            if not slices:
+                pointer = self.AddSlice(slab_record)
+            else:
+                pointer = slices[0]['slice_id']
+    
+        elif record_type == 'user':  
+            persons = self.GetPersons([sfa_record])
+            #persons = self.GetPersons([sfa_record['hrn']])
+            if not persons:
+                sfa_record['enabled'] = False
+                #For Senslab LDAP, if the user is a new one, disable the 
+                #account so that admins have to acknowledge the user first.
+                pointer = self.AddPerson(dict(sfa_record))
+                #add in LDAP 
+            else:
+                pointer = persons[0]['person_id']
+                
+            #Does this make sense to senslab ?
+            #if 'enabled' in sfa_record and sfa_record['enabled']:
+                #self.UpdatePerson(pointer, \
+                                    #{'enabled': sfa_record['enabled']})
+                
+            #TODO register Change this AddPersonToSite stuff 05/07/2012 SA   
+            #No site in senslab 28/08/12 SA
+           
+    
+            # What roles should this user have?
+            #No user roles in Slab/SFA 28/08/12 SA: roles are handled in LDAP
+            # Add the user's key
+            if pub_key:
+                self.AddPersonKey(pointer, {'key_type' : 'ssh', \
+                                                'key' : pub_key})
+                
+        #No node adding outside OAR
+
+        return pointer
+            
+    #No site or node record update allowed       
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        pointer = old_sfa_record['pointer']
+        old_sfa_record_type = old_sfa_record['type']
+
+        # new_key implemented for users only
+        if new_key and old_sfa_record_type not in [ 'user' ]:
+            raise UnknownSfaType(old_sfa_record_type)
+        
+        #if (type == "authority"):
+            #self.shell.UpdateSite(pointer, new_sfa_record)
+    
+        if old_sfa_record_type == "slice":
+            slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
+                                                hrn, new_sfa_record)
+            if 'name' in slab_record:
+                slab_record.pop('name')
+                #Prototype should be UpdateSlice(self,
+                #auth, slice_id_or_name, slice_fields)
+                #Senslab cannot update slice since slice = job
+                #so we must delete and create another job
+                self.UpdateSlice(pointer, slab_record)
+    
+        elif old_sfa_record_type == "user":
+            update_fields = {}
+            all_fields = new_sfa_record
+            for key in all_fields.keys():
+                if key in ['first_name', 'last_name', 'title', 'email',
+                           'password', 'phone', 'url', 'bio', 'accepted_aup',
+                           'enabled']:
+                    update_fields[key] = all_fields[key]
+            self.UpdatePerson(pointer, update_fields)
+    
+            if new_key:
+                # must check this key against the previous one if it exists
+                persons = self.GetPersons([pointer], ['key_ids'])
+                person = persons[0]
+                keys = person['key_ids']
+                keys = self.GetKeys(person['key_ids'])
+                
+                # Delete all stale keys
+                key_exists = False
+                for key in keys:
+                    if new_key != key['key']:
+                        self.DeleteKey(key['key_id'])
+                    else:
+                        key_exists = True
+                if not key_exists:
+                    self.AddPersonKey(pointer, {'key_type': 'ssh', \
+                                                    'key': new_key})
+
+
+        return True
+        
+
+    def remove (self, sfa_record):
+        sfa_record_type = sfa_record['type']
+        hrn = sfa_record['hrn']
+        if sfa_record_type == 'user':
+
+            #get user from senslab ldap  
+            person = self.GetPersons(sfa_record)
+            #No registering at a given site in Senslab.
+            #Once registered to the LDAP, all senslab sites are
+            #accesible.
+            if person :
+                #Mark account as disabled in ldap
+                self.DeletePerson(sfa_record)
+        elif sfa_record_type == 'slice':
+            if self.GetSlices(slice_filter = hrn, \
+                                    slice_filter_type = 'slice_hrn'):
+                self.DeleteSlice(sfa_record)
+
+        #elif type == 'authority':
+            #if self.GetSites(pointer):
+                #self.DeleteSite(pointer)
+
+        return True
+            
+            
+            
+    #TODO clean GetPeers. 05/07/12SA        
+    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
+
+        existing_records = {}
+        existing_hrns_by_types = {}
+        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
+                    return_field %s " %(auth , peer_filter, return_fields_list))
+        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
+        for record in all_records:
+            existing_records[(record.hrn, record.type)] = record
+            if record.type not in existing_hrns_by_types:
+                existing_hrns_by_types[record.type] = [record.hrn]
+                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
+                    existing_hrns_by_types %s " %( existing_hrns_by_types))
+            else:
+                
+                logger.debug("SLABDRIVER \tGetPeer\t \INNN  type %s hrn %s " \
+                                                %(record.type,record.hrn))
+                existing_hrns_by_types[record.type].append(record.hrn)
+
+                        
+        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
+                                             %( existing_hrns_by_types))
+        records_list = [] 
+      
+        try: 
+            if peer_filter:
+                records_list.append(existing_records[(peer_filter,'authority')])
+            else :
+                for hrn in existing_hrns_by_types['authority']:
+                    records_list.append(existing_records[(hrn,'authority')])
+                    
+            logger.debug("SLABDRIVER \tGetPeer \trecords_list  %s " \
+                                            %(records_list))
+
+        except KeyError:
+            pass
+                
+        return_records = records_list
+        if not peer_filter and not return_fields_list:
+            return records_list
+
+       
+        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
+                                                    %(return_records))
+        return return_records
+        
+     
+    #TODO  : Handling OR request in make_ldap_filters_from_records 
+    #instead of the for loop 
+    #over the records' list
+    def GetPersons(self, person_filter=None):
+        """
+        person_filter should be a list of dictionnaries when not set to None.
+        Returns a list of users whose accounts are enabled found in ldap.
+       
+        """
+        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
+                                                    %(person_filter))
+        person_list = []
+        if person_filter and isinstance(person_filter, list):
+        #If we are looking for a list of users (list of dict records)
+        #Usually the list contains only one user record
+            for searched_attributes in person_filter:
+                
+                #Get only enabled user accounts in senslab LDAP : 
+                #add a filter for make_ldap_filters_from_record
+                person = self.ldap.LdapFindUser(searched_attributes, \
+                                is_user_enabled=True)
+                person_list.append(person)
+          
+        else:
+            #Get only enabled user accounts in senslab LDAP : 
+            #add a filter for make_ldap_filters_from_record
+            person_list  = self.ldap.LdapFindUser(is_user_enabled=True)  
+
+        return person_list
+
+    def GetTimezone(self):
+        server_timestamp, server_tz = self.oar.parser.\
+                                            SendRequest("GET_timezone")
+        return server_timestamp, server_tz
+    
+
+    def DeleteJobs(self, job_id, slice_hrn):
+        if not job_id or job_id is -1:
+            return
+        username  = slice_hrn.split(".")[-1].rstrip("_slice")
+        reqdict = {}
+        reqdict['method'] = "delete"
+        reqdict['strval'] = str(job_id)
+       
+
+        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
+                                                    reqdict,username)
+        logger.debug("SLABDRIVER \tDeleteJobs jobid  %s \r\n answer %s \
+                                username %s" %(job_id,answer, username))
+        return answer
+
+            
+        
+        ##TODO : Unused GetJobsId ? SA 05/07/12
+    #def GetJobsId(self, job_id, username = None ):
+        #"""
+        #Details about a specific job. 
+        #Includes details about submission time, jot type, state, events, 
+        #owner, assigned ressources, walltime etc...
+            
+        #"""
+        #req = "GET_jobs_id"
+        #node_list_k = 'assigned_network_address'
+        ##Get job info from OAR    
+        #job_info = self.oar.parser.SendRequest(req, job_id, username)
+
+        #logger.debug("SLABDRIVER \t GetJobsId  %s " %(job_info))
+        #try:
+            #if job_info['state'] == 'Terminated':
+                #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
+                                                            #%(job_id))
+                #return None
+            #if job_info['state'] == 'Error':
+                #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
+                                                            #%(job_info))
+                #return None
+                                                            
+        #except KeyError:
+            #logger.error("SLABDRIVER \tGetJobsId KeyError")
+            #return None 
+        
+        #parsed_job_info  = self.get_info_on_reserved_nodes(job_info, \
+                                                            #node_list_k)
+        ##Replaces the previous entry 
+        ##"assigned_network_address" / "reserved_resources"
+        ##with "node_ids"
+        #job_info.update({'node_ids':parsed_job_info[node_list_k]})
+        #del job_info[node_list_k]
+        #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
+        #return job_info
+
+        
+    def GetJobsResources(self, job_id, username = None):
+        #job_resources=['reserved_resources', 'assigned_resources',\
+                            #'job_id', 'job_uri', 'assigned_nodes',\
+                             #'api_timestamp']
+        #assigned_res = ['resource_id', 'resource_uri']
+        #assigned_n = ['node', 'node_uri']
+
+        req = "GET_jobs_id_resources"
+       
+               
+        #Get job resources list from OAR    
+        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
+        logger.debug("SLABDRIVER \t GetJobsResources  %s " %(node_id_list))
+        
+        hostname_list = \
+            self.__get_hostnames_from_oar_node_ids(node_id_list)
+        
+
+        #Replaces the previous entry "assigned_network_address" / 
+        #"reserved_resources"
+        #with "node_ids"
+        job_info = {'node_ids': hostname_list}
+
+        return job_info
+
+            
+    def get_info_on_reserved_nodes(self, job_info, node_list_name):
+        #Get the list of the testbed nodes records and make a 
+        #dictionnary keyed on the hostname out of it
+        node_list_dict = self.GetNodes() 
+        #node_hostname_list = []
+        node_hostname_list = [node['hostname'] for node in node_list_dict] 
+        #for node in node_list_dict:
+            #node_hostname_list.append(node['hostname'])
+        node_dict = dict(zip(node_hostname_list, node_list_dict))
+        try :
+            reserved_node_hostname_list = []
+            for index in range(len(job_info[node_list_name])):
+               #job_info[node_list_name][k] = 
+                reserved_node_hostname_list[index] = \
+                        node_dict[job_info[node_list_name][index]]['hostname']
+                            
+            logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
+                        reserved_node_hostname_list %s" \
+                        %(reserved_node_hostname_list))
+        except KeyError:
+            logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
+            
+        return reserved_node_hostname_list  
+            
+    def GetNodesCurrentlyInUse(self):
+        """Returns a list of all the nodes already involved in an oar job"""
+        return self.oar.parser.SendRequest("GET_running_jobs") 
+    
+    def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
+        full_nodes_dict_list = self.GetNodes()
+        #Put the full node list into a dictionary keyed by oar node id
+        oar_id_node_dict = {}
+        for node in full_nodes_dict_list:
+            oar_id_node_dict[node['oar_id']] = node
+            
+        logger.debug("SLABDRIVER \t  __get_hostnames_from_oar_node_ids\
+                        oar_id_node_dict %s" %(oar_id_node_dict))
+
+        hostname_dict_list = [] 
+        for resource_id in resource_id_list:
+            #Because jobs requested "asap" do not have defined resources
+            if resource_id is not "Undefined":
+                hostname_dict_list.append({'hostname' : \
+                        oar_id_node_dict[resource_id]['hostname'], 
+                        'site_id' :  oar_id_node_dict[resource_id]['site']})
+                
+            #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
+        return hostname_dict_list 
+        
+    def GetReservedNodes(self,username = None):
+        #Get the nodes in use and the reserved nodes
+        reservation_dict_list = \
+                        self.oar.parser.SendRequest("GET_reserved_nodes", username = username)
+        
+        
+        for resa in reservation_dict_list:
+            logger.debug ("GetReservedNodes resa %s"%(resa))
+            #dict list of hostnames and their site
+            resa['reserved_nodes'] = \
+                self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
+                
+        #del resa['resource_ids']
+        return reservation_dict_list
+     
+    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
+        """
+        node_filter_dict : dictionnary of lists
+        
+        """
+        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
+        node_dict_list = node_dict_by_id.values()
+        
+        #No  filtering needed return the list directly
+        if not (node_filter_dict or return_fields_list):
+            return node_dict_list
+        
+        return_node_list = []
+        if node_filter_dict:
+            for filter_key in node_filter_dict:
+                try:
+                    #Filter the node_dict_list by each value contained in the 
+                    #list node_filter_dict[filter_key]
+                    for value in node_filter_dict[filter_key]:
+                        for node in node_dict_list:
+                            if node[filter_key] == value:
+                                if return_fields_list :
+                                    tmp = {}
+                                    for k in return_fields_list:
+                                        tmp[k] = node[k]     
+                                    return_node_list.append(tmp)
+                                else:
+                                    return_node_list.append(node)
+                except KeyError:
+                    logger.log_exc("GetNodes KeyError")
+                    return
+
+
+        return return_node_list
+    
+  
+    def GetSites(self, site_filter_name_list = None, return_fields_list = None):
+        site_dict = self.oar.parser.SendRequest("GET_sites")
+        #site_dict : dict where the key is the sit ename
+        return_site_list = []
+        if not ( site_filter_name_list or return_fields_list):
+            return_site_list = site_dict.values()
+            return return_site_list
+        
+        for site_filter_name in site_filter_name_list:
+            if site_filter_name in site_dict:
+                if return_fields_list:
+                    for field in return_fields_list:
+                        tmp = {}
+                        try:
+                            tmp[field] = site_dict[site_filter_name][field]
+                        except KeyError:
+                            logger.error("GetSites KeyError %s "%(field))
+                            return None
+                    return_site_list.append(tmp)
+                else:
+                    return_site_list.append( site_dict[site_filter_name])
+            
+
+        return return_site_list
+                
+                
+      
+    def GetSlices(self, slice_filter = None, slice_filter_type = None):
+    #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
+                                            #return_fields_list = None):
+        """ Get the slice records from the slab db. 
+        Returns a slice ditc if slice_filter  and slice_filter_type 
+        are specified.
+        Returns a list of slice dictionnaries if there are no filters
+        specified. 
+       
+        """
+        login = None
+        return_slice_list = []
+        slicerec  = {}
+        slicerec_dict = {}
+        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
+        slicerec_dictlist = []
+        
+             
+        if slice_filter_type in authorized_filter_types_list:
+            
+            
+            def __get_slice_records(slice_filter = None, slice_filter_type = None):
+       
+                login = None
+                #Get list of slices based on the slice hrn
+                if slice_filter_type == 'slice_hrn':
+        
+                    login = slice_filter.split(".")[1].split("_")[0] 
+                    
+                    #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
+                    #otherwise will mess up the RegRecord in Resolve, don't know
+                    #why - SA 08/08/2012
+                    
+                    #Only one entry for one user  = one slice in slice_senslab table
+                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
+                    
+                #Get slice based on user id                             
+                if slice_filter_type == 'record_id_user':
+                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
+                    
+                if slicerec is None:
+                    return login, []
+                else:
+                    fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
+                    
+                    if login is None :
+                        login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0] 
+                    return login, fixed_slicerec_dict
+                
+            
+            
+            
+            login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
+            logger.debug(" SLABDRIVER \tGetSlices login %s \
+                                            slice record %s" \
+                                            %(login, fixed_slicerec_dict))
+    
+            
+    
+            #One slice can have multiple jobs
+            
+            leases_list = self.GetReservedNodes(username = login)
+            #If no job is running or no job scheduled            
+            if leases_list == [] :
+                return [fixed_slicerec_dict]
+            
+            #Several jobs for one slice  
+            for lease in leases_list : 
+                slicerec_dict = {} 
+                      
+                
+                #Check with OAR the status of the job if a job id is in 
+                #the slice record 
+                
+            
+            
+                slicerec_dict['oar_job_id'] = lease['lease_id']
+                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+                slicerec_dict.update(fixed_slicerec_dict)
+                slicerec_dict.update({'hrn':\
+                                    str(fixed_slicerec_dict['slice_hrn'])})
+                    
+    
+                slicerec_dictlist.append(slicerec_dict)
+                logger.debug("SLABDRIVER.PY  \tGetSlices  slicerec_dict %s slicerec_dictlist %s" %(slicerec_dict, slicerec_dictlist))
+                
+            logger.debug("SLABDRIVER.PY  \tGetSlices  RETURN slicerec_dictlist  %s"\
+                                                        %(slicerec_dictlist))
+                            
+            return slicerec_dictlist
+                
+                
+        else:
+            
+            slice_list = slab_dbsession.query(SliceSenslab).all()
+            leases_list = self.GetReservedNodes()
+            
+          
+            slicerec_dictlist = []
+            return_slice_list = []
+            for record in slice_list:
+                return_slice_list.append(record.dump_sqlalchemyobj_to_dict())
+                
+            for fixed_slicerec_dict in return_slice_list:
+                slicerec_dict = {} 
+                owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0] 
+                for lease in leases_list:   
+                    if owner == lease['user']:
+                        slicerec_dict['oar_job_id'] = lease['lease_id']
+                        slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+                        slicerec_dict.update(fixed_slicerec_dict)
+                        slicerec_dict.update({'hrn':\
+                                    str(fixed_slicerec_dict['slice_hrn'])})
+                        slicerec_dictlist.append(slicerec_dict)
+            
+            logger.debug("SLABDRIVER.PY  \tGetSlices RETURN slices %s \
+                        slice_filter %s " %(return_slice_list, slice_filter))
+        
+        #if return_fields_list:
+            #return_slice_list  = parse_filter(sliceslist, \
+                                #slice_filter,'slice', return_fields_list)
+
+        return slicerec_dictlist
+        
+    
+    def testbed_name (self): return self.hrn
+         
+    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
+          
+          
+          
+          
+          
+          
+    ##
+    # Convert SFA fields to PLC fields for use when registering up updating
+    # registry record in the PLC database
+    #
+    # @param type type of record (user, slice, ...)
+    # @param hrn human readable name
+    # @param sfa_fields dictionary of SFA fields
+    # @param slab_fields dictionary of PLC fields (output)
+
+    def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
+
+
+        slab_record = {}
+        #for field in record:
+        #    slab_record[field] = record[field]
+        if sfa_type == "slice":
+            #instantion used in get_slivers ? 
+            if not "instantiation" in slab_record:
+                slab_record["instantiation"] = "senslab-instantiated"
+            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)     
+            #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
+            slab_record["hrn"] = hrn 
+            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
+                        slab_record %s  " %(slab_record['hrn']))
+            if "url" in record:
+                slab_record["url"] = record["url"]
+            if "description" in record:
+                slab_record["description"] = record["description"]
+            if "expires" in record:
+                slab_record["expires"] = int(record["expires"])
+                
+        #nodes added by OAR only and then imported to SFA
+        #elif type == "node":
+            #if not "hostname" in slab_record:
+                #if not "hostname" in record:
+                    #raise MissingSfaInfo("hostname")
+                #slab_record["hostname"] = record["hostname"]
+            #if not "model" in slab_record:
+                #slab_record["model"] = "geni"
+                
+        #One authority only 
+        #elif type == "authority":
+            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+
+            #if not "name" in slab_record:
+                #slab_record["name"] = hrn
+
+            #if not "abbreviated_name" in slab_record:
+                #slab_record["abbreviated_name"] = hrn
+
+            #if not "enabled" in slab_record:
+                #slab_record["enabled"] = True
+
+            #if not "is_public" in slab_record:
+                #slab_record["is_public"] = True
+
+        return slab_record
+
+    
+
+            
+    def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
+        """ Transforms unix timestamp into valid OAR date format """
+        
+        #Used in case of a scheduled experiment (not immediate)
+        #To run an XP immediately, don't specify date and time in RSpec 
+        #They will be set to None. 
+        if xp_utc_timestamp:
+            #transform the xp_utc_timestamp into server readable time  
+            xp_server_readable_date = datetime.fromtimestamp(int(\
+                                xp_utc_timestamp)).strftime(self.time_format)
+
+            return xp_server_readable_date
+            
+        else:
+            return None
+        
+   
+
+             
+    def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
+                        lease_start_time, lease_duration, slice_user=None):
+        lease_dict = {}
+        lease_dict['lease_start_time'] = lease_start_time
+        lease_dict['lease_duration'] = lease_duration
+        lease_dict['added_nodes'] = added_nodes
+        lease_dict['slice_name'] = slice_name
+        lease_dict['slice_user'] = slice_user
+        lease_dict['grain'] = self.GetLeaseGranularity()
+        lease_dict['time_format'] = self.time_format
+        
+        def __create_job_structure_request_for_OAR(lease_dict):
+            """ Creates the structure needed for a correct POST on OAR.
+            Makes the timestamp transformation into the appropriate format.
+            Sends the POST request to create the job with the resources in 
+            added_nodes.
+            
+            """
+
+            nodeid_list = []
+            reqdict = {}
+    
+            
+            reqdict['workdir'] = '/tmp'   
+            reqdict['resource'] = "{network_address in ("   
+    
+            for node in lease_dict['added_nodes']: 
+                logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
+                                                                node %s" %(node))
+    
+                # Get the ID of the node 
+                nodeid = node
+                reqdict['resource'] += "'" + nodeid + "', "
+                nodeid_list.append(nodeid)
+    
+            custom_length = len(reqdict['resource'])- 2
+            reqdict['resource'] = reqdict['resource'][0:custom_length] + \
+                                                ")}/nodes=" + str(len(nodeid_list))
+    
+            def __process_walltime(duration):
+                """ Calculates the walltime in seconds from the duration in H:M:S
+                    specified in the RSpec.
+                    
+                """
+                if duration:
+                    # Fixing the walltime by adding a few delays. 
+                    # First put the walltime in seconds oarAdditionalDelay = 20;
+                    #  additional delay for /bin/sleep command to
+                    # take in account  prologue and epilogue scripts execution
+                    # int walltimeAdditionalDelay = 120;  additional delay
+                    desired_walltime = duration 
+                    total_walltime = desired_walltime + 140#+2 min 20
+                    sleep_walltime = desired_walltime + 20 #+20 sec
+                    walltime = []
+                    #Put the walltime back in str form
+                    #First get the hours
+                    walltime.append(str(total_walltime / 3600))
+                    total_walltime = total_walltime - 3600 * int(walltime[0])
+                    #Get the remaining minutes
+                    walltime.append(str(total_walltime / 60))
+                    total_walltime = total_walltime - 60 * int(walltime[1])
+                    #Get the seconds
+                    walltime.append(str(total_walltime))
+    
+                else:
+                    logger.log_exc(" __process_walltime duration null")
+                    
+                return walltime, sleep_walltime
+                    
+
+            walltime, sleep_walltime = \
+                        __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
+    
+    
+            reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
+                                ":" + str(walltime[1]) + ":" + str(walltime[2])
+            reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+    
+            #In case of a scheduled experiment (not immediate)
+            #To run an XP immediately, don't specify date and time in RSpec 
+            #They will be set to None.
+            if lease_dict['lease_start_time'] is not '0':
+                #Readable time accepted by OAR
+                start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
+                                                        strftime(lease_dict['time_format'])
+                reqdict['reservation'] = start_time
+            #If there is not start time, Immediate XP. No need to add special 
+            # OAR parameters
+    
+    
+            reqdict['type'] = "deploy" 
+            reqdict['directory'] = ""
+            reqdict['name'] = "SFA_" + lease_dict['slice_user']
+    
+            return reqdict
+        
+                                   
+        #Create the request for OAR
+        reqdict = __create_job_structure_request_for_OAR(lease_dict)
+         # first step : start the OAR job and update the job 
+        logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
+                             \r\n "  %(reqdict))  
+       
+        answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
+                                                            reqdict, slice_user)
+        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid   %s " %(answer))
+        try:       
+            jobid = answer['id']
+        except KeyError:
+            logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
+                                Impossible to create job  %s "  %(answer))
+            return
+        
+        
+        def __configure_experiment(jobid, added_nodes):
+            # second step : configure the experiment
+            # we need to store the nodes in a yaml (well...) file like this :
+            # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
+            job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
+            job_file.write('[')
+            job_file.write(str(added_nodes[0].strip('node')))
+            for node in added_nodes[1:len(added_nodes)] :
+                job_file.write(', '+ node.strip('node'))
+            job_file.write(']')
+            job_file.close()
+            return 
+        
+        def __launch_senslab_experiment(jobid):   
+            # third step : call the senslab-experiment wrapper
+            #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar 
+            # "+str(jobid)+" "+slice_user
+            javacmdline = "/usr/bin/java"
+            jarname = \
+                "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
+            #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
+                                                        #str(jobid), slice_user])
+            output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
+                                slice_user],stdout=subprocess.PIPE).communicate()[0]
+    
+            logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \
+                                                                    %(output))
+            return 
+        
+        
+        
+        if jobid :
+            logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
+                    added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
+            
+        
+            __configure_experiment(jobid, added_nodes)
+            __launch_senslab_experiment(jobid) 
+            
+        return
+        
+    def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
+        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s  \
+                slice_record %s lease_start_time %s lease_duration %s  "\
+                 %( hostname_list, slice_record , lease_start_time, \
+                 lease_duration))
+
+        tmp = slice_record['PI'][0].split(".")
+        username = tmp[(len(tmp)-1)]
+        self.LaunchExperimentOnOAR(hostname_list, slice_record['name'], lease_start_time, lease_duration, username)
+        start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
+        logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
+        
+        return
+    
+    
    #Delete the jobs from job_senslab table
    def DeleteSliceFromNodes(self, slice_record):
        """Kill the OAR job(s) backing this slice.

        Delegates to DeleteJobs with the slice's 'oar_job_id' and 'hrn'.
        """
        self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
        return   
+    
+    def GetLeaseGranularity(self):
+        """ Returns the granularity of Senslab testbed.
+        Defined in seconds. """
+        
+        grain = 60 
+        return grain
+    
+    def GetLeases(self, lease_filter_dict=None):
+        unfiltered_reservation_list = self.GetReservedNodes()
+        
+        ##Synchronize slice_table of sfa senslab db
+        #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)
+        
+        reservation_list = []
+        #Find the slice associated with this user senslab ldap uid
+        logger.debug(" SLABDRIVER.PY \tGetLeases ")
+        #Create user dict first to avoir looking several times for
+        #the same user in LDAP SA 27/07/12
+        resa_user_dict = {}
+        for resa in unfiltered_reservation_list:
+            logger.debug("SLABDRIVER \tGetLeases USER %s"\
+                                            %(resa['user']))    
+            if resa['user'] not in resa_user_dict: 
+                logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
+                ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
+                ldap_info = ldap_info[0][1]
+                user = dbsession.query(RegUser).filter_by(email = \
+                                                    ldap_info['mail'][0]).first()
+                #Separated in case user not in database : record_id not defined SA 17/07//12
+                query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
+                if query_slice_info:
+                    slice_info = query_slice_info.first()
+                else:
+                    slice_info = None
+                    
+                resa_user_dict[resa['user']] = {}
+                resa_user_dict[resa['user']]['ldap_info'] = user
+                resa_user_dict[resa['user']]['slice_info'] = slice_info
+        logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
+                                            %(resa_user_dict))         
+        for resa in unfiltered_reservation_list:
+            
+            
+            #Put the slice_urn  
+            resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
+            resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')    
+            #Put the slice_urn 
+            #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
+            resa['component_id_list'] = []
+            #Transform the hostnames into urns (component ids)
+            for node in resa['reserved_nodes']:
+                #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
+                         #self.root_auth, node['hostname']))
+                slab_xrn = slab_xrn_object(self.root_auth, node['hostname'])
+                resa['component_id_list'].append(slab_xrn.urn)
+        
+        #Filter the reservation list if necessary
+        #Returns all the leases associated with a given slice
+        if lease_filter_dict:
+            logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
+                                            %(lease_filter_dict))
+            for resa in unfiltered_reservation_list:
+                if lease_filter_dict['name'] == resa['slice_hrn']:
+                    reservation_list.append(resa)
+        else:
+            reservation_list = unfiltered_reservation_list
+            
+        logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
+                                                    %(reservation_list))
+        return reservation_list
+            
    def augment_records_with_testbed_info (self, sfa_records):
        """Fill sfa_records in place with senslab-specific information.

        Thin wrapper around fill_record_info (which returns None).
        """
        return self.fill_record_info (sfa_records)
+    
    def fill_record_info(self, record_list):
        """
        Given a SFA record or list of records, fill in the senslab
        specific and SFA specific fields of each record, in place.

        * 'slice' records get PI/researcher hrns, the oar_job_id list of
          the slice's jobs, and empty geni_urn/keys/key_ids fields for
          client_helper.py compatibility.
        * 'user' records get the person's senslab info merged in, and
          the matching slice record is APPENDED to record_list so the
          same loop processes it too (deliberate, see comment below).

        Always returns None; callers rely on the in-place updates.
        """
                    
        logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
        # Accept a single record as well as a list.
        if not isinstance(record_list, list):
            record_list = [record_list]
            
        try:
            for record in record_list:
                #If the record is a SFA slice record, then add information 
                #about the user of this slice. This kind of 
                #information is in the Senslab's DB.
                if str(record['type']) == 'slice':
                    #Get slab slice record.
                    recslice_list = self.GetSlices(slice_filter = \
                                                str(record['hrn']),\
                                                slice_filter_type = 'slice_hrn')
                    
                    # Owner of the slice, from the SFA registry DB.
                    # NOTE(review): recslice_list[0] raises IndexError if no
                    # slab slice matches; only TypeError is caught below --
                    # confirm this is intended.
                    recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                            recslice_list[0]['record_id_user']).first()
                    logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
                    record.update({'PI':[recuser.hrn],
                                'researcher': [recuser.hrn],
                                'name':record['hrn'], 
                                'oar_job_id':[rec['oar_job_id'] for rec in recslice_list],
                                'node_ids': [],
                                'person_ids':[recslice_list[0]['record_id_user']],
                                'geni_urn':'',  #For client_helper.py compatibility
                                'keys':'',  #For client_helper.py compatibility
                                'key_ids':''})  #For client_helper.py compatibility

                    #for rec in recslice_list:
                        #record['oar_job_id'].append(rec['oar_job_id'])
                    logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
                                                    recslice_list  %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record)) 
                if str(record['type']) == 'user':
                    #The record is a SFA user record.
                    #Get the information about his slice from Senslab's DB
                    #and add it to the user record.
                    recslice_list = self.GetSlices(\
                            slice_filter = record['record_id'],\
                            slice_filter_type = 'record_id_user')
                                            
                    logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
                                                recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record)) 
                    #Append slice record in records list, 
                    #therefore fetches user and slice info again(one more loop)
                    #Will update PIs and researcher for the slice
                    # NOTE(review): appending to record_list while iterating
                    # it is deliberate here -- the appended slice record is
                    # then handled by the 'slice' branch of this same loop.
                    recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                                recslice_list[0]['record_id_user']).first()
                    logger.debug( "SLABDRIVER.PY \t fill_record_info USER  \
                                                recuser %s \r\n \r\n" %(recuser)) 
                    recslice = {}
                    recslice = recslice_list[0]
                    recslice.update({'PI':[recuser.hrn],
                        'researcher': [recuser.hrn],
                        'name':record['hrn'], 
                        'node_ids': [],
                        'oar_job_id': [rec['oar_job_id'] for rec in recslice_list],
                        'person_ids':[recslice_list[0]['record_id_user']]})
                    recslice.update({'type':'slice', \
                                                'hrn':recslice_list[0]['slice_hrn']})
                    #for rec in recslice_list:
                        #recslice['oar_job_id'].append(rec['oar_job_id'])

                    #GetPersons takes [] as filters 
                    #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
                    user_slab = self.GetPersons([record])
    
                    
                    record.update(user_slab[0])
                    #For client_helper.py compatibility
                    record.update( { 'geni_urn':'',
                    'keys':'',
                    'key_ids':'' })                
                    record_list.append(recslice)
                    
                    logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
                                INFO TO USER records %s" %(record_list)) 
                logger.debug("SLABDRIVER.PY \tfill_record_info END \
                                #record %s \r\n \r\n " %(record))     

        except TypeError, error:
            logger.log_exc("SLABDRIVER \t fill_record_info  EXCEPTION %s"\
                                                                     %(error))
        #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
                              
        return
        
        #self.fill_record_slab_info(records)
+    
+    
+        
+
+    
+    #TODO Update membership?    update_membership_list SA 05/07/12
+    #def update_membership_list(self, oldRecord, record, listName, addFunc, \
+                                                                #delFunc):
        ## get a list of the HRNs that are members of the old and new records
+        #if oldRecord:
+            #oldList = oldRecord.get(listName, [])
+        #else:
+            #oldList = []     
+        #newList = record.get(listName, [])
+
+        ## if the lists are the same, then we don't have to update anything
+        #if (oldList == newList):
+            #return
+
+        ## build a list of the new person ids, by looking up each person to get
+        ## their pointer
+        #newIdList = []
+        #table = SfaTable()
+        #records = table.find({'type': 'user', 'hrn': newList})
+        #for rec in records:
+            #newIdList.append(rec['pointer'])
+
+        ## build a list of the old person ids from the person_ids field 
+        #if oldRecord:
+            #oldIdList = oldRecord.get("person_ids", [])
+            #containerId = oldRecord.get_pointer()
+        #else:
+            ## if oldRecord==None, then we are doing a Register, instead of an
+            ## update.
+            #oldIdList = []
+            #containerId = record.get_pointer()
+
+    ## add people who are in the new list, but not the oldList
+        #for personId in newIdList:
+            #if not (personId in oldIdList):
+                #addFunc(self.plauth, personId, containerId)
+
+        ## remove people who are in the old list, but not the new list
+        #for personId in oldIdList:
+            #if not (personId in newIdList):
+                #delFunc(self.plauth, personId, containerId)
+
+    #def update_membership(self, oldRecord, record):
+       
+        #if record.type == "slice":
+            #self.update_membership_list(oldRecord, record, 'researcher',
+                                        #self.users.AddPersonToSlice,
+                                        #self.users.DeletePersonFromSlice)
+        #elif record.type == "authority":
+            ## xxx TODO
+            #pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
+
+
+#TODO FUNCTIONS SECTION 04/07/2012 SA
+
    #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
    #04/07/2012 SA
    def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
        """ This method is a hopefully temporary hack to let the sfa correctly
        detach the objects it creates from a remote peer object. This is 
        needed so that the sfa federation link can work in parallel with 
        RefreshPeer, as RefreshPeer depends on remote objects being correctly 
        marked.
        Parameters:
        auth : struct, API authentication structure
            AuthMethod : string, Authentication method to use 
        object_type : string, Object type, among 'site','person','slice',
        'node','key'
        object_id : int, object_id
        shortname : string, peer shortname 
        FROM PLC DOC
        
        """
        # Stub: only logs a warning, nothing is actually detached.
        logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
                        DO NOTHING \r\n ")
        return 
+    
    #TODO Is BindObjectToPeer still necessary ? Currently does nothing 
    #04/07/2012 SA
    def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
                                                    remote_object_id=None):
        """This method is a hopefully temporary hack to let the sfa correctly 
        attach the objects it creates to a remote peer object. This is needed 
        so that the sfa federation link can work in parallel with RefreshPeer, 
        as RefreshPeer depends on remote objects being correctly marked.
        Parameters:
        shortname : string, peer shortname 
        remote_object_id : int, remote object_id, set to 0 if unknown 
        FROM PLC API DOC
        
        """
        # Stub: only logs a warning, nothing is actually attached.
        logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO UpdateSlice 04/07/2012 SA
    #Function should delete and create another job since in senslab slice=job
    def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):    
        """Updates the parameters of an existing slice with the values in 
        slice_fields.
        Users may only update slices of which they are members. 
        PIs may update any of the slices at their sites, or any slices of 
        which they are members. Admins may update any slice.
        Only PIs and admins may update max_nodes. Slices cannot be renewed
        (by updating the expires parameter) more than 8 weeks into the future.
         Returns 1 if successful, faults otherwise.
        FROM PLC API DOC
        
        """  
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO UpdatePerson 04/07/2012 SA
    def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
        """Updates a person. Only the fields specified in person_fields 
        are updated, all other fields are left untouched.
        Users and techs can only update themselves. PIs can only update
        themselves and other non-PIs at their sites.
        Returns 1 if successful, faults otherwise.
        FROM PLC API DOC
         
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO GetKeys 04/07/2012 SA
    def GetKeys(self, auth, key_filter=None, return_fields=None):
        """Returns an array of structs containing details about keys. 
        If key_filter is specified and is an array of key identifiers, 
        or a struct of key attributes, only keys matching the filter 
        will be returned. If return_fields is specified, only the 
        specified details will be returned.

        Admin may query all keys. Non-admins may only query their own keys.
        FROM PLC API DOC
        
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER  GetKeys EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO DeleteKey 04/07/2012 SA
    def DeleteKey(self, auth, key_id):
        """  Deletes a key.
         Non-admins may only delete their own keys.
         Returns 1 if successful, faults otherwise.
         FROM PLC API DOC
         
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER  DeleteKey EMPTY - DO NOTHING \r\n ")
        return
+
+    
    #TODO : Check rights to delete person 
    def DeletePerson(self, auth, person_record):
        """ Disable an existing account in senslab LDAP.
        Users and techs can only delete themselves. PIs can only 
        delete themselves and other non-PIs at their sites. 
        Admins can delete anyone.
        Returns 1 if successful, faults otherwise.
        FROM PLC API DOC
        
        """
        #Disable user account in senslab LDAP
        ret = self.ldap.LdapMarkUserAsDeleted(person_record)
        logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
        return ret
+    
    #TODO Check DeleteSlice, check rights 05/07/2012 SA
    def DeleteSlice(self, auth, slice_record):
        """ Deletes the specified slice.
         Senslab : Kill the job associated with the slice if there is one
         using DeleteSliceFromNodes.
         Updates the slice record in slab db to remove the slice nodes.
         
         Users may only delete slices of which they are members. PIs may 
         delete any of the slices at their sites, or any slices of which 
         they are members. Admins may delete any slice.
         Returns 1 if successful, faults otherwise.
         FROM PLC API DOC
        
        """
        # NOTE(review): despite the PLC doc above, this returns None, not 1.
        self.DeleteSliceFromNodes(slice_record)
        logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
        return
+    
    #TODO AddPerson 04/07/2012 SA
    #def AddPerson(self, auth,  person_fields=None): 
    def AddPerson(self, record):#TODO fixing 28/08//2012 SA
        """Adds a new account. Any fields specified in records are used, 
        otherwise defaults are used.
        Accounts are disabled by default. To enable an account, 
        use UpdatePerson().
        Returns the new person_id (> 0) if successful, faults otherwise. 
        FROM PLC API DOC
        
        """
        # NOTE(review): despite the PLC doc above, the LDAP return code is
        # only logged and None is returned.
        ret = self.ldap.LdapAddUser(record)
        logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
        return
+    
    #TODO AddPersonToSite 04/07/2012 SA
    def AddPersonToSite (self, auth, person_id_or_email, \
                                                site_id_or_login_base=None):
        """  Adds the specified person to the specified site. If the person is 
        already a member of the site, no errors are returned. Does not change 
        the person's primary site.
        Returns 1 if successful, faults otherwise.
        FROM PLC API DOC
        
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
    def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
        """Grants the specified role to the person.
        PIs can only grant the tech and user roles to users and techs at their 
        sites. Admins can grant any role to any user.
        Returns 1 if successful, faults otherwise.
        FROM PLC API DOC
        
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
        return
+    
    #TODO AddPersonKey 04/07/2012 SA
    def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
        """Adds a new key to the specified account.
        Non-admins can only modify their own keys.
        Returns the new key_id (> 0) if successful, faults otherwise.
        FROM PLC API DOC
        
        """
        # Stub: not implemented yet; only logs a warning and returns None.
        logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
        return
+    
+    def DeleteLeases(self, leases_id_list, slice_hrn ):
+        for job_id in leases_id_list:
+            self.DeleteJobs(job_id, slice_hrn)
+        
+        logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
+                \r\n " %(leases_id_list, slice_hrn))
+        return 
diff --git a/sfa/senslab/slabpostgres.py b/sfa/senslab/slabpostgres.py
new file mode 100644 (file)
index 0000000..96c9498
--- /dev/null
@@ -0,0 +1,192 @@
+import sys
+
+from sqlalchemy import create_engine, and_
+from sqlalchemy.orm import sessionmaker
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+import sfa.storage.model as model
+from sfa.storage.model import RegSlice
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relationship, backref
+
+
+from sqlalchemy.dialects import postgresql
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+from sqlalchemy import String
+from sfa.storage.alchemy import dbsession
+
#Dict holding the columns names of the table as keys
#and their type, used for creation of the table
#(raw-SQL description; the sqlalchemy model below is SliceSenslab)
slice_table = {'record_id_user':'integer PRIMARY KEY references X ON DELETE CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1',  'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}

#Dict with all the specific senslab tables, keyed by table name
tablenames_dict = {'slice_senslab': slice_table}
+
##############################

# Declarative base shared by all senslab-specific ORM models in this module.
SlabBase = declarative_base()
+
+
+
+
class SliceSenslab (SlabBase):
    """ORM mapping for the senslab-specific 'slice_senslab' table.

    Associates a slice hrn (primary key) with the SFA registry record
    ids of the slice and of its owning user, plus an optional peer
    authority for federated slices.
    """
    __tablename__ = 'slice_senslab' 

    slice_hrn = Column(String, primary_key=True)
    peer_authority = Column(String, nullable = True)
    record_id_slice = Column(Integer)    # SFA registry id of the slice
    record_id_user = Column(Integer)     # SFA registry id of the owner

    def __init__ (self, slice_hrn =None, record_id_slice=None, \
                                record_id_user= None, peer_authority=None):
        # BUGFIX: compare against None explicitly so legitimate falsy
        # values (e.g. a record id of 0) are not silently dropped, as the
        # previous plain truthiness tests did.
        if record_id_slice is not None: 
            self.record_id_slice = record_id_slice
        if slice_hrn is not None:
            self.slice_hrn = slice_hrn
        if record_id_user is not None: 
            self.record_id_user= record_id_user
        if peer_authority is not None:
            self.peer_authority = peer_authority

    def __repr__(self):
        result="<Record id user =%s, slice hrn=%s, Record id slice =%s ,peer_authority =%s"% \
                (self.record_id_user, self.slice_hrn, self.record_id_slice, self.peer_authority)
        result += ">"
        return result

    def dump_sqlalchemyobj_to_dict(self):
        """Return the row as a plain dict.

        Both 'record_id' and 'record_id_slice' carry the slice's record
        id, kept for backward compatibility with existing callers.
        """
        # Local renamed from 'dict' to avoid shadowing the builtin.
        slice_dict = {'slice_hrn':self.slice_hrn,
        'peer_authority':self.peer_authority,
        'record_id':self.record_id_slice, 
        'record_id_user':self.record_id_user,
        'record_id_slice':self.record_id_slice, }
        return slice_dict 
+          
+             
+
+#class PeerSenslab(SlabBase):
+    #__tablename__ = 'peer_senslab' 
+    #peername = Column(String, nullable = False)
+    #peerid = Column( Integer,primary_key=True)
+    
+    #def __init__ (self,peername = None ):
+        #if peername:
+            #self.peername = peername
+            
+            
+      #def __repr__(self):
+        #result="<Peer id  =%s, Peer name =%s" % (self.peerid, self.peername)
+        #result += ">"
+        #return result
+          
+class SlabDB:
+    def __init__(self,config, debug = False):
+        self.sl_base = SlabBase
+        dbname="slab_sfa"
+        if debug == True :
+            l_echo_pool = True
+            l_echo=True 
+        else :
+            l_echo_pool = False
+            l_echo = False 
+        # will be created lazily on-demand
+        self.slab_session = None
+        # the former PostgreSQL.py used the psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, nor if it's needed at all
+        # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+        # we indeed have /var/lib/pgsql/data/postgresql.conf where
+        # this setting is unset, it might be an angle to tweak that if need be
+        # try a unix socket first - omitting the hostname does the trick
+        unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
+        print >>sys.stderr, " \r\n \r\n SLAPOSTGRES INIT unix_url %s" %(unix_url)
+        # the TCP fallback method
+        tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
+        for url in [ unix_url, tcp_url ] :
+            try:
+                self.slab_engine = create_engine (url,echo_pool = l_echo_pool, echo = l_echo)
+                self.check()
+                self.url=url
+                return
+            except:
+                pass
+        self.slab_engine=None
+        raise Exception,"Could not connect to database"
+    
+    
+    
+    def check (self):
+        self.slab_engine.execute ("select 1").scalar()
+        
+        
+        
+    def session (self):
+        if self.slab_session is None:
+            Session=sessionmaker ()
+            self.slab_session=Session(bind=self.slab_engine)
+        return self.slab_session
+        
+        
+   
+        
+    #Close connection to database
+    def close(self):
+        if self.connection is not None:
+            self.connection.close()
+            self.connection = None
+            
+   
+        
+        
+    def exists(self, tablename):
+        """
+        Checks if the table specified as tablename exists.
+    
+        """
+       
+        try:
+            metadata = MetaData (bind=self.slab_engine)
+            table=Table (tablename, metadata, autoload=True)
+           
+            return True
+        except NoSuchTableError:
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES EXISTS NOPE! tablename %s " %(tablename)
+            return False
+       
+    
+    def createtable(self, tablename ):
+        """
+        Creates the specifed table. Uses the global dictionnary holding the tablenames and
+        the table schema.
+    
+        """
+
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES createtable SlabBase.metadata.sorted_tables %s \r\n engine %s" %(SlabBase.metadata.sorted_tables , slab_engine)
+        SlabBase.metadata.create_all(slab_engine)
+        return
+    
+    
+
+       
+
+
# NOTE(review): Config is already imported at the top of this file.
from sfa.util.config import Config

# Module-level singletons: one engine and one session shared by every
# importer of this module.
slab_alchemy= SlabDB(Config())
slab_engine=slab_alchemy.slab_engine
slab_dbsession=slab_alchemy.session()
diff --git a/sfa/senslab/slabslices.py b/sfa/senslab/slabslices.py
new file mode 100644 (file)
index 0000000..7779fcb
--- /dev/null
@@ -0,0 +1,680 @@
+from sfa.util.xrn import get_authority, urn_to_hrn
+from sfa.util.sfalogging import logger
+
+
+MAXINT =  2L**31-1
+
+class SlabSlices:
+    """Reconciliation helper for the Senslab driver.
+
+    Compares what an incoming request (RSpec / slice record) asks for
+    with what the testbed currently has -- leases, nodes, users, keys
+    -- and drives the necessary additions and deletions through the
+    driver passed to the constructor.
+    """
+
+    # Maps RSpec attribute names to their slice-tag counterparts.
+    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+    #def __init__(self, api, ttl = .5, origin_hrn=None):
+        #self.api = api
+        ##filepath = path + os.sep + filename
+        #self.policy = Policy(self.api)    
+        #self.origin_hrn = origin_hrn
+        #self.registry = api.registries[api.hrn]
+        #self.credential = api.getCredential()
+        #self.nodes = []
+        #self.persons = []
+
+
+    def __init__(self, driver):
+        """Store the driver used for all testbed operations."""
+        self.driver = driver
+        
+    ##Used in SFACE?    
+    #def get_slivers(self, xrn, node=None):
+        #hrn, hrn_type = urn_to_hrn(xrn)
+         
+        #slice_name = hrn_to_pl_slicename(hrn)
+        ## XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        ## of doing all of this?
+        ##return self.api.driver.GetSliceTicket(self.auth, slice_name) 
+        
+
+       
+        #sfa_slice = self.driver.GetSlices(slice_filter = slice_name, \
+        #                                   slice_filter_type = 'slice_hrn')
+
+        ## Get user information
+        ##TODO
+        #alchemy_person = dbsession.query(RegRecord).filter_by(record_id = \
+                                        #sfa_slice['record_id_user']).first()
+
+        #slivers = []
+        #sliver_attributes = []
+            
+        #if sfa_slice['oar_job_id'] is not -1:
+            #nodes_all = self.driver.GetNodes({'hostname': \
+                                                    #sfa_slice['node_ids']},
+                            #['node_id', 'hostname','site','boot_state'])
+            #nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+            #nodes = sfa_slice['node_ids']
+            
+            #for node in nodes:
+                ##for sliver_attribute in filter(lambda a: a['node_id'] == \
+                                                #node['node_id'], slice_tags):
+                #sliver_attribute['tagname'] = 'slab-tag'
+                #sliver_attribute['value'] = 'slab-value'
+                #sliver_attributes.append(sliver_attribute['tagname'])
+                #attributes.append({'tagname': sliver_attribute['tagname'],
+                                    #'value': sliver_attribute['value']})
+
+            ## set nodegroup slice attributes
+            #for slice_tag in filter(lambda a: a['nodegroup_id'] \
+                                        #in node['nodegroup_ids'], slice_tags):
+                ## Do not set any nodegroup slice attributes for
+                ## which there is at least one sliver attribute
+                ## already set.
+                #if slice_tag not in slice_tags:
+                    #attributes.append({'tagname': slice_tag['tagname'],
+                        #'value': slice_tag['value']})
+
+            #for slice_tag in filter(lambda a: a['node_id'] is None, \
+                                                            #slice_tags):
+                ## Do not set any global slice attributes for
+                ## which there is at least one sliver attribute
+                ## already set.
+                #if slice_tag['tagname'] not in sliver_attributes:
+                    #attributes.append({'tagname': slice_tag['tagname'],
+                                   #'value': slice_tag['value']})
+
+            ## XXX Sanity check; though technically this should 
+            ## be a system invariant
+            ## checked with an assertion
+            #if sfa_slice['expires'] > MAXINT:  sfa_slice['expires']= MAXINT
+            
+            #slivers.append({
+                #'hrn': hrn,
+                #'name': sfa_slice['name'],
+                #'slice_id': sfa_slice['slice_id'],
+                #'instantiation': sfa_slice['instantiation'],
+                #'expires': sfa_slice['expires'],
+                #'keys': keys,
+                #'attributes': attributes
+            #})
+
+        #return slivers
+        
+        
+        
+
+
+        #return slivers
+    def get_peer(self, xrn):
+        """Return the peer record whose hrn matches *xrn*'s authority,
+        or None when the slice belongs to this site (or no peer
+        matches).
+
+        NOTE(review): site_authority is aliased to slice_authority --
+        the real get_authority(slice_authority) call is commented out
+        below -- so peers are matched against the slice's own
+        authority. Confirm this is the intended behaviour.
+        """
+        hrn, hrn_type = urn_to_hrn(xrn)
+        #Does this slice belong to a local site or a peer senslab site?
+        peer = None
+        
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+        site_authority = slice_authority
+        # get this site's authority (sfa root authority or sub authority)
+        #site_authority = get_authority(slice_authority).lower()
+        logger.debug("SLABSLICES \ get_peer slice_authority  %s \
+                    site_authority %s hrn %s" %(slice_authority, \
+                                        site_authority, hrn))
+        #This slice belongs to the current site
+        if site_authority == self.driver.root_auth :
+            return None
+        # check if we are already peered with this site_authority, if so
+        #peers = self.driver.GetPeers({})  
+        peers = self.driver.GetPeers(peer_filter = slice_authority)
+        for peer_record in peers:
+          
+            if site_authority == peer_record.hrn:
+                peer = peer_record
+        logger.debug(" SLABSLICES \tget_peer peer  %s " %(peer))
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        """Return the hrn of the authority responsible for *xrn*'s
+        site, or None when this interface (self.driver.hrn) is itself
+        that authority."""
+        hrn, hrn_type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+        
+        
+    def verify_slice_leases(self, sfa_slice, requested_jobs_dict, kept_leases, \
+        peer):
+        """Reconcile OAR leases with the request: delete current leases
+        whose ids are not in *kept_leases*, then add one lease per
+        entry of *requested_jobs_dict* (keyed by start time).
+
+        Returns the lease list as it was BEFORE any change (the raw
+        GetLeases result, possibly falsy).
+        """
+
+       
+        #First get the list of current leases from OAR          
+        leases = self.driver.GetLeases({'name':sfa_slice['name']})
+        #leases = self.driver.GetLeases({'name':sfa_slice['name']}, ['lease_id'])
+        if leases : 
+            current_leases = [lease['lease_id'] for lease in leases]
+            #Deleted leases are the ones with lease id not declared in the Rspec
+            deleted_leases = list(set(current_leases).difference(kept_leases))
+    
+            try:
+                if peer:
+                    #peer is a RegAuthority object and is not subscriptable
+                    #TODO :UnBindObjectFromPeer Quick and dirty auth='senslab2 SA 27/07/12
+                    self.driver.UnBindObjectFromPeer('senslab2', 'slice', \
+                                    sfa_slice['record_id_slice'], peer.hrn)
+                
+                self.driver.DeleteLeases(deleted_leases, \
+                                        sfa_slice['name'])
+               
+            #TODO : catch other exception?
+            except KeyError: 
+                logger.log_exc('Failed to add/remove slice leases')
+                
+        #Add new leases        
+        for start_time in requested_jobs_dict:
+            job = requested_jobs_dict[start_time]
+            self.driver.AddLeases(job['hostname'], \
+                        sfa_slice, int(job['start_time']), \
+                        int(job['duration']))
+                        
+        return leases
+
+    def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
+        """Remove from the slice every node whose hostname is not in
+        *requested_slivers*.
+
+        Returns the node list fetched from the driver when the slice
+        has node_ids; otherwise falls through and implicitly returns
+        None. The *peer* argument is currently unused here.
+        """
+        current_slivers = []
+        deleted_nodes = []
+        
+        if sfa_slice['node_ids']:
+            nodes = self.driver.GetNodes(sfa_slice['node_ids'], ['hostname'])
+            current_slivers = [node['hostname'] for node in nodes]
+    
+            # remove nodes not in rspec
+            deleted_nodes = list(set(current_slivers).\
+                                                difference(requested_slivers))
+            # add nodes from rspec
+            #added_nodes = list(set(requested_slivers).difference(current_slivers))
+
+            #Update the table with the nodes that populate the slice
+            logger.debug("SLABSLICES \tverify_slice_nodes slice %s\
+                                         \r\n \r\n deleted_nodes %s"\
+                                        %(sfa_slice,deleted_nodes))
+
+            if deleted_nodes:
+                self.driver.DeleteSliceFromNodes(sfa_slice['name'], \
+                                                                deleted_nodes)
+            return nodes
+
+            
+
+    def free_egre_key(self):
+        """Return, as a string, the smallest EGRE key in [1, 255] not
+        already used by any 'egre_key' slice tag.
+
+        Raises KeyError when all 255 keys are taken (the for/else
+        'else' branch runs only when the loop never hits 'break').
+        """
+        used = set()
+        for tag in self.driver.GetSliceTags({'tagname': 'egre_key'}):
+            used.add(int(tag['value']))
+
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+  
+       
+                        
+        
+
+    def handle_peer(self, site, sfa_slice, persons, peer):
+        """Bind *site*, *sfa_slice* and each record in *persons* (plus
+        their keys) to the remote *peer*, rolling back by deleting the
+        just-created local object and re-raising on any bind failure.
+
+        No-op when peer is falsy. Returns sfa_slice unchanged.
+        """
+        if peer:
+            # bind site
+            try:
+                if site:
+                    self.driver.BindObjectToPeer('site', site['site_id'], \
+                                        peer['shortname'], sfa_slice['site_id'])
+            except Exception, error:
+                self.driver.DeleteSite(site['site_id'])
+                raise error
+            
+            # bind slice
+            try:
+                if sfa_slice:
+                    # BUG FIX: was slice['slice_id'] -- 'slice' is the
+                    # python builtin here (the old ctor's local is gone),
+                    # so subscripting it raised TypeError at runtime;
+                    # the intended object is sfa_slice, matching the
+                    # other two uses in this try/except.
+                    self.driver.BindObjectToPeer('slice', sfa_slice['slice_id'], \
+                                    peer['shortname'], sfa_slice['slice_id'])
+            except Exception, error:
+                self.driver.DeleteSlice(sfa_slice['slice_id'])
+                raise error 
+
+            # bind persons and, nested, each of their keys; a failed
+            # key bind deletes only that key, a failed person bind
+            # deletes the person and aborts.
+            for person in persons:
+                try:
+                    self.driver.BindObjectToPeer('person', \
+                                    person['person_id'], peer['shortname'], \
+                                    person['peer_person_id'])
+
+                    for (key, remote_key_id) in zip(person['keys'], \
+                                                        person['key_ids']):
+                        try:
+                            self.driver.BindObjectToPeer( 'key', \
+                                            key['key_id'], peer['shortname'], \
+                                            remote_key_id)
+                        except:
+                            self.driver.DeleteKey(key['key_id'])
+                            logger.log_exc("failed to bind key: %s \
+                                            to peer: %s " % (key['key_id'], \
+                                            peer['shortname']))
+                except Exception, error:
+                    self.driver.DeletePerson(person['person_id'])
+                    raise error       
+
+        return sfa_slice
+
+    #def verify_site(self, slice_xrn, slice_record={}, peer=None, \
+                                        #sfa_peer=None, options={}):
+        #(slice_hrn, type) = urn_to_hrn(slice_xrn)
+        #site_hrn = get_authority(slice_hrn)
+        ## login base can't be longer than 20 characters
+        ##slicename = hrn_to_pl_slicename(slice_hrn)
+        #authority_name = slice_hrn.split('.')[0]
+        #login_base = authority_name[:20]
+        #logger.debug(" SLABSLICES.PY \tverify_site authority_name %s  \
+                                        #login_base %s slice_hrn %s" \
+                                        #%(authority_name,login_base,slice_hrn)
+        
+        #sites = self.driver.GetSites(login_base)
+        #if not sites:
+            ## create new site record
+            #site = {'name': 'geni.%s' % authority_name,
+                    #'abbreviated_name': authority_name,
+                    #'login_base': login_base,
+                    #'max_slices': 100,
+                    #'max_slivers': 1000,
+                    #'enabled': True,
+                    #'peer_site_id': None}
+            #if peer:
+                #site['peer_site_id'] = slice_record.get('site_id', None)
+            #site['site_id'] = self.driver.AddSite(site)
+            ## exempt federated sites from monitor policies
+            #self.driver.AddSiteTag(site['site_id'], 'exempt_site_until', \
+                                                                #"20200101")
+            
+            ### is this still necessary?
+            ### add record to the local registry 
+            ##if sfa_peer and slice_record:
+                ##peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+                             ##'peer_authority': sfa_peer, 'pointer': \
+                                                        #site['site_id']}
+                ##self.registry.register_peer_object(self.credential, peer_dict)
+        #else:
+            #site =  sites[0]
+            #if peer:
+                ## unbind from peer so we can modify if necessary.
+                ## Will bind back later
+                #self.driver.UnBindObjectFromPeer('site', site['site_id'], \
+                                                            #peer['shortname']) 
+        
+        #return site        
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer):
+        """Ensure a slice named *slice_hrn* exists in the testbed.
+
+        When GetSlices finds matching slices, each is updated in place
+        with *slice_record*'s fields; otherwise a minimal slice dict is
+        built and registered via AddSlice. Returns the (last) updated
+        or newly built slice dict.
+
+        NOTE(review): the creation branch calls str(peer.hrn), which
+        raises AttributeError when peer is None -- confirm callers only
+        reach this branch with a real peer record.
+        """
+        #login_base = slice_hrn.split(".")[0]
+        slicename = slice_hrn
+        slices_list = self.driver.GetSlices(slice_filter = slicename, \
+                                            slice_filter_type = 'slice_hrn') 
+        if slices_list:
+            for sl in slices_list:
+            
+                logger.debug("SLABSLICE \tverify_slice slicename %s sl %s \
+                                    slice_record %s"%(slicename, sl, slice_record))
+                sfa_slice = sl
+                sfa_slice.update(slice_record)
+                #del slice['last_updated']
+                #del slice['date_created']
+                #if peer:
+                    #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                    ## unbind from peer so we can modify if necessary. 
+                    ## Will bind back later
+                    #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], \
+                                                                #peer['shortname'])
+                #Update existing record (e.g. expires field) 
+                    #it with the latest info.
+                ##if slice_record and slice['expires'] != slice_record['expires']:
+                    ##self.driver.UpdateSlice( slice['slice_id'], {'expires' : \
+                                                        #slice_record['expires']})
+        else:
+            logger.debug(" SLABSLICES \tverify_slice Oups \
+                        slice_record %s peer %s sfa_peer %s "\
+                        %(slice_record, peer,sfa_peer))
+            sfa_slice = {'slice_hrn': slicename,
+                     #'url': slice_record.get('url', slice_hrn), 
+                     #'description': slice_record.get('description', slice_hrn)
+                     'node_list' : [],
+                     'record_id_user' : slice_record['person_ids'][0],
+                     'record_id_slice': slice_record['record_id'],
+                     'peer_authority':str(peer.hrn)
+                    
+                     }
+            # add the slice  
+            self.driver.AddSlice(sfa_slice)                         
+            #slice['slice_id'] = self.driver.AddSlice(slice)
+            logger.debug("SLABSLICES \tverify_slice ADDSLICE OK") 
+            #slice['node_ids']=[]
+            #slice['person_ids'] = []
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None) 
+            # mark this slice as an sfa peer record
+            #if sfa_peer:
+                #peer_dict = {'type': 'slice', 'hrn': slice_hrn, 
+                             #'peer_authority': sfa_peer, 'pointer': \
+                                                    #slice['slice_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+            
+
+       
+        return sfa_slice
+
+
+    def verify_persons(self, slice_hrn, slice_record, users,  peer, sfa_peer, \
+                                                                options={}):
+        """ 
+        users is a record list. Records can either be local records 
+        or users records from known and trusted federated sites. 
+        If the user is from another site that senslab doesn't trust yet,
+        then Resolve will raise an error before getting to create_sliver. 
+
+        Builds hrn- and id-keyed indexes of the requested users, checks
+        which of them already exist in the Senslab LDAP (by hrn, then
+        by email for federated users), and adds the missing ones via
+        AddPerson. Returns the list of person dicts that were added.
+
+        NOTE(review): options uses a mutable default argument ({}) --
+        harmless while unmodified, but fragile; confirm before relying
+        on it.
+        """
+        #TODO SA 21/08/12 verify_persons Needs review 
+        
+        
+        users_by_id = {}  
+        users_by_hrn = {} 
+        #users_dict : dict whose keys can either be the user's hrn or its id.
+        #Values contains only id and hrn 
+        users_dict = {}
+        
+        #First create dicts by hrn and id for each user in the user record list:      
+        for user in users:
+            
+            if 'urn' in user and (not 'hrn' in user ) :
+                user['hrn'], user['type'] = urn_to_hrn(user['urn'])
+               
+            if 'person_id' in user and 'hrn' in user:
+                users_by_id[user['person_id']] = user
+                users_dict[user['person_id']] = {'person_id':\
+                                        user['person_id'], 'hrn':user['hrn']}
+
+                users_by_hrn[user['hrn']] = user
+                users_dict[user['hrn']] = {'person_id':user['person_id'], \
+                                                        'hrn':user['hrn']}
+                
+        
+        logger.debug( "SLABSLICE.PY \t verify_person  \
+                        users_dict %s \r\n user_by_hrn %s \r\n \
+                        \tusers_by_id %s " \
+                        %(users_dict,users_by_hrn, users_by_id))
+        
+        existing_user_ids = []
+        existing_user_hrns = []
+        existing_users = []
+        # Check if user is in Senslab LDAP using its hrn.
+        # Assuming Senslab is centralised :  one LDAP for all sites, 
+        # user_id unknown from LDAP
+        # LDAP does not provide users id, therefore we rely on hrns containing
+        # the login of the user.
+        # If the hrn is not a senslab hrn, the user may not be in LDAP.
+        if users_by_hrn:
+            #Construct the list of filters (list of dicts) for GetPersons
+            filter_user = []
+            for hrn in users_by_hrn:
+                filter_user.append (users_by_hrn[hrn])
+            logger.debug(" SLABSLICE.PY \tverify_person  filter_user %s " \
+                                                    %(filter_user))
+            #Check user's in LDAP with GetPersons
+            #Needed because what if the user has been deleted in LDAP but 
+            #is still in SFA?
+            existing_users = self.driver.GetPersons(filter_user) 
+                           
+            #User's in senslab LDAP               
+            if existing_users:
+                for user in existing_users :
+                    existing_user_hrns.append(users_dict[user['hrn']]['hrn'])
+                    existing_user_ids.\
+                                    append(users_dict[user['hrn']]['person_id'])
+         
+            # User from another known trusted federated site. Check 
+            # if a senslab account matching the email has already been created.
+            else: 
+                req = 'mail='
+                if isinstance(users, list):
+                    
+                    req += users[0]['email']  
+                else:
+                    req += users['email']
+                    
+                ldap_reslt = self.driver.ldap.LdapSearch(req)
+                if ldap_reslt:
+                    logger.debug(" SLABSLICE.PY \tverify_person users \
+                                USER already in Senslab \t ldap_reslt %s "%( ldap_reslt)) 
+                    existing_users.append(ldap_reslt[1])
+                 
+                else:
+                    #User not existing in LDAP
+                    #TODO SA 21/08/12 raise something to add user or add it auto ?
+                    logger.debug(" SLABSLICE.PY \tverify_person users \
+                                not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t ldap_reslt %s "  \
+                                                %(users, ldap_reslt))
+   
+        requested_user_ids = users_by_id.keys() 
+        requested_user_hrns = users_by_hrn.keys()
+        logger.debug("SLABSLICE.PY \tverify_person requested_user_ids  %s \
+                        user_by_hrn %s " %(requested_user_ids, users_by_hrn)) 
+      
+   
+        #Check that the user of the slice in the slice record
+        #matches the existing users 
+        # NOTE(review): this check only logs on success and swallows
+        # KeyError -- it never rejects a mismatch; confirm intended.
+        try:
+            if slice_record['record_id_user'] in requested_user_ids and \
+                                slice_record['PI'][0] in requested_user_hrns:
+                logger.debug(" SLABSLICE  \tverify_person  \
+                        requested_user_ids %s = \
+                        slice_record['record_id_user'] %s" \
+                        %(requested_user_ids,slice_record['record_id_user']))
+           
+        except KeyError:
+            pass
+            
+      
+        # users to be added, removed or updated
+        #One user in one senslab slice : there should be no need
+        #to remove/ add any user from/to a slice.
+        #However a user from SFA which is not registered in Senslab yet
+        #should be added to the LDAP.
+
+        added_user_hrns = set(requested_user_hrns).\
+                                            difference(set(existing_user_hrns))
+
+        #self.verify_keys(existing_slice_users, updated_users_list, \
+                                                            #peer, append)
+
+        added_persons = []
+        # add new users
+        for added_user_hrn in added_user_hrns:
+            added_user = users_dict[added_user_hrn]
+            #hrn, type = urn_to_hrn(added_user['urn'])  
+            # NOTE(review): users_dict values only carry person_id and
+            # hrn (built above), yet first_name/last_name are read here
+            # -- confirm these keys are present, else this raises
+            # KeyError.
+            person = {
+                #'first_name': added_user.get('first_name', hrn),
+                #'last_name': added_user.get('last_name', hrn),
+                'first_name': added_user['first_name'],
+                'last_name': added_user['last_name'],
+                'person_id': added_user['person_id'],
+                'peer_person_id': None,
+                'keys': [],
+                'key_ids': added_user.get('key_ids', []),
+                
+            } 
+            person['person_id'] = self.driver.AddPerson(person)
+            if peer:
+                person['peer_person_id'] = added_user['person_id']
+            added_persons.append(person)
+           
+            # enable the account 
+            self.driver.UpdatePerson(person['person_id'], {'enabled': True})
+            
+            # add person to site
+            #self.driver.AddPersonToSite(added_user_id, login_base)
+
+            #for key_string in added_user.get('keys', []):
+                #key = {'key':key_string, 'key_type':'ssh'}
+                #key['key_id'] = self.driver.AddPersonKey(person['person_id'], \
+                                                #                       key)
+                #person['keys'].append(key)
+
+            # add the registry record
+            #if sfa_peer:
+                #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': \
+                                                #sfa_peer, \
+                                                #'pointer': person['person_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+        #for added_slice_user_hrn in \
+                                #added_slice_user_hrns.union(added_user_hrns):
+            #self.driver.AddPersonToSlice(added_slice_user_hrn, \
+                                                    #slice_record['name'])
+        #for added_slice_user_id in \
+                                    #added_slice_user_ids.union(added_user_ids):
+            # add person to the slice 
+            #self.driver.AddPersonToSlice(added_slice_user_id, \
+                                                #slice_record['name'])
+            # if this is a peer record then it 
+            # should already be bound to a peer.
+            # no need to return worry about it getting bound later 
+
+        return added_persons
+            
+    #Unused
+    def verify_keys(self, persons, users, peer, options={}):
+        """Synchronise SSH keys: add each key requested by *users* that
+        is not already known, and -- unless options['append'] is truthy
+        (default True) -- delete existing keys no longer requested.
+        Around each change, the owning person is unbound from / rebound
+        to *peer* when one is given.
+
+        NOTE(review): in the try/finally below, 'person' is assigned
+        inside the peer branch; if persondict lookup raises before the
+        assignment, the finally clause rebinds whatever 'person' was
+        left over from the earlier loop -- confirm this cannot happen
+        in practice.
+        """
+        # existing keys 
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']     
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person    
+    
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.driver.UnBindObjectFromPeer('person', \
+                                        person['person_id'], peer['shortname'])
+                        key['key_id'] = \
+                                self.driver.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.BindObjectToPeer('key', \
+                                            key['key_id'], peer['shortname'], \
+                                            remote_key_id)
+                            
+                    finally:
+                        # rebind the person even when the add failed
+                        if peer:
+                            self.driver.BindObjectToPeer('person', \
+                                    person['person_id'], peer['shortname'], \
+                                    user['person_id'])
+        
+        # remove old keys (only if we are not appending)
+        append = options.get('append', True)
+        if append == False: 
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+
+                    if peer:
+                        self.driver.UnBindObjectFromPeer('key', \
+                                        existing_key_id, peer['shortname'])
+                    self.driver.DeleteKey(existing_key_id)
+
+    #def verify_slice_attributes(self, slice, requested_slice_attributes, \
+                                            #append=False, admin=False):
+        ## get list of attributes users ar able to manage
+        #filter = {'category': '*slice*'}
+        #if not admin:
+            #filter['|roles'] = ['user']
+        #slice_attributes = self.driver.GetTagTypes(filter)
+        #valid_slice_attribute_names = [attribute['tagname'] \
+                                            #for attribute in slice_attributes]
+
+        ## get sliver attributes
+        #added_slice_attributes = []
+        #removed_slice_attributes = []
+        #ignored_slice_attribute_names = []
+        #existing_slice_attributes = self.driver.GetSliceTags({'slice_id': \
+                                                            #slice['slice_id']})
+
+        ## get attributes that should be removed
+        #for slice_tag in existing_slice_attributes:
+            #if slice_tag['tagname'] in ignored_slice_attribute_names:
+                ## If a slice already has a admin only role 
+                ## it was probably given to them by an
+                ## admin, so we should ignore it.
+                #ignored_slice_attribute_names.append(slice_tag['tagname'])
+            #else:
+                ## If an existing slice attribute was not 
+                ## found in the request it should
+                ## be removed
+                #attribute_found=False
+                #for requested_attribute in requested_slice_attributes:
+                    #if requested_attribute['name'] == slice_tag['tagname'] \
+                        #and requested_attribute['value'] == slice_tag['value']:
+                        #attribute_found=True
+                        #break
+
+            #if not attribute_found and not append:
+                #removed_slice_attributes.append(slice_tag)
+        
+        ## get attributes that should be added:
+        #for requested_attribute in requested_slice_attributes:
+            ## if the requested attribute wasn't found  we should add it
+            #if requested_attribute['name'] in valid_slice_attribute_names:
+                #attribute_found = False
+                #for existing_attribute in existing_slice_attributes:
+                    #if requested_attribute['name'] == \
+                        #existing_attribute['tagname'] and \
+                       #requested_attribute['value'] == \
+                       #existing_attribute['value']:
+                        #attribute_found=True
+                        #break
+                #if not attribute_found:
+                    #added_slice_attributes.append(requested_attribute)
+
+
+        ## remove stale attributes
+        #for attribute in removed_slice_attributes:
+            #try:
+                #self.driver.DeleteSliceTag(attribute['slice_tag_id'])
+            #except Exception, error:
+                #self.logger.warn('Failed to remove sliver attribute. name: \
+                                #%s, value: %s, node_id: %s\nCause:%s'\
+                                #% (name, value,  node_id, str(error)))
+
+        ## add requested_attributes
+        #for attribute in added_slice_attributes:
+            #try:
+                #self.driver.AddSliceTag(slice['name'], attribute['name'], \
+                            #attribute['value'], attribute.get('node_id', None))
+            #except Exception, error:
+                #self.logger.warn('Failed to add sliver attribute. name: %s, \
+                                #value: %s, node_id: %s\nCause:%s'\
+                                #% (name, value,  node_id, str(error)))
+
\ No newline at end of file
index 1596cc3..0b999d9 100755 (executable)
@@ -84,9 +84,8 @@ def install_peer_certs(server_key_file, server_cert_file):
     peer_gids = []
     if not new_hrns:
         return 
-
     trusted_certs_dir = api.config.get_trustedroots_dir()
-    for new_hrn in new_hrns:
+    for new_hrn in new_hrns: 
         if not new_hrn: continue
         # the gid for this interface should already be installed
         if new_hrn == api.config.SFA_INTERFACE_HRN: continue
@@ -99,7 +98,6 @@ def install_peer_certs(server_key_file, server_cert_file):
             if 'sfa' not in server_version:
                 logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
                 continue
-      
             trusted_gids = ReturnValue.get_value(interface.get_trusted_certs())
             if trusted_gids:
                 # the gid we want should be the first one in the list,
@@ -184,8 +182,7 @@ def main():
     hierarchy = Hierarchy()
     auth_info = hierarchy.get_interface_auth_info()
     server_key_file = auth_info.get_privkey_filename()
-    server_cert_file = auth_info.get_gid_filename()
-
+    server_cert_file = auth_info.get_gid_filename() 
     # ensure interface cert is present in trusted roots dir
     trusted_roots = TrustedRoots(config.get_trustedroots_dir())
     trusted_roots.add_gid(GID(filename=server_cert_file))
index 0c03279..41f4280 100644 (file)
@@ -68,7 +68,6 @@ class Auth:
         self.client_cred = Credential(string = cred)
         self.client_gid = self.client_cred.get_gid_caller()
         self.object_gid = self.client_cred.get_gid_object()
-        
         # make sure the client_gid is not blank
         if not self.client_gid:
             raise MissingCallerGID(self.client_cred.get_subject())
@@ -78,12 +77,13 @@ class Auth:
             self.verifyPeerCert(self.peer_cert, self.client_gid)                   
 
         # make sure the client is allowed to perform the operation
-        if operation:
+        if operation:    
             if not self.client_cred.can_perform(operation):
                 raise InsufficientRights(operation)
 
         if self.trusted_cert_list:
             self.client_cred.verify(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+            
         else:
            raise MissingTrustedRoots(self.config.get_trustedroots_dir())
        
@@ -91,6 +91,7 @@ class Auth:
         # This check does not apply to trusted peers 
         trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
         if hrn and self.client_gid.get_hrn() not in trusted_peers:
+            
             target_hrn = self.object_gid.get_hrn()
             if not hrn == target_hrn:
                 raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
@@ -225,13 +226,16 @@ class Auth:
         @param name human readable name to test  
         """
         object_hrn = self.object_gid.get_hrn()
-        if object_hrn == name:
-            return
-        if name.startswith(object_hrn + "."):
+       #strname = str(name).strip("['']")
+       if object_hrn == name:
+        #if object_hrn == strname:
+            return 
+        if name.startswith(object_hrn + ".") :
+        #if strname.startswith((object_hrn + ".")) is True:
             return
         #if name.startswith(get_authority(name)):
             #return
-    
+
         raise PermissionError(name)
 
     def determine_user_rights(self, caller_hrn, reg_record):
index 07f8680..9ccf18f 100644 (file)
-#----------------------------------------------------------------------\r
-# Copyright (c) 2008 Board of Trustees, Princeton University\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining\r
-# a copy of this software and/or hardware specification (the "Work") to\r
-# deal in the Work without restriction, including without limitation the\r
-# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
-# and/or sell copies of the Work, and to permit persons to whom the Work\r
-# is furnished to do so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be\r
-# included in all copies or substantial portions of the Work.\r
-#\r
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
-# IN THE WORK.\r
-#----------------------------------------------------------------------\r
-##\r
-# Implements SFA Credentials\r
-#\r
-# Credentials are signed XML files that assign a subject gid privileges to an object gid\r
-##\r
-\r
-import os\r
-from types import StringTypes\r
-import datetime\r
-from StringIO import StringIO\r
-from tempfile import mkstemp\r
-from xml.dom.minidom import Document, parseString\r
-\r
-HAVELXML = False\r
-try:\r
-    from lxml import etree\r
-    HAVELXML = True\r
-except:\r
-    pass\r
-\r
-from xml.parsers.expat import ExpatError\r
-\r
-from sfa.util.faults import CredentialNotVerifiable, ChildRightsNotSubsetOfParent\r
-from sfa.util.sfalogging import logger\r
-from sfa.util.sfatime import utcparse\r
-from sfa.trust.credential_legacy import CredentialLegacy\r
-from sfa.trust.rights import Right, Rights, determine_rights\r
-from sfa.trust.gid import GID\r
-from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn\r
-\r
-# 2 weeks, in seconds \r
-DEFAULT_CREDENTIAL_LIFETIME = 86400 * 31\r
-\r
-\r
-# TODO:\r
-# . make privs match between PG and PL\r
-# . Need to add support for other types of credentials, e.g. tickets\r
-# . add namespaces to signed-credential element?\r
-\r
-signature_template = \\r
-'''\r
-<Signature xml:id="Sig_%s" xmlns="http://www.w3.org/2000/09/xmldsig#">\r
-  <SignedInfo>\r
-    <CanonicalizationMethod Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>\r
-    <SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>\r
-    <Reference URI="#%s">\r
-      <Transforms>\r
-        <Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />\r
-      </Transforms>\r
-      <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>\r
-      <DigestValue></DigestValue>\r
-    </Reference>\r
-  </SignedInfo>\r
-  <SignatureValue />\r
-  <KeyInfo>\r
-    <X509Data>\r
-      <X509SubjectName/>\r
-      <X509IssuerSerial/>\r
-      <X509Certificate/>\r
-    </X509Data>\r
-    <KeyValue />\r
-  </KeyInfo>\r
-</Signature>\r
-'''\r
-\r
-# PG formats the template (whitespace) slightly differently.\r
-# Note that they don't include the xmlns in the template, but add it later.\r
-# Otherwise the two are equivalent.\r
-#signature_template_as_in_pg = \\r
-#'''\r
-#<Signature xml:id="Sig_%s" >\r
-# <SignedInfo>\r
-#  <CanonicalizationMethod      Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>\r
-#  <SignatureMethod      Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>\r
-#  <Reference URI="#%s">\r
-#    <Transforms>\r
-#      <Transform         Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />\r
-#    </Transforms>\r
-#    <DigestMethod        Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>\r
-#    <DigestValue></DigestValue>\r
-#    </Reference>\r
-# </SignedInfo>\r
-# <SignatureValue />\r
-# <KeyInfo>\r
-#  <X509Data >\r
-#   <X509SubjectName/>\r
-#   <X509IssuerSerial/>\r
-#   <X509Certificate/>\r
-#  </X509Data>\r
-#  <KeyValue />\r
-# </KeyInfo>\r
-#</Signature>\r
-#'''\r
-\r
-##\r
-# Convert a string into a bool\r
-# used to convert an xsd:boolean to a Python boolean\r
-def str2bool(str):\r
-    if str.lower() in ['true','1']:\r
-        return True\r
-    return False\r
-\r
-\r
-##\r
-# Utility function to get the text of an XML element\r
-\r
-def getTextNode(element, subele):\r
-    sub = element.getElementsByTagName(subele)[0]\r
-    if len(sub.childNodes) > 0:            \r
-        return sub.childNodes[0].nodeValue\r
-    else:\r
-        return None\r
-        \r
-##\r
-# Utility function to set the text of an XML element\r
-# It creates the element, adds the text to it,\r
-# and then appends it to the parent.\r
-\r
-def append_sub(doc, parent, element, text):\r
-    ele = doc.createElement(element)\r
-    ele.appendChild(doc.createTextNode(text))\r
-    parent.appendChild(ele)\r
-\r
-##\r
-# Signature contains information about an xmlsec1 signature\r
-# for a signed-credential\r
-#\r
-\r
-class Signature(object):\r
-   \r
-    def __init__(self, string=None):\r
-        self.refid = None\r
-        self.issuer_gid = None\r
-        self.xml = None\r
-        if string:\r
-            self.xml = string\r
-            self.decode()\r
-\r
-\r
-    def get_refid(self):\r
-        if not self.refid:\r
-            self.decode()\r
-        return self.refid\r
-\r
-    def get_xml(self):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    def set_refid(self, id):\r
-        self.refid = id\r
-\r
-    def get_issuer_gid(self):\r
-        if not self.gid:\r
-            self.decode()\r
-        return self.gid        \r
-\r
-    def set_issuer_gid(self, gid):\r
-        self.gid = gid\r
-\r
-    def decode(self):\r
-        try:\r
-            doc = parseString(self.xml)\r
-        except ExpatError,e:\r
-            logger.log_exc ("Failed to parse credential, %s"%self.xml)\r
-            raise\r
-        sig = doc.getElementsByTagName("Signature")[0]\r
-        self.set_refid(sig.getAttribute("xml:id").strip("Sig_"))\r
-        keyinfo = sig.getElementsByTagName("X509Data")[0]\r
-        szgid = getTextNode(keyinfo, "X509Certificate")\r
-        szgid = "-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----" % szgid\r
-        self.set_issuer_gid(GID(string=szgid))        \r
-        \r
-    def encode(self):\r
-        self.xml = signature_template % (self.get_refid(), self.get_refid())\r
-\r
-\r
-##\r
-# A credential provides a caller gid with privileges to an object gid.\r
-# A signed credential is signed by the object's authority.\r
-#\r
-# Credentials are encoded in one of two ways.  The legacy style places\r
-# it in the subjectAltName of an X509 certificate.  The new credentials\r
-# are placed in signed XML.\r
-#\r
-# WARNING:\r
-# In general, a signed credential obtained externally should\r
-# not be changed else the signature is no longer valid.  So, once\r
-# you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-def filter_creds_by_caller(creds, caller_hrn_list):\r
-        """\r
-        Returns a list of creds who's gid caller matches the\r
-        specified caller hrn\r
-        """\r
-        if not isinstance(creds, list): creds = [creds]\r
-        if not isinstance(caller_hrn_list, list): \r
-            caller_hrn_list = [caller_hrn_list]\r
-        caller_creds = []\r
-        for cred in creds:\r
-            try:\r
-                tmp_cred = Credential(string=cred)\r
-                if tmp_cred.get_gid_caller().get_hrn() in caller_hrn_list:\r
-                    caller_creds.append(cred)\r
-            except: pass\r
-        return caller_creds\r
-\r
-class Credential(object):\r
-\r
-    ##\r
-    # Create a Credential object\r
-    #\r
-    # @param create If true, create a blank x509 certificate\r
-    # @param subject If subject!=None, create an x509 cert with the subject name\r
-    # @param string If string!=None, load the credential from the string\r
-    # @param filename If filename!=None, load the credential from the file\r
-    # FIXME: create and subject are ignored!\r
-    def __init__(self, create=False, subject=None, string=None, filename=None):\r
-        self.gidCaller = None\r
-        self.gidObject = None\r
-        self.expiration = None\r
-        self.privileges = None\r
-        self.issuer_privkey = None\r
-        self.issuer_gid = None\r
-        self.issuer_pubkey = None\r
-        self.parent = None\r
-        self.signature = None\r
-        self.xml = None\r
-        self.refid = None\r
-        self.legacy = None\r
-\r
-        # Check if this is a legacy credential, translate it if so\r
-        if string or filename:\r
-            if string:                \r
-                str = string\r
-            elif filename:\r
-                str = file(filename).read()\r
-                \r
-            if str.strip().startswith("-----"):\r
-                self.legacy = CredentialLegacy(False,string=str)\r
-                self.translate_legacy(str)\r
-            else:\r
-                self.xml = str\r
-                self.decode()\r
-\r
-        # Find an xmlsec1 path\r
-        self.xmlsec_path = ''\r
-        paths = ['/usr/bin','/usr/local/bin','/bin','/opt/bin','/opt/local/bin']\r
-        for path in paths:\r
-            if os.path.isfile(path + '/' + 'xmlsec1'):\r
-                self.xmlsec_path = path + '/' + 'xmlsec1'\r
-                break\r
-\r
-    def get_subject(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        return self.gidObject.get_printable_subject()\r
-\r
-    # sounds like this should be __repr__ instead ??\r
-    def get_summary_tostring(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        obj = self.gidObject.get_printable_subject()\r
-        caller = self.gidCaller.get_printable_subject()\r
-        exp = self.get_expiration()\r
-        # Summarize the rights too? The issuer?\r
-        return "[ Grant %s rights on %s until %s ]" % (caller, obj, exp)\r
-\r
-    def get_signature(self):\r
-        if not self.signature:\r
-            self.decode()\r
-        return self.signature\r
-\r
-    def set_signature(self, sig):\r
-        self.signature = sig\r
-\r
-        \r
-    ##\r
-    # Translate a legacy credential into a new one\r
-    #\r
-    # @param String of the legacy credential\r
-\r
-    def translate_legacy(self, str):\r
-        legacy = CredentialLegacy(False,string=str)\r
-        self.gidCaller = legacy.get_gid_caller()\r
-        self.gidObject = legacy.get_gid_object()\r
-        lifetime = legacy.get_lifetime()\r
-        if not lifetime:\r
-            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))\r
-        else:\r
-            self.set_expiration(int(lifetime))\r
-        self.lifeTime = legacy.get_lifetime()\r
-        self.set_privileges(legacy.get_privileges())\r
-        self.get_privileges().delegate_all_privileges(legacy.get_delegate())\r
-\r
-    ##\r
-    # Need the issuer's private key and name\r
-    # @param key Keypair object containing the private key of the issuer\r
-    # @param gid GID of the issuing authority\r
-\r
-    def set_issuer_keys(self, privkey, gid):\r
-        self.issuer_privkey = privkey\r
-        self.issuer_gid = gid\r
-\r
-\r
-    ##\r
-    # Set this credential's parent\r
-    def set_parent(self, cred):\r
-        self.parent = cred\r
-        self.updateRefID()\r
-\r
-    ##\r
-    # set the GID of the caller\r
-    #\r
-    # @param gid GID object of the caller\r
-\r
-    def set_gid_caller(self, gid):\r
-        self.gidCaller = gid\r
-        # gid origin caller is the caller's gid by default\r
-        self.gidOriginCaller = gid\r
-\r
-    ##\r
-    # get the GID of the object\r
-\r
-    def get_gid_caller(self):\r
-        if not self.gidCaller:\r
-            self.decode()\r
-        return self.gidCaller\r
-\r
-    ##\r
-    # set the GID of the object\r
-    #\r
-    # @param gid GID object of the object\r
-\r
-    def set_gid_object(self, gid):\r
-        self.gidObject = gid\r
-\r
-    ##\r
-    # get the GID of the object\r
-\r
-    def get_gid_object(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        return self.gidObject\r
-            \r
-    ##\r
-    # Expiration: an absolute UTC time of expiration (as either an int or string or datetime)\r
-    # \r
-    def set_expiration(self, expiration):\r
-        if isinstance(expiration, (int, float)):\r
-            self.expiration = datetime.datetime.fromtimestamp(expiration)\r
-        elif isinstance (expiration, datetime.datetime):\r
-            self.expiration = expiration\r
-        elif isinstance (expiration, StringTypes):\r
-            self.expiration = utcparse (expiration)\r
-        else:\r
-            logger.error ("unexpected input type in Credential.set_expiration")\r
-\r
-\r
-    ##\r
-    # get the lifetime of the credential (always in datetime format)\r
-\r
-    def get_expiration(self):\r
-        if not self.expiration:\r
-            self.decode()\r
-        # at this point self.expiration is normalized as a datetime - DON'T call utcparse again\r
-        return self.expiration\r
-\r
-    ##\r
-    # For legacy sake\r
-    def get_lifetime(self):\r
-        return self.get_expiration()\r
\r
-    ##\r
-    # set the privileges\r
-    #\r
-    # @param privs either a comma-separated list of privileges of a Rights object\r
-\r
-    def set_privileges(self, privs):\r
-        if isinstance(privs, str):\r
-            self.privileges = Rights(string = privs)\r
-        else:\r
-            self.privileges = privs        \r
-\r
-    ##\r
-    # return the privileges as a Rights object\r
-\r
-    def get_privileges(self):\r
-        if not self.privileges:\r
-            self.decode()\r
-        return self.privileges\r
-\r
-    ##\r
-    # determine whether the credential allows a particular operation to be\r
-    # performed\r
-    #\r
-    # @param op_name string specifying name of operation ("lookup", "update", etc)\r
-\r
-    def can_perform(self, op_name):\r
-        rights = self.get_privileges()\r
-        \r
-        if not rights:\r
-            return False\r
-\r
-        return rights.can_perform(op_name)\r
-\r
-\r
-    ##\r
-    # Encode the attributes of the credential into an XML string    \r
-    # This should be done immediately before signing the credential.    \r
-    # WARNING:\r
-    # In general, a signed credential obtained externally should\r
-    # not be changed else the signature is no longer valid.  So, once\r
-    # you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-    def encode(self):\r
-        # Create the XML document\r
-        doc = Document()\r
-        signed_cred = doc.createElement("signed-credential")\r
-\r
-# Declare namespaces\r
-# Note that credential/policy.xsd are really the PG schemas\r
-# in a PL namespace.\r
-# Note that delegation of credentials between the 2 only really works\r
-# cause those schemas are identical.\r
-# Also note these PG schemas talk about PG tickets and CM policies.\r
-        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")\r
-        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.planet-lab.org/resources/sfa/credential.xsd")\r
-        signed_cred.setAttribute("xsi:schemaLocation", "http://www.planet-lab.org/resources/sfa/ext/policy/1 http://www.planet-lab.org/resources/sfa/ext/policy/1/policy.xsd")\r
-\r
-# PG says for those last 2:\r
-#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")\r
-#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")\r
-\r
-        doc.appendChild(signed_cred)  \r
-        \r
-        # Fill in the <credential> bit        \r
-        cred = doc.createElement("credential")\r
-        cred.setAttribute("xml:id", self.get_refid())\r
-        signed_cred.appendChild(cred)\r
-        append_sub(doc, cred, "type", "privilege")\r
-        append_sub(doc, cred, "serial", "8")\r
-        append_sub(doc, cred, "owner_gid", self.gidCaller.save_to_string())\r
-        append_sub(doc, cred, "owner_urn", self.gidCaller.get_urn())\r
-        append_sub(doc, cred, "target_gid", self.gidObject.save_to_string())\r
-        append_sub(doc, cred, "target_urn", self.gidObject.get_urn())\r
-        append_sub(doc, cred, "uuid", "")\r
-        if not self.expiration:\r
-            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))\r
-        self.expiration = self.expiration.replace(microsecond=0)\r
-        append_sub(doc, cred, "expires", self.expiration.isoformat())\r
-        privileges = doc.createElement("privileges")\r
-        cred.appendChild(privileges)\r
-\r
-        if self.privileges:\r
-            rights = self.get_privileges()\r
-            for right in rights.rights:\r
-                priv = doc.createElement("privilege")\r
-                append_sub(doc, priv, "name", right.kind)\r
-                append_sub(doc, priv, "can_delegate", str(right.delegate).lower())\r
-                privileges.appendChild(priv)\r
-\r
-        # Add the parent credential if it exists\r
-        if self.parent:\r
-            sdoc = parseString(self.parent.get_xml())\r
-            # If the root node is a signed-credential (it should be), then\r
-            # get all its attributes and attach those to our signed_cred\r
-            # node.\r
-            # Specifically, PG and PLadd attributes for namespaces (which is reasonable),\r
-            # and we need to include those again here or else their signature\r
-            # no longer matches on the credential.\r
-            # We expect three of these, but here we copy them all:\r
-#        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")\r
-# and from PG (PL is equivalent, as shown above):\r
-#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")\r
-#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")\r
-\r
-            # HOWEVER!\r
-            # PL now also declares these, with different URLs, so\r
-            # the code notices those attributes already existed with\r
-            # different values, and complains.\r
-            # This happens regularly on delegation now that PG and\r
-            # PL both declare the namespace with different URLs.\r
-            # If the content ever differs this is a problem,\r
-            # but for now it works - different URLs (values in the attributes)\r
-            # but the same actual schema, so using the PG schema\r
-            # on delegated-to-PL credentials works fine.\r
-\r
-            # Note: you could also not copy attributes\r
-            # which already exist. It appears that both PG and PL\r
-            # will actually validate a slicecred with a parent\r
-            # signed using PG namespaces and a child signed with PL\r
-            # namespaces over the whole thing. But I don't know\r
-            # if that is a bug in xmlsec1, an accident since\r
-            # the contents of the schemas are the same,\r
-            # or something else, but it seems odd. And this works.\r
-            parentRoot = sdoc.documentElement\r
-            if parentRoot.tagName == "signed-credential" and parentRoot.hasAttributes():\r
-                for attrIx in range(0, parentRoot.attributes.length):\r
-                    attr = parentRoot.attributes.item(attrIx)\r
-                    # returns the old attribute of same name that was\r
-                    # on the credential\r
-                    # Below throws InUse exception if we forgot to clone the attribute first\r
-                    oldAttr = signed_cred.setAttributeNode(attr.cloneNode(True))\r
-                    if oldAttr and oldAttr.value != attr.value:\r
-                        msg = "Delegating cred from owner %s to %s over %s replaced attribute %s value '%s' with '%s'" % (self.parent.gidCaller.get_urn(), self.gidCaller.get_urn(), self.gidObject.get_urn(), oldAttr.name, oldAttr.value, attr.value)\r
-                        logger.warn(msg)\r
-                        #raise CredentialNotVerifiable("Can't encode new valid delegated credential: %s" % msg)\r
-\r
-            p_cred = doc.importNode(sdoc.getElementsByTagName("credential")[0], True)\r
-            p = doc.createElement("parent")\r
-            p.appendChild(p_cred)\r
-            cred.appendChild(p)\r
-        # done handling parent credential\r
-\r
-        # Create the <signatures> tag\r
-        signatures = doc.createElement("signatures")\r
-        signed_cred.appendChild(signatures)\r
-\r
-        # Add any parent signatures\r
-        if self.parent:\r
-            for cur_cred in self.get_credential_list()[1:]:\r
-                sdoc = parseString(cur_cred.get_signature().get_xml())\r
-                ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)\r
-                signatures.appendChild(ele)\r
-                \r
-        # Get the finished product\r
-        self.xml = doc.toxml()\r
-\r
-\r
-    def save_to_random_tmp_file(self):       \r
-        fp, filename = mkstemp(suffix='cred', text=True)\r
-        fp = os.fdopen(fp, "w")\r
-        self.save_to_file(filename, save_parents=True, filep=fp)\r
-        return filename\r
-    \r
-    def save_to_file(self, filename, save_parents=True, filep=None):\r
-        if not self.xml:\r
-            self.encode()\r
-        if filep:\r
-            f = filep \r
-        else:\r
-            f = open(filename, "w")\r
-        f.write(self.xml)\r
-        f.close()\r
-\r
-    def save_to_string(self, save_parents=True):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    def get_refid(self):\r
-        if not self.refid:\r
-            self.refid = 'ref0'\r
-        return self.refid\r
-\r
-    def set_refid(self, rid):\r
-        self.refid = rid\r
-\r
-    ##\r
-    # Figure out what refids exist, and update this credential's id\r
-    # so that it doesn't clobber the others.  Returns the refids of\r
-    # the parents.\r
-    \r
-    def updateRefID(self):\r
-        if not self.parent:\r
-            self.set_refid('ref0')\r
-            return []\r
-        \r
-        refs = []\r
-\r
-        next_cred = self.parent\r
-        while next_cred:\r
-            refs.append(next_cred.get_refid())\r
-            if next_cred.parent:\r
-                next_cred = next_cred.parent\r
-            else:\r
-                next_cred = None\r
-\r
-        \r
-        # Find a unique refid for this credential\r
-        rid = self.get_refid()\r
-        while rid in refs:\r
-            val = int(rid[3:])\r
-            rid = "ref%d" % (val + 1)\r
-\r
-        # Set the new refid\r
-        self.set_refid(rid)\r
-\r
-        # Return the set of parent credential ref ids\r
-        return refs\r
-\r
-    def get_xml(self):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    ##\r
-    # Sign the XML file created by encode()\r
-    #\r
-    # WARNING:\r
-    # In general, a signed credential obtained externally should\r
-    # not be changed else the signature is no longer valid.  So, once\r
-    # you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-    def sign(self):\r
-        if not self.issuer_privkey or not self.issuer_gid:\r
-            return\r
-        doc = parseString(self.get_xml())\r
-        sigs = doc.getElementsByTagName("signatures")[0]\r
-\r
-        # Create the signature template to be signed\r
-        signature = Signature()\r
-        signature.set_refid(self.get_refid())\r
-        sdoc = parseString(signature.get_xml())        \r
-        sig_ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)\r
-        sigs.appendChild(sig_ele)\r
-\r
-        self.xml = doc.toxml()\r
-\r
-\r
-        # Split the issuer GID into multiple certificates if it's a chain\r
-        chain = GID(filename=self.issuer_gid)\r
-        gid_files = []\r
-        while chain:\r
-            gid_files.append(chain.save_to_random_tmp_file(False))\r
-            if chain.get_parent():\r
-                chain = chain.get_parent()\r
-            else:\r
-                chain = None\r
-\r
-\r
-        # Call out to xmlsec1 to sign it\r
-        ref = 'Sig_%s' % self.get_refid()\r
-        filename = self.save_to_random_tmp_file()\r
-        signed = os.popen('%s --sign --node-id "%s" --privkey-pem %s,%s %s' \\r
-                 % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)).read()\r
-        os.remove(filename)\r
-\r
-        for gid_file in gid_files:\r
-            os.remove(gid_file)\r
-\r
-        self.xml = signed\r
-\r
-        # This is no longer a legacy credential\r
-        if self.legacy:\r
-            self.legacy = None\r
-\r
-        # Update signatures\r
-        self.decode()       \r
-\r
-        \r
-    ##\r
-    # Retrieve the attributes of the credential from the XML.\r
-    # This is automatically called by the various get_* methods of\r
-    # this class and should not need to be called explicitly.\r
-\r
-    def decode(self):\r
-        if not self.xml:\r
-            return\r
-        doc = parseString(self.xml)\r
-        sigs = []\r
-        signed_cred = doc.getElementsByTagName("signed-credential")\r
-\r
-        # Is this a signed-cred or just a cred?\r
-        if len(signed_cred) > 0:\r
-            creds = signed_cred[0].getElementsByTagName("credential")\r
-            signatures = signed_cred[0].getElementsByTagName("signatures")\r
-            if len(signatures) > 0:\r
-                sigs = signatures[0].getElementsByTagName("Signature")\r
-        else:\r
-            creds = doc.getElementsByTagName("credential")\r
-        \r
-        if creds is None or len(creds) == 0:\r
-            # malformed cred file\r
-            raise CredentialNotVerifiable("Malformed XML: No credential tag found")\r
-\r
-        # Just take the first cred if there are more than one\r
-        cred = creds[0]\r
-\r
-        self.set_refid(cred.getAttribute("xml:id"))\r
-        self.set_expiration(utcparse(getTextNode(cred, "expires")))\r
-        self.gidCaller = GID(string=getTextNode(cred, "owner_gid"))\r
-        self.gidObject = GID(string=getTextNode(cred, "target_gid"))   \r
-\r
-\r
-        # Process privileges\r
-        privs = cred.getElementsByTagName("privileges")[0]\r
-        rlist = Rights()\r
-        for priv in privs.getElementsByTagName("privilege"):\r
-            kind = getTextNode(priv, "name")\r
-            deleg = str2bool(getTextNode(priv, "can_delegate"))\r
-            if kind == '*':\r
-                # Convert * into the default privileges for the credential's type\r
-                # Each inherits the delegatability from the * above\r
-                _ , type = urn_to_hrn(self.gidObject.get_urn())\r
-                rl = determine_rights(type, self.gidObject.get_urn())\r
-                for r in rl.rights:\r
-                    r.delegate = deleg\r
-                    rlist.add(r)\r
-            else:\r
-                rlist.add(Right(kind.strip(), deleg))\r
-        self.set_privileges(rlist)\r
-\r
-\r
-        # Is there a parent?\r
-        parent = cred.getElementsByTagName("parent")\r
-        if len(parent) > 0:\r
-            parent_doc = parent[0].getElementsByTagName("credential")[0]\r
-            parent_xml = parent_doc.toxml()\r
-            self.parent = Credential(string=parent_xml)\r
-            self.updateRefID()\r
-\r
-        # Assign the signatures to the credentials\r
-        for sig in sigs:\r
-            Sig = Signature(string=sig.toxml())\r
-\r
-            for cur_cred in self.get_credential_list():\r
-                if cur_cred.get_refid() == Sig.get_refid():\r
-                    cur_cred.set_signature(Sig)\r
-                                    \r
-            \r
-    ##\r
-    # Verify\r
-    #   trusted_certs: A list of trusted GID filenames (not GID objects!) \r
-    #                  Chaining is not supported within the GIDs by xmlsec1.\r
-    #\r
-    #   trusted_certs_required: Should usually be true. Set False means an\r
-    #                 empty list of trusted_certs would still let this method pass.\r
-    #                 It just skips xmlsec1 verification et al. Only used by some utils\r
-    #    \r
-    # Verify that:\r
-    # . All of the signatures are valid and that the issuers trace back\r
-    #   to trusted roots (performed by xmlsec1)\r
-    # . The XML matches the credential schema\r
-    # . That the issuer of the credential is the authority in the target's urn\r
-    #    . In the case of a delegated credential, this must be true of the root\r
-    # . That all of the gids presented in the credential are valid\r
-    #    . Including verifying GID chains, and includ the issuer\r
-    # . The credential is not expired\r
-    #\r
-    # -- For Delegates (credentials with parents)\r
-    # . The privileges must be a subset of the parent credentials\r
-    # . The privileges must have "can_delegate" set for each delegated privilege\r
-    # . The target gid must be the same between child and parents\r
-    # . The expiry time on the child must be no later than the parent\r
-    # . The signer of the child must be the owner of the parent\r
-    #\r
-    # -- Verify does *NOT*\r
-    # . ensure that an xmlrpc client's gid matches a credential gid, that\r
-    #   must be done elsewhere\r
-    #\r
-    # @param trusted_certs: The certificates of trusted CA certificates\r
-    def verify(self, trusted_certs=None, schema=None, trusted_certs_required=True):\r
-        if not self.xml:\r
-            self.decode()\r
-\r
-        # validate against RelaxNG schema\r
-        if HAVELXML and not self.legacy:\r
-            if schema and os.path.exists(schema):\r
-                tree = etree.parse(StringIO(self.xml))\r
-                schema_doc = etree.parse(schema)\r
-                xmlschema = etree.XMLSchema(schema_doc)\r
-                if not xmlschema.validate(tree):\r
-                    error = xmlschema.error_log.last_error\r
-                    message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)\r
-                    raise CredentialNotVerifiable(message)\r
-\r
-        if trusted_certs_required and trusted_certs is None:\r
-            trusted_certs = []\r
-\r
-#        trusted_cert_objects = [GID(filename=f) for f in trusted_certs]\r
-        trusted_cert_objects = []\r
-        ok_trusted_certs = []\r
-        # If caller explicitly passed in None that means skip cert chain validation.\r
-        # Strange and not typical\r
-        if trusted_certs is not None:\r
-            for f in trusted_certs:\r
-                try:\r
-                    # Failures here include unreadable files\r
-                    # or non PEM files\r
-                    trusted_cert_objects.append(GID(filename=f))\r
-                    ok_trusted_certs.append(f)\r
-                except Exception, exc:\r
-                    logger.error("Failed to load trusted cert from %s: %r", f, exc)\r
-            trusted_certs = ok_trusted_certs\r
-\r
-        # Use legacy verification if this is a legacy credential\r
-        if self.legacy:\r
-            self.legacy.verify_chain(trusted_cert_objects)\r
-            if self.legacy.client_gid:\r
-                self.legacy.client_gid.verify_chain(trusted_cert_objects)\r
-            if self.legacy.object_gid:\r
-                self.legacy.object_gid.verify_chain(trusted_cert_objects)\r
-            return True\r
-        \r
-        # make sure it is not expired\r
-        if self.get_expiration() < datetime.datetime.utcnow():\r
-            raise CredentialNotVerifiable("Credential %s expired at %s" % (self.get_summary_tostring(), self.expiration.isoformat()))\r
-\r
-        # Verify the signatures\r
-        filename = self.save_to_random_tmp_file()\r
-        if trusted_certs is not None:\r
-            cert_args = " ".join(['--trusted-pem %s' % x for x in trusted_certs])\r
-\r
-        # If caller explicitly passed in None that means skip cert chain validation.\r
-        # - Strange and not typical\r
-        if trusted_certs is not None:\r
-            # Verify the gids of this cred and of its parents\r
-            for cur_cred in self.get_credential_list():\r
-                cur_cred.get_gid_object().verify_chain(trusted_cert_objects)\r
-                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)\r
-\r
-        refs = []\r
-        refs.append("Sig_%s" % self.get_refid())\r
-\r
-        parentRefs = self.updateRefID()\r
-        for ref in parentRefs:\r
-            refs.append("Sig_%s" % ref)\r
-\r
-        for ref in refs:\r
-            # If caller explicitly passed in None that means skip xmlsec1 validation.\r
-            # Strange and not typical\r
-            if trusted_certs is None:\r
-                break\r
-\r
-#            print "Doing %s --verify --node-id '%s' %s %s 2>&1" % \\r
-#                (self.xmlsec_path, ref, cert_args, filename)\r
-            verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \\r
-                            % (self.xmlsec_path, ref, cert_args, filename)).read()\r
-            if not verified.strip().startswith("OK"):\r
-                # xmlsec errors have a msg= which is the interesting bit.\r
-                mstart = verified.find("msg=")\r
-                msg = ""\r
-                if mstart > -1 and len(verified) > 4:\r
-                    mstart = mstart + 4\r
-                    mend = verified.find('\\', mstart)\r
-                    msg = verified[mstart:mend]\r
-                raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))\r
-        os.remove(filename)\r
-\r
-        # Verify the parents (delegation)\r
-        if self.parent:\r
-            self.verify_parent(self.parent)\r
-\r
-        # Make sure the issuer is the target's authority, and is\r
-        # itself a valid GID\r
-        self.verify_issuer(trusted_cert_objects)\r
-        return True\r
-\r
-    ##\r
-    # Creates a list of the credential and its parents, with the root \r
-    # (original delegated credential) as the last item in the list\r
-    def get_credential_list(self):    \r
-        cur_cred = self\r
-        list = []\r
-        while cur_cred:\r
-            list.append(cur_cred)\r
-            if cur_cred.parent:\r
-                cur_cred = cur_cred.parent\r
-            else:\r
-                cur_cred = None\r
-        return list\r
-    \r
-    ##\r
-    # Make sure the credential's target gid (a) was signed by or (b)\r
-    # is the same as the entity that signed the original credential,\r
-    # or (c) is an authority over the target's namespace.\r
-    # Also ensure that the credential issuer / signer itself has a valid\r
-    # GID signature chain (signed by an authority with namespace rights).\r
-    def verify_issuer(self, trusted_gids):\r
-        root_cred = self.get_credential_list()[-1]\r
-        root_target_gid = root_cred.get_gid_object()\r
-        root_cred_signer = root_cred.get_signature().get_issuer_gid()\r
-\r
-        # Case 1:\r
-        # Allow non authority to sign target and cred about target.\r
-        #\r
-        # Why do we need to allow non authorities to sign?\r
-        # If in the target gid validation step we correctly\r
-        # checked that the target is only signed by an authority,\r
-        # then this is just a special case of case 3.\r
-        # This short-circuit is the common case currently -\r
-        # and cause GID validation doesn't check 'authority',\r
-        # this allows users to generate valid slice credentials.\r
-        if root_target_gid.is_signed_by_cert(root_cred_signer):\r
-            # cred signer matches target signer, return success\r
-            return\r
-\r
-        # Case 2:\r
-        # Allow someone to sign credential about themeselves. Used?\r
-        # If not, remove this.\r
-        #root_target_gid_str = root_target_gid.save_to_string()\r
-        #root_cred_signer_str = root_cred_signer.save_to_string()\r
-        #if root_target_gid_str == root_cred_signer_str:\r
-        #    # cred signer is target, return success\r
-        #    return\r
-\r
-        # Case 3:\r
-\r
-        # root_cred_signer is not the target_gid\r
-        # So this is a different gid that we have not verified.\r
-        # xmlsec1 verified the cert chain on this already, but\r
-        # it hasn't verified that the gid meets the HRN namespace\r
-        # requirements.\r
-        # Below we'll ensure that it is an authority.\r
-        # But we haven't verified that it is _signed by_ an authority\r
-        # We also don't know if xmlsec1 requires that cert signers\r
-        # are marked as CAs.\r
-\r
-        # Note that if verify() gave us no trusted_gids then this\r
-        # call will fail. So skip it if we have no trusted_gids\r
-        if trusted_gids and len(trusted_gids) > 0:\r
-            root_cred_signer.verify_chain(trusted_gids)\r
-        else:\r
-            logger.debug("No trusted gids. Cannot verify that cred signer is signed by a trusted authority. Skipping that check.")\r
-\r
-        # See if the signer is an authority over the domain of the target.\r
-        # There are multiple types of authority - accept them all here\r
-        # Maybe should be (hrn, type) = urn_to_hrn(root_cred_signer.get_urn())\r
-        root_cred_signer_type = root_cred_signer.get_type()\r
-        if (root_cred_signer_type.find('authority') == 0):\r
-            #logger.debug('Cred signer is an authority')\r
-            # signer is an authority, see if target is in authority's domain\r
-            signerhrn = root_cred_signer.get_hrn()\r
-            if hrn_authfor_hrn(signerhrn, root_target_gid.get_hrn()):\r
-                return\r
-\r
-        # We've required that the credential be signed by an authority\r
-        # for that domain. Reasonable and probably correct.\r
-        # A looser model would also allow the signer to be an authority\r
-        # in my control framework - eg My CA or CH. Even if it is not\r
-        # the CH that issued these, eg, user credentials.\r
-\r
-        # Give up, credential does not pass issuer verification\r
-\r
-        raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred signer %s not the trusted authority for Cred target %s" % (self.gidCaller.get_urn(), self.gidObject.get_urn(), root_cred_signer.get_hrn(), root_target_gid.get_hrn()))\r
-\r
-\r
-    ##\r
-    # -- For Delegates (credentials with parents) verify that:\r
-    # . The privileges must be a subset of the parent credentials\r
-    # . The privileges must have "can_delegate" set for each delegated privilege\r
-    # . The target gid must be the same between child and parents\r
-    # . The expiry time on the child must be no later than the parent\r
-    # . The signer of the child must be the owner of the parent        \r
-    def verify_parent(self, parent_cred):\r
-        # make sure the rights given to the child are a subset of the\r
-        # parents rights (and check delegate bits)\r
-        if not parent_cred.get_privileges().is_superset(self.get_privileges()):\r
-            raise ChildRightsNotSubsetOfParent(("Parent cred ref %s rights " % parent_cred.get_refid()) +\r
-                self.parent.get_privileges().save_to_string() + (" not superset of delegated cred %s ref %s rights " % (self.get_summary_tostring(), self.get_refid())) +\r
-                self.get_privileges().save_to_string())\r
-\r
-        # make sure my target gid is the same as the parent's\r
-        if not parent_cred.get_gid_object().save_to_string() == \\r
-           self.get_gid_object().save_to_string():\r
-            raise CredentialNotVerifiable("Delegated cred %s: Target gid not equal between parent and child. Parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-\r
-        # make sure my expiry time is <= my parent's\r
-        if not parent_cred.get_expiration() >= self.get_expiration():\r
-            raise CredentialNotVerifiable("Delegated credential %s expires after parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-\r
-        # make sure my signer is the parent's caller\r
-        if not parent_cred.get_gid_caller().save_to_string(False) == \\r
-           self.get_signature().get_issuer_gid().save_to_string(False):\r
-            raise CredentialNotVerifiable("Delegated credential %s not signed by parent %s's caller" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-                \r
-        # Recurse\r
-        if parent_cred.parent:\r
-            parent_cred.verify_parent(parent_cred.parent)\r
-\r
-\r
-    def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):\r
-        """\r
-        Return a delegated copy of this credential, delegated to the \r
-        specified gid's user.    \r
-        """\r
-        # get the gid of the object we are delegating\r
-        object_gid = self.get_gid_object()\r
-        object_hrn = object_gid.get_hrn()        \r
\r
-        # the hrn of the user who will be delegated to\r
-        delegee_gid = GID(filename=delegee_gidfile)\r
-        delegee_hrn = delegee_gid.get_hrn()\r
-  \r
-        #user_key = Keypair(filename=keyfile)\r
-        #user_hrn = self.get_gid_caller().get_hrn()\r
-        subject_string = "%s delegated to %s" % (object_hrn, delegee_hrn)\r
-        dcred = Credential(subject=subject_string)\r
-        dcred.set_gid_caller(delegee_gid)\r
-        dcred.set_gid_object(object_gid)\r
-        dcred.set_parent(self)\r
-        dcred.set_expiration(self.get_expiration())\r
-        dcred.set_privileges(self.get_privileges())\r
-        dcred.get_privileges().delegate_all_privileges(True)\r
-        #dcred.set_issuer_keys(keyfile, delegee_gidfile)\r
-        dcred.set_issuer_keys(caller_keyfile, caller_gidfile)\r
-        dcred.encode()\r
-        dcred.sign()\r
-\r
-        return dcred\r
-\r
-    # only informative\r
-    def get_filename(self):\r
-        return getattr(self,'filename',None)\r
-\r
-    ##\r
-    # Dump the contents of a credential to stdout in human-readable format\r
-    #\r
-    # @param dump_parents If true, also dump the parent certificates\r
-    def dump (self, *args, **kwargs):\r
-        print self.dump_string(*args, **kwargs)\r
-\r
-\r
-    def dump_string(self, dump_parents=False):\r
-        result=""\r
-        result += "CREDENTIAL %s\n" % self.get_subject()\r
-        filename=self.get_filename()\r
-        if filename: result += "Filename %s\n"%filename\r
-        result += "      privs: %s\n" % self.get_privileges().save_to_string()\r
-        gidCaller = self.get_gid_caller()\r
-        if gidCaller:\r
-            result += "  gidCaller:\n"\r
-            result += gidCaller.dump_string(8, dump_parents)\r
-\r
-        if self.get_signature():\r
-            print "  gidIssuer:"\r
-            self.get_signature().get_issuer_gid().dump(8, dump_parents)\r
-\r
-        gidObject = self.get_gid_object()\r
-        if gidObject:\r
-            result += "  gidObject:\n"\r
-            result += gidObject.dump_string(8, dump_parents)\r
-\r
-        if self.parent and dump_parents:\r
-            result += "\nPARENT"\r
-            result += self.parent.dump_string(True)\r
-\r
-        return result\r
+#----------------------------------------------------------------------
+# Copyright (c) 2008 Board of Trustees, Princeton University
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and/or hardware specification (the "Work") to
+# deal in the Work without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Work, and to permit persons to whom the Work
+# is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Work.
+#
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS 
+# IN THE WORK.
+#----------------------------------------------------------------------
+##
+# Implements SFA Credentials
+#
+# Credentials are signed XML files that assign a subject gid privileges to an object gid
+##
+
+import os
+from types import StringTypes
+import datetime
+from StringIO import StringIO
+from tempfile import mkstemp
+from xml.dom.minidom import Document, parseString
+
+HAVELXML = False
+try:
+    from lxml import etree
+    HAVELXML = True
+except:
+    pass
+
+from xml.parsers.expat import ExpatError
+
+from sfa.util.faults import CredentialNotVerifiable, ChildRightsNotSubsetOfParent
+from sfa.util.sfalogging import logger
+from sfa.util.sfatime import utcparse
+from sfa.trust.credential_legacy import CredentialLegacy
+from sfa.trust.rights import Right, Rights, determine_rights
+from sfa.trust.gid import GID
+from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn
+
+# 31 days, in seconds
+DEFAULT_CREDENTIAL_LIFETIME = 86400 * 31
+
+
+# TODO:
+# . make privs match between PG and PL
+# . Need to add support for other types of credentials, e.g. tickets
+# . add namespaces to signed-credential element?
+
+signature_template = \
+'''
+<Signature xml:id="Sig_%s" xmlns="http://www.w3.org/2000/09/xmldsig#">
+  <SignedInfo>
+    <CanonicalizationMethod Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>
+    <SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+    <Reference URI="#%s">
+      <Transforms>
+        <Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
+      </Transforms>
+      <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+      <DigestValue></DigestValue>
+    </Reference>
+  </SignedInfo>
+  <SignatureValue />
+  <KeyInfo>
+    <X509Data>
+      <X509SubjectName/>
+      <X509IssuerSerial/>
+      <X509Certificate/>
+    </X509Data>
+    <KeyValue />
+  </KeyInfo>
+</Signature>
+'''
+
+# PG formats the template (whitespace) slightly differently.
+# Note that they don't include the xmlns in the template, but add it later.
+# Otherwise the two are equivalent.
+#signature_template_as_in_pg = \
+#'''
+#<Signature xml:id="Sig_%s" >
+# <SignedInfo>
+#  <CanonicalizationMethod      Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>
+#  <SignatureMethod      Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+#  <Reference URI="#%s">
+#    <Transforms>
+#      <Transform         Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
+#    </Transforms>
+#    <DigestMethod        Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+#    <DigestValue></DigestValue>
+#    </Reference>
+# </SignedInfo>
+# <SignatureValue />
+# <KeyInfo>
+#  <X509Data >
+#   <X509SubjectName/>
+#   <X509IssuerSerial/>
+#   <X509Certificate/>
+#  </X509Data>
+#  <KeyValue />
+# </KeyInfo>
+#</Signature>
+#'''
+
+##
+# Convert a string into a bool
+# used to convert an xsd:boolean to a Python boolean
+def str2bool(str):
+    if str.lower() in ['true','1']:
+        return True
+    return False
+
+
+##
+# Utility function to get the text of an XML element
+
+def getTextNode(element, subele):
+    sub = element.getElementsByTagName(subele)[0]
+    if len(sub.childNodes) > 0:            
+        return sub.childNodes[0].nodeValue
+    else:
+        return None
+        
+##
+# Utility function to set the text of an XML element
+# It creates the element, adds the text to it,
+# and then appends it to the parent.
+
+def append_sub(doc, parent, element, text):
+    ele = doc.createElement(element)
+    ele.appendChild(doc.createTextNode(text))
+    parent.appendChild(ele)
+
+##
+# Signature contains information about an xmlsec1 signature
+# for a signed-credential
+#
+
+class Signature(object):
+   
+    def __init__(self, string=None):
+        self.refid = None
+        self.issuer_gid = None
+        self.xml = None
+        if string:
+            self.xml = string
+            self.decode()
+
+
+    def get_refid(self):
+        if not self.refid:
+            self.decode()
+        return self.refid
+
+    def get_xml(self):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    def set_refid(self, id):
+        self.refid = id
+
+    def get_issuer_gid(self):
+        if not self.gid:
+            self.decode()
+        return self.gid        
+
+    def set_issuer_gid(self, gid):
+        self.gid = gid
+
+    def decode(self):
+        try:
+            doc = parseString(self.xml)
+        except ExpatError,e:
+            logger.log_exc ("Failed to parse credential, %s"%self.xml)
+            raise
+        sig = doc.getElementsByTagName("Signature")[0]
+        self.set_refid(sig.getAttribute("xml:id").strip("Sig_"))
+        keyinfo = sig.getElementsByTagName("X509Data")[0]
+        szgid = getTextNode(keyinfo, "X509Certificate")
+        szgid = "-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----" % szgid
+        self.set_issuer_gid(GID(string=szgid))        
+        
+    def encode(self):
+        self.xml = signature_template % (self.get_refid(), self.get_refid())
+
+
+##
+# A credential provides a caller gid with privileges to an object gid.
+# A signed credential is signed by the object's authority.
+#
+# Credentials are encoded in one of two ways.  The legacy style places
+# it in the subjectAltName of an X509 certificate.  The new credentials
+# are placed in signed XML.
+#
+# WARNING:
+# In general, a signed credential obtained externally should
+# not be changed else the signature is no longer valid.  So, once
+# you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+def filter_creds_by_caller(creds, caller_hrn_list):
+        """
+        Returns a list of creds whose gid caller matches the
+        specified caller hrn
+        """
+        if not isinstance(creds, list): creds = [creds]
+        if not isinstance(caller_hrn_list, list): 
+            caller_hrn_list = [caller_hrn_list]
+        caller_creds = []
+        for cred in creds:
+            try:
+                tmp_cred = Credential(string=cred)
+                if tmp_cred.get_gid_caller().get_hrn() in caller_hrn_list:
+                    caller_creds.append(cred)
+            except: pass
+        return caller_creds
+
+class Credential(object):
+
+    ##
+    # Create a Credential object
+    #
+    # @param create If true, create a blank x509 certificate
+    # @param subject If subject!=None, create an x509 cert with the subject name
+    # @param string If string!=None, load the credential from the string
+    # @param filename If filename!=None, load the credential from the file
+    # FIXME: create and subject are ignored!
+    def __init__(self, create=False, subject=None, string=None, filename=None):
+        self.gidCaller = None
+        self.gidObject = None
+        self.expiration = None
+        self.privileges = None
+        self.issuer_privkey = None
+        self.issuer_gid = None
+        self.issuer_pubkey = None
+        self.parent = None
+        self.signature = None
+        self.xml = None
+        self.refid = None
+        self.legacy = None
+
+        # Check if this is a legacy credential, translate it if so
+        if string or filename:
+            if string:                
+                str = string
+            elif filename:
+                str = file(filename).read()
+                
+            if str.strip().startswith("-----"):
+                self.legacy = CredentialLegacy(False,string=str)
+                self.translate_legacy(str)
+            else:
+                self.xml = str
+                self.decode()
+
+        # Find an xmlsec1 path
+        self.xmlsec_path = ''
+        paths = ['/usr/bin','/usr/local/bin','/bin','/opt/bin','/opt/local/bin']
+        for path in paths:
+            if os.path.isfile(path + '/' + 'xmlsec1'):
+                self.xmlsec_path = path + '/' + 'xmlsec1'
+                break
+
+    def get_subject(self):
+        if not self.gidObject:
+            self.decode()
+        return self.gidObject.get_printable_subject()
+
+    # sounds like this should be __repr__ instead ??
+    def get_summary_tostring(self):
+        if not self.gidObject:
+            self.decode()
+        obj = self.gidObject.get_printable_subject()
+        caller = self.gidCaller.get_printable_subject()
+        exp = self.get_expiration()
+        # Summarize the rights too? The issuer?
+        return "[ Grant %s rights on %s until %s ]" % (caller, obj, exp)
+
+    def get_signature(self):
+        if not self.signature:
+            self.decode()
+        return self.signature
+
+    def set_signature(self, sig):
+        self.signature = sig
+
+        
+    ##
+    # Translate a legacy credential into a new one
+    #
+    # @param String of the legacy credential
+
+    def translate_legacy(self, str):
+        legacy = CredentialLegacy(False,string=str)
+        self.gidCaller = legacy.get_gid_caller()
+        self.gidObject = legacy.get_gid_object()
+        lifetime = legacy.get_lifetime()
+        if not lifetime:
+            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))
+        else:
+            self.set_expiration(int(lifetime))
+        self.lifeTime = legacy.get_lifetime()
+        self.set_privileges(legacy.get_privileges())
+        self.get_privileges().delegate_all_privileges(legacy.get_delegate())
+
+    ##
+    # Need the issuer's private key and name
+    # @param key Keypair object containing the private key of the issuer
+    # @param gid GID of the issuing authority
+
+    def set_issuer_keys(self, privkey, gid):
+        self.issuer_privkey = privkey
+        self.issuer_gid = gid
+
+
+    ##
+    # Set this credential's parent
+    def set_parent(self, cred):
+        self.parent = cred
+        self.updateRefID()
+
+    ##
+    # set the GID of the caller
+    #
+    # @param gid GID object of the caller
+
+    def set_gid_caller(self, gid):
+        self.gidCaller = gid
+        # gid origin caller is the caller's gid by default
+        self.gidOriginCaller = gid
+
+    ##
+    # get the GID of the object
+
+    def get_gid_caller(self):
+        if not self.gidCaller:
+            self.decode()
+        return self.gidCaller
+
+    ##
+    # set the GID of the object
+    #
+    # @param gid GID object of the object
+
+    def set_gid_object(self, gid):
+        self.gidObject = gid
+
+    ##
+    # get the GID of the object
+
+    def get_gid_object(self):
+        if not self.gidObject:
+            self.decode()
+        return self.gidObject
+            
+    ##
+    # Expiration: an absolute UTC time of expiration (as either an int or string or datetime)
+    # 
+    def set_expiration(self, expiration):
+        if isinstance(expiration, (int, float)):
+            self.expiration = datetime.datetime.fromtimestamp(expiration)
+        elif isinstance (expiration, datetime.datetime):
+            self.expiration = expiration
+        elif isinstance (expiration, StringTypes):
+            self.expiration = utcparse (expiration)
+        else:
+            logger.error ("unexpected input type in Credential.set_expiration")
+
+
+    ##
+    # get the lifetime of the credential (always in datetime format)
+
+    def get_expiration(self):
+        if not self.expiration:
+            self.decode()
+        # at this point self.expiration is normalized as a datetime - DON'T call utcparse again
+        return self.expiration
+
+    ##
+    # For legacy sake
+    def get_lifetime(self):
+        return self.get_expiration()
+    ##
+    # set the privileges
+    #
+    # @param privs either a comma-separated list of privileges or a Rights object
+
+    def set_privileges(self, privs):
+        if isinstance(privs, str):
+            self.privileges = Rights(string = privs)
+        else:
+            self.privileges = privs        
+
+    ##
+    # return the privileges as a Rights object
+
+    def get_privileges(self):
+        if not self.privileges:
+            self.decode()
+        return self.privileges
+
+    ##
+    # determine whether the credential allows a particular operation to be
+    # performed
+    #
+    # @param op_name string specifying name of operation ("lookup", "update", etc)
+
+    def can_perform(self, op_name):
+        rights = self.get_privileges()
+        
+        if not rights:
+            return False
+
+        return rights.can_perform(op_name)
+
+
+    ##
+    # Encode the attributes of the credential into an XML string    
+    # This should be done immediately before signing the credential.    
+    # WARNING:
+    # In general, a signed credential obtained externally should
+    # not be changed else the signature is no longer valid.  So, once
+    # you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+    def encode(self):
+        # Create the XML document
+        doc = Document()
+        signed_cred = doc.createElement("signed-credential")
+
+# Declare namespaces
+# Note that credential/policy.xsd are really the PG schemas
+# in a PL namespace.
+# Note that delegation of credentials between the 2 only really works
+# cause those schemas are identical.
+# Also note these PG schemas talk about PG tickets and CM policies.
+        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
+        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.planet-lab.org/resources/sfa/credential.xsd")
+        signed_cred.setAttribute("xsi:schemaLocation", "http://www.planet-lab.org/resources/sfa/ext/policy/1 http://www.planet-lab.org/resources/sfa/ext/policy/1/policy.xsd")
+
+# PG says for those last 2:
+#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")
+#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")
+
+        doc.appendChild(signed_cred)  
+        
+        # Fill in the <credential> bit        
+        cred = doc.createElement("credential")
+        cred.setAttribute("xml:id", self.get_refid())
+        signed_cred.appendChild(cred)
+        append_sub(doc, cred, "type", "privilege")
+        append_sub(doc, cred, "serial", "8")
+        append_sub(doc, cred, "owner_gid", self.gidCaller.save_to_string())
+        append_sub(doc, cred, "owner_urn", self.gidCaller.get_urn())
+        append_sub(doc, cred, "target_gid", self.gidObject.save_to_string())
+        append_sub(doc, cred, "target_urn", self.gidObject.get_urn())
+        append_sub(doc, cred, "uuid", "")
+        if not self.expiration:
+            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))
+        self.expiration = self.expiration.replace(microsecond=0)
+        append_sub(doc, cred, "expires", self.expiration.isoformat())
+        privileges = doc.createElement("privileges")
+        cred.appendChild(privileges)
+
+        if self.privileges:
+            rights = self.get_privileges()
+            for right in rights.rights:
+                priv = doc.createElement("privilege")
+                append_sub(doc, priv, "name", right.kind)
+                append_sub(doc, priv, "can_delegate", str(right.delegate).lower())
+                privileges.appendChild(priv)
+
+        # Add the parent credential if it exists
+        if self.parent:
+            sdoc = parseString(self.parent.get_xml())
+            # If the root node is a signed-credential (it should be), then
+            # get all its attributes and attach those to our signed_cred
+            # node.
+            # Specifically, PG and PL add attributes for namespaces (which is reasonable),
+            # and we need to include those again here or else their signature
+            # no longer matches on the credential.
+            # We expect three of these, but here we copy them all:
+#        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
+# and from PG (PL is equivalent, as shown above):
+#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")
+#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")
+
+            # HOWEVER!
+            # PL now also declares these, with different URLs, so
+            # the code notices those attributes already existed with
+            # different values, and complains.
+            # This happens regularly on delegation now that PG and
+            # PL both declare the namespace with different URLs.
+            # If the content ever differs this is a problem,
+            # but for now it works - different URLs (values in the attributes)
+            # but the same actual schema, so using the PG schema
+            # on delegated-to-PL credentials works fine.
+
+            # Note: you could also not copy attributes
+            # which already exist. It appears that both PG and PL
+            # will actually validate a slicecred with a parent
+            # signed using PG namespaces and a child signed with PL
+            # namespaces over the whole thing. But I don't know
+            # if that is a bug in xmlsec1, an accident since
+            # the contents of the schemas are the same,
+            # or something else, but it seems odd. And this works.
+            parentRoot = sdoc.documentElement
+            if parentRoot.tagName == "signed-credential" and parentRoot.hasAttributes():
+                for attrIx in range(0, parentRoot.attributes.length):
+                    attr = parentRoot.attributes.item(attrIx)
+                    # returns the old attribute of same name that was
+                    # on the credential
+                    # Below throws InUse exception if we forgot to clone the attribute first
+                    oldAttr = signed_cred.setAttributeNode(attr.cloneNode(True))
+                    if oldAttr and oldAttr.value != attr.value:
+                        msg = "Delegating cred from owner %s to %s over %s replaced attribute %s value '%s' with '%s'" % (self.parent.gidCaller.get_urn(), self.gidCaller.get_urn(), self.gidObject.get_urn(), oldAttr.name, oldAttr.value, attr.value)
+                        logger.warn(msg)
+                        #raise CredentialNotVerifiable("Can't encode new valid delegated credential: %s" % msg)
+
+            p_cred = doc.importNode(sdoc.getElementsByTagName("credential")[0], True)
+            p = doc.createElement("parent")
+            p.appendChild(p_cred)
+            cred.appendChild(p)
+        # done handling parent credential
+
+        # Create the <signatures> tag
+        signatures = doc.createElement("signatures")
+        signed_cred.appendChild(signatures)
+
+        # Add any parent signatures
+        if self.parent:
+            for cur_cred in self.get_credential_list()[1:]:
+                sdoc = parseString(cur_cred.get_signature().get_xml())
+                ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)
+                signatures.appendChild(ele)
+                
+        # Get the finished product
+        self.xml = doc.toxml()
+
+
+    def save_to_random_tmp_file(self):       
+        fp, filename = mkstemp(suffix='cred', text=True)
+        fp = os.fdopen(fp, "w")
+        self.save_to_file(filename, save_parents=True, filep=fp)
+        return filename
+    
+    def save_to_file(self, filename, save_parents=True, filep=None):
+        if not self.xml:
+            self.encode()
+        if filep:
+            f = filep 
+        else:
+            f = open(filename, "w")
+        f.write(self.xml)
+        f.close()
+
+    def save_to_string(self, save_parents=True):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    def get_refid(self):
+        if not self.refid:
+            self.refid = 'ref0'
+        return self.refid
+
+    def set_refid(self, rid):
+        self.refid = rid
+
+    ##
+    # Figure out what refids exist, and update this credential's id
+    # so that it doesn't clobber the others.  Returns the refids of
+    # the parents.
+    
+    def updateRefID(self):
+        if not self.parent:
+            self.set_refid('ref0')
+            return []
+        
+        refs = []
+
+        next_cred = self.parent
+        while next_cred:
+            refs.append(next_cred.get_refid())
+            if next_cred.parent:
+                next_cred = next_cred.parent
+            else:
+                next_cred = None
+
+        
+        # Find a unique refid for this credential
+        rid = self.get_refid()
+        while rid in refs:
+            val = int(rid[3:])
+            rid = "ref%d" % (val + 1)
+
+        # Set the new refid
+        self.set_refid(rid)
+
+        # Return the set of parent credential ref ids
+        return refs
+
+    def get_xml(self):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    ##
+    # Sign the XML file created by encode()
+    #
+    # WARNING:
+    # In general, a signed credential obtained externally should
+    # not be changed else the signature is no longer valid.  So, once
+    # you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+    def sign(self):
+        if not self.issuer_privkey or not self.issuer_gid:
+            return
+        doc = parseString(self.get_xml())
+        sigs = doc.getElementsByTagName("signatures")[0]
+
+        # Create the signature template to be signed
+        signature = Signature()
+        signature.set_refid(self.get_refid())
+        sdoc = parseString(signature.get_xml())        
+        sig_ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)
+        sigs.appendChild(sig_ele)
+
+        self.xml = doc.toxml()
+
+
+        # Split the issuer GID into multiple certificates if it's a chain
+        chain = GID(filename=self.issuer_gid)
+        gid_files = []
+        while chain:
+            gid_files.append(chain.save_to_random_tmp_file(False))
+            if chain.get_parent():
+                chain = chain.get_parent()
+            else:
+                chain = None
+
+
+        # Call out to xmlsec1 to sign it
+        ref = 'Sig_%s' % self.get_refid()
+        filename = self.save_to_random_tmp_file()
+        signed = os.popen('%s --sign --node-id "%s" --privkey-pem %s,%s %s' \
+                 % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)).read()
+        os.remove(filename)
+
+        for gid_file in gid_files:
+            os.remove(gid_file)
+
+        self.xml = signed
+
+        # This is no longer a legacy credential
+        if self.legacy:
+            self.legacy = None
+
+        # Update signatures
+        self.decode()       
+
+        
+    ##
+    # Retrieve the attributes of the credential from the XML.
+    # This is automatically called by the various get_* methods of
+    # this class and should not need to be called explicitly.
+
+    def decode(self):
+        if not self.xml:
+            return
+        doc = parseString(self.xml)
+        sigs = []
+        signed_cred = doc.getElementsByTagName("signed-credential")
+
+        # Is this a signed-cred or just a cred?
+        if len(signed_cred) > 0:
+            creds = signed_cred[0].getElementsByTagName("credential")
+            signatures = signed_cred[0].getElementsByTagName("signatures")
+            if len(signatures) > 0:
+                sigs = signatures[0].getElementsByTagName("Signature")
+        else:
+            creds = doc.getElementsByTagName("credential")
+        
+        if creds is None or len(creds) == 0:
+            # malformed cred file
+            raise CredentialNotVerifiable("Malformed XML: No credential tag found")
+
+        # Just take the first cred if there are more than one
+        cred = creds[0]
+
+        self.set_refid(cred.getAttribute("xml:id"))
+        self.set_expiration(utcparse(getTextNode(cred, "expires")))
+        self.gidCaller = GID(string=getTextNode(cred, "owner_gid"))
+        self.gidObject = GID(string=getTextNode(cred, "target_gid"))   
+
+
+        # Process privileges
+        privs = cred.getElementsByTagName("privileges")[0]
+        rlist = Rights()
+        for priv in privs.getElementsByTagName("privilege"):
+            kind = getTextNode(priv, "name")
+            deleg = str2bool(getTextNode(priv, "can_delegate"))
+            if kind == '*':
+                # Convert * into the default privileges for the credential's type
+                # Each inherits the delegatability from the * above
+                _ , type = urn_to_hrn(self.gidObject.get_urn())
+                rl = determine_rights(type, self.gidObject.get_urn())
+                for r in rl.rights:
+                    r.delegate = deleg
+                    rlist.add(r)
+            else:
+                rlist.add(Right(kind.strip(), deleg))
+        self.set_privileges(rlist)
+
+
+        # Is there a parent?
+        parent = cred.getElementsByTagName("parent")
+        if len(parent) > 0:
+            parent_doc = parent[0].getElementsByTagName("credential")[0]
+            parent_xml = parent_doc.toxml()
+            self.parent = Credential(string=parent_xml)
+            self.updateRefID()
+
+        # Assign the signatures to the credentials
+        for sig in sigs:
+            Sig = Signature(string=sig.toxml())
+
+            for cur_cred in self.get_credential_list():
+                if cur_cred.get_refid() == Sig.get_refid():
+                    cur_cred.set_signature(Sig)
+                                    
+            
+    ##
+    # Verify
+    #   trusted_certs: A list of trusted GID filenames (not GID objects!) 
+    #                  Chaining is not supported within the GIDs by xmlsec1.
+    #
+    #   trusted_certs_required: Should usually be true. Set False means an
+    #                 empty list of trusted_certs would still let this method pass.
+    #                 It just skips xmlsec1 verification et al. Only used by some utils
+    #    
+    # Verify that:
+    # . All of the signatures are valid and that the issuers trace back
+    #   to trusted roots (performed by xmlsec1)
+    # . The XML matches the credential schema
+    # . That the issuer of the credential is the authority in the target's urn
+    #    . In the case of a delegated credential, this must be true of the root
+    # . That all of the gids presented in the credential are valid
+    #    . Including verifying GID chains, and including the issuer
+    # . The credential is not expired
+    #
+    # -- For Delegates (credentials with parents)
+    # . The privileges must be a subset of the parent credentials
+    # . The privileges must have "can_delegate" set for each delegated privilege
+    # . The target gid must be the same between child and parents
+    # . The expiry time on the child must be no later than the parent
+    # . The signer of the child must be the owner of the parent
+    #
+    # -- Verify does *NOT*
+    # . ensure that an xmlrpc client's gid matches a credential gid, that
+    #   must be done elsewhere
+    #
+    # @param trusted_certs: The certificates of trusted CA certificates
+    def verify(self, trusted_certs=None, schema=None, trusted_certs_required=True):
+        if not self.xml:
+            self.decode()
+
+        # validate against the XML schema (note: etree.XMLSchema is XSD, not RelaxNG)
+        if HAVELXML and not self.legacy:
+            if schema and os.path.exists(schema):
+                tree = etree.parse(StringIO(self.xml))
+                schema_doc = etree.parse(schema)
+                xmlschema = etree.XMLSchema(schema_doc)
+                if not xmlschema.validate(tree):
+                    error = xmlschema.error_log.last_error
+                    message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)
+                    raise CredentialNotVerifiable(message)
+
+        if trusted_certs_required and trusted_certs is None:
+            trusted_certs = []
+
+#        trusted_cert_objects = [GID(filename=f) for f in trusted_certs]
+        trusted_cert_objects = []
+        ok_trusted_certs = []
+        # If caller explicitly passed in None that means skip cert chain validation.
+        # Strange and not typical
+        if trusted_certs is not None:
+            for f in trusted_certs:
+                try:
+                    # Failures here include unreadable files
+                    # or non PEM files
+                    trusted_cert_objects.append(GID(filename=f))
+                    ok_trusted_certs.append(f)
+                except Exception, exc:
+                    logger.error("Failed to load trusted cert from %s: %r", f, exc)
+            trusted_certs = ok_trusted_certs
+
+        # Use legacy verification if this is a legacy credential
+        if self.legacy:
+            self.legacy.verify_chain(trusted_cert_objects)
+            if self.legacy.client_gid:
+                self.legacy.client_gid.verify_chain(trusted_cert_objects)
+            if self.legacy.object_gid:
+                self.legacy.object_gid.verify_chain(trusted_cert_objects)
+            return True
+        
+        # make sure it is not expired
+        if self.get_expiration() < datetime.datetime.utcnow():
+            raise CredentialNotVerifiable("Credential %s expired at %s" % (self.get_summary_tostring(), self.expiration.isoformat()))
+
+        # Verify the signatures
+        filename = self.save_to_random_tmp_file()
+        if trusted_certs is not None:
+            cert_args = " ".join(['--trusted-pem %s' % x for x in trusted_certs])
+
+        # If caller explicitly passed in None that means skip cert chain validation.
+        # - Strange and not typical
+        if trusted_certs is not None:
+            # Verify the gids of this cred and of its parents
+            for cur_cred in self.get_credential_list():
+                cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
+                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+
+        refs = []
+        refs.append("Sig_%s" % self.get_refid())
+
+        parentRefs = self.updateRefID()
+        for ref in parentRefs:
+            refs.append("Sig_%s" % ref)
+
+        for ref in refs:
+            # If caller explicitly passed in None that means skip xmlsec1 validation.
+            # Strange and not typical
+            if trusted_certs is None:
+                break
+
+#            print "Doing %s --verify --node-id '%s' %s %s 2>&1" % \
+#                (self.xmlsec_path, ref, cert_args, filename)
+            verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \
+                            % (self.xmlsec_path, ref, cert_args, filename)).read()
+            if not verified.strip().startswith("OK"):
+                # xmlsec errors have a msg= which is the interesting bit.
+                mstart = verified.find("msg=")
+                msg = ""
+                if mstart > -1 and len(verified) > 4:
+                    mstart = mstart + 4
+                    mend = verified.find('\\', mstart)
+                    msg = verified[mstart:mend]
+                raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
+        os.remove(filename)
+
+        # Verify the parents (delegation)
+        if self.parent:
+            self.verify_parent(self.parent)
+
+        # Make sure the issuer is the target's authority, and is
+        # itself a valid GID
+        self.verify_issuer(trusted_cert_objects)
+        return True
+
+    ##
+    # Creates a list of the credential and its parents, with the root 
+    # (original delegated credential) as the last item in the list
+    def get_credential_list(self):    
+        cur_cred = self
+        list = []
+        while cur_cred:
+            list.append(cur_cred)
+            if cur_cred.parent:
+                cur_cred = cur_cred.parent
+            else:
+                cur_cred = None
+        return list
+    
+    ##
+    # Make sure the credential's target gid (a) was signed by or (b)
+    # is the same as the entity that signed the original credential,
+    # or (c) is an authority over the target's namespace.
+    # Also ensure that the credential issuer / signer itself has a valid
+    # GID signature chain (signed by an authority with namespace rights).
+    def verify_issuer(self, trusted_gids):
+        root_cred = self.get_credential_list()[-1]
+        root_target_gid = root_cred.get_gid_object()
+        root_cred_signer = root_cred.get_signature().get_issuer_gid()
+
+        # Case 1:
+        # Allow non authority to sign target and cred about target.
+        #
+        # Why do we need to allow non authorities to sign?
+        # If in the target gid validation step we correctly
+        # checked that the target is only signed by an authority,
+        # then this is just a special case of case 3.
+        # This short-circuit is the common case currently -
+        # and because GID validation doesn't check 'authority',
+        # this allows users to generate valid slice credentials.
+        if root_target_gid.is_signed_by_cert(root_cred_signer):
+            # cred signer matches target signer, return success
+            return
+
+        # Case 2:
+        # Allow someone to sign a credential about themselves. Used?
+        # If not, remove this.
+        #root_target_gid_str = root_target_gid.save_to_string()
+        #root_cred_signer_str = root_cred_signer.save_to_string()
+        #if root_target_gid_str == root_cred_signer_str:
+        #    # cred signer is target, return success
+        #    return
+
+        # Case 3:
+
+        # root_cred_signer is not the target_gid
+        # So this is a different gid that we have not verified.
+        # xmlsec1 verified the cert chain on this already, but
+        # it hasn't verified that the gid meets the HRN namespace
+        # requirements.
+        # Below we'll ensure that it is an authority.
+        # But we haven't verified that it is _signed by_ an authority
+        # We also don't know if xmlsec1 requires that cert signers
+        # are marked as CAs.
+
+        # Note that if verify() gave us no trusted_gids then this
+        # call will fail. So skip it if we have no trusted_gids
+        if trusted_gids and len(trusted_gids) > 0:
+            root_cred_signer.verify_chain(trusted_gids)
+        else:
+            logger.debug("No trusted gids. Cannot verify that cred signer is signed by a trusted authority. Skipping that check.")
+
+        # See if the signer is an authority over the domain of the target.
+        # There are multiple types of authority - accept them all here
+        # Maybe should be (hrn, type) = urn_to_hrn(root_cred_signer.get_urn())
+        root_cred_signer_type = root_cred_signer.get_type()
+        if (root_cred_signer_type.find('authority') == 0):
+            #logger.debug('Cred signer is an authority')
+            # signer is an authority, see if target is in authority's domain
+            signerhrn = root_cred_signer.get_hrn()
+            if hrn_authfor_hrn(signerhrn, root_target_gid.get_hrn()):
+                return
+
+        # We've required that the credential be signed by an authority
+        # for that domain. Reasonable and probably correct.
+        # A looser model would also allow the signer to be an authority
+        # in my control framework - eg My CA or CH. Even if it is not
+        # the CH that issued these, eg, user credentials.
+
+        # Give up, credential does not pass issuer verification
+
+        raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred signer %s not the trusted authority for Cred target %s" % (self.gidCaller.get_urn(), self.gidObject.get_urn(), root_cred_signer.get_hrn(), root_target_gid.get_hrn()))
+
+
+    ##
+    # -- For Delegates (credentials with parents) verify that:
+    # . The privileges must be a subset of the parent credentials
+    # . The privileges must have "can_delegate" set for each delegated privilege
+    # . The target gid must be the same between child and parents
+    # . The expiry time on the child must be no later than the parent
+    # . The signer of the child must be the owner of the parent        
+    def verify_parent(self, parent_cred):
+        # make sure the rights given to the child are a subset of the
+        # parents rights (and check delegate bits)
+        if not parent_cred.get_privileges().is_superset(self.get_privileges()):
+            raise ChildRightsNotSubsetOfParent(("Parent cred ref %s rights " % parent_cred.get_refid()) +
+                self.parent.get_privileges().save_to_string() + (" not superset of delegated cred %s ref %s rights " % (self.get_summary_tostring(), self.get_refid())) +
+                self.get_privileges().save_to_string())
+
+        # make sure my target gid is the same as the parent's
+        if not parent_cred.get_gid_object().save_to_string() == \
+           self.get_gid_object().save_to_string():
+            raise CredentialNotVerifiable("Delegated cred %s: Target gid not equal between parent and child. Parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+
+        # make sure my expiry time is <= my parent's
+        if not parent_cred.get_expiration() >= self.get_expiration():
+            raise CredentialNotVerifiable("Delegated credential %s expires after parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+
+        # make sure my signer is the parent's caller
+        if not parent_cred.get_gid_caller().save_to_string(False) == \
+           self.get_signature().get_issuer_gid().save_to_string(False):
+            raise CredentialNotVerifiable("Delegated credential %s not signed by parent %s's caller" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+                
+        # Recurse
+        if parent_cred.parent:
+            parent_cred.verify_parent(parent_cred.parent)
+
+
+    def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):
+        """
+        Return a delegated copy of this credential, delegated to the 
+        specified gid's user.    
+        """
+        # get the gid of the object we are delegating
+        object_gid = self.get_gid_object()
+        object_hrn = object_gid.get_hrn()        
+        # the hrn of the user who will be delegated to
+        delegee_gid = GID(filename=delegee_gidfile)
+        delegee_hrn = delegee_gid.get_hrn()
+  
+        #user_key = Keypair(filename=keyfile)
+        #user_hrn = self.get_gid_caller().get_hrn()
+        subject_string = "%s delegated to %s" % (object_hrn, delegee_hrn)
+        dcred = Credential(subject=subject_string)
+        dcred.set_gid_caller(delegee_gid)
+        dcred.set_gid_object(object_gid)
+        dcred.set_parent(self)
+        dcred.set_expiration(self.get_expiration())
+        dcred.set_privileges(self.get_privileges())
+        dcred.get_privileges().delegate_all_privileges(True)
+        #dcred.set_issuer_keys(keyfile, delegee_gidfile)
+        dcred.set_issuer_keys(caller_keyfile, caller_gidfile)
+        dcred.encode()
+        dcred.sign()
+
+        return dcred
+
+    # only informative
+    def get_filename(self):
+        return getattr(self,'filename',None)
+
+    ##
+    # Dump the contents of a credential to stdout in human-readable format
+    #
+    # @param dump_parents If true, also dump the parent certificates
+    def dump (self, *args, **kwargs):
+        print self.dump_string(*args, **kwargs)
+
+
+    def dump_string(self, dump_parents=False):
+        result=""
+        result += "CREDENTIAL %s\n" % self.get_subject()
+        filename=self.get_filename()
+        if filename: result += "Filename %s\n"%filename
+        result += "      privs: %s\n" % self.get_privileges().save_to_string()
+        gidCaller = self.get_gid_caller()
+        if gidCaller:
+            result += "  gidCaller:\n"
+            result += gidCaller.dump_string(8, dump_parents)
+
+        if self.get_signature():
+            print "  gidIssuer:"
+            self.get_signature().get_issuer_gid().dump(8, dump_parents)
+
+        gidObject = self.get_gid_object()
+        if gidObject:
+            result += "  gidObject:\n"
+            result += gidObject.dump_string(8, dump_parents)
+
+        if self.parent and dump_parents:
+            result += "\nPARENT"
+            result += self.parent.dump_string(True)
+
+        return result
index 0bc88f6..a684d3e 100644 (file)
@@ -1,6 +1,6 @@
 # sfa should not depend on sfatables
 # if the sfatables.runtime import fails, just define run_sfatables as identity
-
+import sys
 try:
     from sfatables.runtime import SFATablesRules
 
@@ -27,7 +27,7 @@ try:
         """
         if not context_callback:
             context_callback = fetch_context
-
+    
         chain = chain.upper()
         rules = SFATablesRules(chain)
         if rules.sorted_rule_list:
index 4a47b58..985b571 100644 (file)
-#----------------------------------------------------------------------\r
-# Copyright (c) 2008 Board of Trustees, Princeton University\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining\r
-# a copy of this software and/or hardware specification (the "Work") to\r
-# deal in the Work without restriction, including without limitation the\r
-# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
-# and/or sell copies of the Work, and to permit persons to whom the Work\r
-# is furnished to do so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be\r
-# included in all copies or substantial portions of the Work.\r
-#\r
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
-# IN THE WORK.\r
-#----------------------------------------------------------------------\r
-\r
-import re\r
-\r
-from sfa.util.faults import SfaAPIError\r
-\r
-# for convenience and smoother translation - we should get rid of these functions eventually \r
-def get_leaf(hrn): return Xrn(hrn).get_leaf()\r
-def get_authority(hrn): return Xrn(hrn).get_authority_hrn()\r
-def urn_to_hrn(urn): xrn=Xrn(urn); return (xrn.hrn, xrn.type)\r
-def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn\r
-def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)\r
-\r
-def urn_to_sliver_id(urn, slice_id, node_id, index=0, authority=None):\r
-    return Xrn(urn).get_sliver_id(slice_id, node_id, index, authority)\r
-\r
-class Xrn:\r
-\r
-    ########## basic tools on HRNs\r
-    # split a HRN-like string into pieces\r
-    # this is like split('.') except for escaped (backslashed) dots\r
-    # e.g. hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d']\r
-    @staticmethod\r
-    def hrn_split(hrn):\r
-        return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ]\r
-\r
-    # e.g. hrn_leaf ('a\.b.c.d') -> 'd'\r
-    @staticmethod\r
-    def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1]\r
-\r
-    # e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c']\r
-    @staticmethod\r
-    def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1]\r
-    \r
-    # e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c'\r
-    @staticmethod\r
-    def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn))\r
-    \r
-    # e.g. escape ('a.b') -> 'a\.b'\r
-    @staticmethod\r
-    def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token)\r
-\r
-    # e.g. unescape ('a\.b') -> 'a.b'\r
-    @staticmethod\r
-    def unescape(token): return token.replace('\\.','.')\r
-\r
-    # Return the HRN authority chain from top to bottom.\r
-    # e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c']\r
-    @staticmethod\r
-    def hrn_auth_chain(hrn):\r
-        parts = Xrn.hrn_auth_list(hrn)\r
-        chain = []\r
-        for i in range(len(parts)):\r
-            chain.append('.'.join(parts[:i+1]))\r
-        # Include the HRN itself?\r
-        #chain.append(hrn)\r
-        return chain\r
-\r
-    # Is the given HRN a true authority over the namespace of the other\r
-    # child HRN?\r
-    # A better alternative than childHRN.startswith(parentHRN)\r
-    # e.g. hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True,\r
-    # but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False\r
-    # Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True\r
-    @staticmethod\r
-    def hrn_is_auth_for_hrn(parenthrn, hrn):\r
-        if parenthrn == hrn:\r
-            return True\r
-        for auth in Xrn.hrn_auth_chain(hrn):\r
-            if parenthrn == auth:\r
-                return True\r
-        return False\r
-\r
-    ########## basic tools on URNs\r
-    URN_PREFIX = "urn:publicid:IDN"\r
-    URN_PREFIX_lower = "urn:publicid:idn"\r
-\r
-    @staticmethod\r
-    def is_urn (text):\r
-        return text.lower().startswith(Xrn.URN_PREFIX_lower)\r
-\r
-    @staticmethod\r
-    def urn_full (urn):\r
-        if Xrn.is_urn(urn): return urn\r
-        else: return Xrn.URN_PREFIX+urn\r
-    @staticmethod\r
-    def urn_meaningful (urn):\r
-        if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):]\r
-        else: return urn\r
-    @staticmethod\r
-    def urn_split (urn):\r
-        return Xrn.urn_meaningful(urn).split('+')\r
-\r
-    ####################\r
-    # the local fields that are kept consistent\r
-    # self.urn\r
-    # self.hrn\r
-    # self.type\r
-    # self.path\r
-    # provide either urn, or (hrn + type)\r
-    def __init__ (self, xrn, type=None):\r
-        if not xrn: xrn = ""\r
-        # user has specified xrn : guess if urn or hrn\r
-        if Xrn.is_urn(xrn):\r
-            self.hrn=None\r
-            self.urn=xrn\r
-            self.urn_to_hrn()\r
-        else:\r
-            self.urn=None\r
-            self.hrn=xrn\r
-            self.type=type\r
-            self.hrn_to_urn()\r
-        self._normalize()\r
-# happens all the time ..\r
-#        if not type:\r
-#            debug_logger.debug("type-less Xrn's are not safe")\r
-\r
-    def __repr__ (self):\r
-        result="<XRN u=%s h=%s"%(self.urn,self.hrn)\r
-        if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf\r
-        if hasattr(self,'authority'): result += " auth=%s"%self.authority\r
-        result += ">"\r
-        return result\r
-\r
-    def get_urn(self): return self.urn\r
-    def get_hrn(self): return self.hrn\r
-    def get_type(self): return self.type\r
-    def get_hrn_type(self): return (self.hrn, self.type)\r
-\r
-    def _normalize(self):\r
-        if self.hrn is None: raise SfaAPIError, "Xrn._normalize"\r
-        if not hasattr(self,'leaf'): \r
-            self.leaf=Xrn.hrn_split(self.hrn)[-1]\r
-        # self.authority keeps a list\r
-        if not hasattr(self,'authority'): \r
-            self.authority=Xrn.hrn_auth_list(self.hrn)\r
-\r
-    def get_leaf(self):\r
-        self._normalize()\r
-        return self.leaf\r
-\r
-    def get_authority_hrn(self):\r
-        self._normalize()\r
-        return '.'.join( self.authority )\r
-    \r
-    def get_authority_urn(self): \r
-        self._normalize()\r
-        return ':'.join( [Xrn.unescape(x) for x in self.authority] )\r
-\r
-    def get_sliver_id(self, slice_id, node_id=None, index=0, authority=None):\r
-        self._normalize()\r
-        urn = self.get_urn()\r
-        if authority:\r
-            authority_hrn = self.get_authority_hrn()\r
-            if not authority_hrn.startswith(authority):\r
-                hrn = ".".join([authority,authority_hrn, self.get_leaf()])\r
-            else:\r
-                hrn = ".".join([authority_hrn, self.get_leaf()])\r
-            urn = Xrn(hrn, self.get_type()).get_urn()\r
-        parts = [part for part in [urn, slice_id, node_id, index] if part is not None]\r
-        return ":".join(map(str, [parts]))\r
-\r
-    def urn_to_hrn(self):\r
-        """\r
-        compute tuple (hrn, type) from urn\r
-        """\r
-        \r
-#        if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):\r
-        if not Xrn.is_urn(self.urn):\r
-            raise SfaAPIError, "Xrn.urn_to_hrn"\r
-\r
-        parts = Xrn.urn_split(self.urn)\r
-        type=parts.pop(2)\r
-        # Remove the authority name (e.g. '.sa')\r
-        if type == 'authority':\r
-            name = parts.pop()\r
-            # Drop the sa. This is a bad hack, but its either this\r
-            # or completely change how record types are generated/stored   \r
-            if name != 'sa':\r
-                type = type + "+" + name\r
-            name =""\r
-        else:\r
-            name = parts.pop(len(parts)-1)\r
-        # convert parts (list) into hrn (str) by doing the following\r
-        # 1. remove blank parts\r
-        # 2. escape dots inside parts\r
-        # 3. replace ':' with '.' inside parts\r
-        # 3. join parts using '.'\r
-        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part])\r
-        # dont replace ':' in the name section\r
-        if name:\r
-            hrn += '.%s' % Xrn.escape(name) \r
-\r
-        self.hrn=str(hrn)\r
-        self.type=str(type)\r
-    \r
-    def hrn_to_urn(self):\r
-        """\r
-        compute urn from (hrn, type)\r
-        """\r
-\r
-#        if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX):\r
-        if Xrn.is_urn(self.hrn):\r
-            raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn\r
-\r
-        if self.type and self.type.startswith('authority'):\r
-            self.authority = Xrn.hrn_auth_list(self.hrn)\r
-            leaf = self.get_leaf()\r
-            #if not self.authority:\r
-            #    self.authority = [self.hrn]\r
-            type_parts = self.type.split("+")\r
-            self.type = type_parts[0]\r
-            name = 'sa'\r
-            if len(type_parts) > 1:\r
-                name = type_parts[1]\r
-            auth_parts = [part for part in [self.get_authority_urn(), leaf] if part]\r
-            authority_string = ":".join(auth_parts)\r
-        else:\r
-            self.authority = Xrn.hrn_auth_list(self.hrn)\r
-            name = Xrn.hrn_leaf(self.hrn)\r
-            authority_string = self.get_authority_urn()\r
-\r
-        if self.type == None:\r
-            urn = "+".join(['',authority_string,Xrn.unescape(name)])\r
-        else:\r
-            urn = "+".join(['',authority_string,self.type,Xrn.unescape(name)])\r
-        \r
-        self.urn = Xrn.URN_PREFIX + urn\r
-\r
-    def dump_string(self):\r
-        result="-------------------- XRN\n"\r
-        result += "URN=%s\n"%self.urn\r
-        result += "HRN=%s\n"%self.hrn\r
-        result += "TYPE=%s\n"%self.type\r
-        result += "LEAF=%s\n"%self.get_leaf()\r
-        result += "AUTH(hrn format)=%s\n"%self.get_authority_hrn()\r
-        result += "AUTH(urn format)=%s\n"%self.get_authority_urn()\r
-        return result\r
-        \r
+#----------------------------------------------------------------------
+# Copyright (c) 2008 Board of Trustees, Princeton University
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and/or hardware specification (the "Work") to
+# deal in the Work without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Work, and to permit persons to whom the Work
+# is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Work.
+#
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS 
+# IN THE WORK.
+#----------------------------------------------------------------------
+
+import re
+
+from sfa.util.faults import SfaAPIError
+
+# for convenience and smoother translation - we should get rid of these functions eventually 
+# (thin module-level wrappers around the Xrn class below; they exist only so legacy
+# call sites can keep using free functions instead of constructing an Xrn themselves)
+def get_leaf(hrn): return Xrn(hrn).get_leaf()
+def get_authority(hrn): return Xrn(hrn).get_authority_hrn()
+def urn_to_hrn(urn): xrn=Xrn(urn); return (xrn.hrn, xrn.type)
+def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn
+def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
+
+def urn_to_sliver_id(urn, slice_id, node_id, index=0, authority=None):
+    return Xrn(urn).get_sliver_id(slice_id, node_id, index, authority)
+
+class Xrn:
+
+    ########## basic tools on HRNs
+    # split a HRN-like string into pieces
+    # this is like split('.') except for escaped (backslashed) dots
+    # e.g. hrn_split ('a\.b.c.d') -> [ 'a\.b','c','d']
+    @staticmethod
+    def hrn_split(hrn):
+        return [ x.replace('--sep--','\\.') for x in hrn.replace('\\.','--sep--').split('.') ]
+
+    # e.g. hrn_leaf ('a\.b.c.d') -> 'd'
+    @staticmethod
+    def hrn_leaf(hrn): return Xrn.hrn_split(hrn)[-1]
+
+    # e.g. hrn_auth_list ('a\.b.c.d') -> ['a\.b', 'c']
+    @staticmethod
+    def hrn_auth_list(hrn): return Xrn.hrn_split(hrn)[0:-1]
+    
+    # e.g. hrn_auth ('a\.b.c.d') -> 'a\.b.c'
+    @staticmethod
+    def hrn_auth(hrn): return '.'.join(Xrn.hrn_auth_list(hrn))
+    
+    # e.g. escape ('a.b') -> 'a\.b'
+    @staticmethod
+    def escape(token): return re.sub(r'([^\\])\.', r'\1\.', token)
+
+    # e.g. unescape ('a\.b') -> 'a.b'
+    @staticmethod
+    def unescape(token): return token.replace('\\.','.')
+
+    # Return the HRN authority chain from top to bottom.
+    # e.g. hrn_auth_chain('a\.b.c.d') -> ['a\.b', 'a\.b.c']
+    @staticmethod
+    def hrn_auth_chain(hrn):
+        parts = Xrn.hrn_auth_list(hrn)
+        chain = []
+        for i in range(len(parts)):
+            chain.append('.'.join(parts[:i+1]))
+        # Include the HRN itself?
+        #chain.append(hrn)
+        return chain
+
+    # Is the given HRN a true authority over the namespace of the other
+    # child HRN?
+    # A better alternative than childHRN.startswith(parentHRN)
+    # e.g. hrn_is_auth_for_hrn('a\.b', 'a\.b.c.d') -> True,
+    # but hrn_is_auth_for_hrn('a', 'a\.b.c.d') -> False
+    # Also hrn_is_auth_for_hrn('a\.b.c.d', 'a\.b.c.d') -> True
+    @staticmethod
+    def hrn_is_auth_for_hrn(parenthrn, hrn):
+        if parenthrn == hrn:
+            return True
+        for auth in Xrn.hrn_auth_chain(hrn):
+            if parenthrn == auth:
+                return True
+        return False
+
+    ########## basic tools on URNs
+    URN_PREFIX = "urn:publicid:IDN"
+    URN_PREFIX_lower = "urn:publicid:idn"
+
+    @staticmethod
+    def is_urn (text):
+        return text.lower().startswith(Xrn.URN_PREFIX_lower)
+
+    @staticmethod
+    def urn_full (urn):
+        if Xrn.is_urn(urn): return urn
+        else: return Xrn.URN_PREFIX+urn
+    @staticmethod
+    def urn_meaningful (urn):
+        if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):]
+        else: return urn
+    @staticmethod
+    def urn_split (urn):
+        return Xrn.urn_meaningful(urn).split('+')
+
+    ####################
+    # the local fields that are kept consistent
+    # self.urn
+    # self.hrn
+    # self.type
+    # self.path
+    # provide either urn, or (hrn + type)
+    def __init__ (self, xrn, type=None):
+        if not xrn: xrn = ""
+        # user has specified xrn : guess if urn or hrn
+        if Xrn.is_urn(xrn):
+            self.hrn=None
+            self.urn=xrn
+            self.urn_to_hrn()
+        else:
+            self.urn=None
+            self.hrn=xrn
+            self.type=type
+            self.hrn_to_urn()
+        self._normalize()
+# happens all the time ..
+#        if not type:
+#            debug_logger.debug("type-less Xrn's are not safe")
+
+    def __repr__ (self):
+        result="<XRN u=%s h=%s"%(self.urn,self.hrn)
+        if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf
+        if hasattr(self,'authority'): result += " auth=%s"%self.authority
+        result += ">"
+        return result
+
+    def get_urn(self): return self.urn
+    def get_hrn(self): return self.hrn
+    def get_type(self): return self.type
+    def get_hrn_type(self): return (self.hrn, self.type)
+
+    def _normalize(self):
+        if self.hrn is None: raise SfaAPIError, "Xrn._normalize"
+        if not hasattr(self,'leaf'): 
+            self.leaf=Xrn.hrn_split(self.hrn)[-1]
+        # self.authority keeps a list
+        if not hasattr(self,'authority'): 
+            self.authority=Xrn.hrn_auth_list(self.hrn)
+
+    def get_leaf(self):
+        self._normalize()
+        return self.leaf
+
+    def get_authority_hrn(self):
+        self._normalize()
+        return '.'.join( self.authority )
+    
+    def get_authority_urn(self): 
+        self._normalize()
+        return ':'.join( [Xrn.unescape(x) for x in self.authority] )
+
+    def get_sliver_id(self, slice_id, node_id=None, index=0, authority=None):
+        self._normalize()
+        urn = self.get_urn()
+        if authority:
+            authority_hrn = self.get_authority_hrn()
+            if not authority_hrn.startswith(authority):
+                hrn = ".".join([authority,authority_hrn, self.get_leaf()])
+            else:
+                hrn = ".".join([authority_hrn, self.get_leaf()])
+            urn = Xrn(hrn, self.get_type()).get_urn()
+        parts = [part for part in [urn, slice_id, node_id, index] if part is not None]
+        return ":".join(map(str, [parts]))
+
+    def urn_to_hrn(self):
+        """
+        compute tuple (hrn, type) from urn
+        """
+        
+#        if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):
+        if not Xrn.is_urn(self.urn):
+            raise SfaAPIError, "Xrn.urn_to_hrn"
+
+        parts = Xrn.urn_split(self.urn)
+        type=parts.pop(2)
+        # Remove the authority name (e.g. '.sa')
+        if type == 'authority':
+            name = parts.pop()
+            # Drop the sa. This is a bad hack, but it's either this
+            # or completely change how record types are generated/stored   
+            if name != 'sa':
+                type = type + "+" + name
+            name =""
+        else:
+            name = parts.pop(len(parts)-1)
+        # convert parts (list) into hrn (str) by doing the following
+        # 1. remove blank parts
+        # 2. escape dots inside parts
+        # 3. replace ':' with '.' inside parts
+        # 4. join parts using '.'
+        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part])
+        # don't replace ':' in the name section
+        if name:
+            hrn += '.%s' % Xrn.escape(name) 
+
+        self.hrn=str(hrn)
+        self.type=str(type)
+    
+    def hrn_to_urn(self):
+        """
+        compute urn from (hrn, type)
+        """
+
+#        if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX):
+        if Xrn.is_urn(self.hrn):
+            raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn
+
+        if self.type and self.type.startswith('authority'):
+            self.authority = Xrn.hrn_auth_list(self.hrn)
+            leaf = self.get_leaf()
+            #if not self.authority:
+            #    self.authority = [self.hrn]
+            type_parts = self.type.split("+")
+            self.type = type_parts[0]
+            name = 'sa'
+            if len(type_parts) > 1:
+                name = type_parts[1]
+            auth_parts = [part for part in [self.get_authority_urn(), leaf] if part]
+            authority_string = ":".join(auth_parts)
+        else:
+            self.authority = Xrn.hrn_auth_list(self.hrn)
+            name = Xrn.hrn_leaf(self.hrn)
+            authority_string = self.get_authority_urn()
+
+        if self.type == None:
+            urn = "+".join(['',authority_string,Xrn.unescape(name)])
+        else:
+            urn = "+".join(['',authority_string,self.type,Xrn.unescape(name)])
+        
+        self.urn = Xrn.URN_PREFIX + urn
+
+    def dump_string(self):
+        result="-------------------- XRN\n"
+        result += "URN=%s\n"%self.urn
+        result += "HRN=%s\n"%self.hrn
+        result += "TYPE=%s\n"%self.type
+        result += "LEAF=%s\n"%self.get_leaf()
+        result += "AUTH(hrn format)=%s\n"%self.get_authority_hrn()
+        result += "AUTH(urn format)=%s\n"%self.get_authority_urn()
+        return result
+        
index f681205..7f97a7f 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/python
+# just checking write access on repo
 import sys
 import unittest