'sfa/managers',
'sfa/importer',
'sfa/plc',
+ 'sfa/senslab',
'sfa/rspecs',
'sfa/rspecs/elements',
'sfa/rspecs/elements/versions',
-
+import sys
def pg_users_arg(records):
users = []
for record in records:
def sfa_users_arg(records, slice_record):
    """Build the 'users' argument expected by the senslab aggregate.

    Keeps only records of type 'user' and maps each one to a dict of
    registry fields plus the enclosing slice_record (needed for legacy
    refresh peer).  The MyPLC-specific fields (keys, email, first/last
    name, key_ids) are intentionally not propagated for senslab.
    """
    users = []
    for record in records:
        if record['type'] != 'user':
            continue
        user = {'person_id': record['record_id'],
                'hrn': record['hrn'],
                'type': record['type'],
                'authority': record['authority'],
                'gid': record['gid'],
                'slice_record': slice_record,  # needed for legacy refresh peer
                }
        users.append(user)
    return users
def sfa_to_pg_users_arg(users):
rspec.filter({'component_manager_id': server_version['urn']})
rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
else:
+ print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
users = sfa_users_arg(user_records, slice_record)
# do not append users, keys, or slice tags. Anything
--- /dev/null
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+
+
+
class slab (Generic):
    """Generic flavour for the senslab testbed: wires together the api,
    importer, manager and driver classes used by the server side."""

    def api_class (self):
        # the standard api class is good enough here
        return sfa.server.sfaapi.SfaApi

    def importer_class (self):
        # populates the registry from the testbed (OAR/LDAP)
        import sfa.importer.slabimporter
        return sfa.importer.slabimporter.SlabImporter

    # manager classes for the server-side services
    def registry_manager_class (self):
        import sfa.managers.registry_manager
        return sfa.managers.registry_manager.RegistryManager

    def slicemgr_manager_class (self):
        import sfa.managers.slice_manager
        return sfa.managers.slice_manager.SliceManager

    def aggregate_manager_class (self):
        import sfa.managers.aggregate_manager
        return sfa.managers.aggregate_manager.AggregateManager

    def driver_class (self):
        # server-side driver, talks to the whole testbed
        import sfa.senslab.slabdriver
        return sfa.senslab.slabdriver.SlabDriver

    # senslab does not have a component manager yet
    def component_manager_class (self):
        return None

    def component_driver_class (self):
        return None
+
+
--- /dev/null
+import os
+import sys
+import datetime
+import time
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import PlXrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.senslab.LDAPapi import LDAPapi
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.senslab.slabpostgres import SlabSliceDB, slab_dbsession
+
+from sfa.trust.certificate import Keypair,convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+from sfa.storage.dbschema import DBSchema
+
+
+
+def _get_site_hrn(site):
+ hrn = site['name']
+ return hrn
+
class SlabImporter:
    """Populate the SFA registry from the senslab testbed (OAR nodes,
    LDAP accounts): import sites, nodes, persons and per-user slices,
    then garbage-collect registry records no longer backed by the testbed.
    """

    def __init__ (self, auth_hierarchy, logger):
        # auth_hierarchy: sfa.trust.hierarchy.Hierarchy, mints auths and gids
        self.auth_hierarchy = auth_hierarchy
        self.logger = logger

    def hostname_to_hrn(self, root_auth, login_base, hostname):
        """Build a node hrn of the form <root_auth>.<login_base>_<hostname>."""
        return PlXrn(auth=root_auth, hostname=login_base + '_' + hostname).get_hrn()

    def slicename_to_hrn(self, person_hrn):
        """Senslab convention: one slice per user, named <person_hrn>_slice."""
        return person_hrn + '_slice'

    def add_options (self, parser):
        # no importer-specific command-line options for now
        pass

    def find_record_by_type_hrn(self, type, hrn):
        """Return the already-known registry record for (type, hrn), or None."""
        return self.records_by_type_hrn.get((type, hrn), None)

    def locate_by_type_pointer (self, type, pointer):
        """Return the already-known registry record for (type, pointer), or None."""
        return self.records_by_type_pointer.get((type, pointer), None)

    def update_just_added_records_dict (self, record):
        """Register a freshly created record in the (type, hrn) hash so
        later passes of the same run can find it."""
        # renamed from 'tuple' — don't shadow the builtin
        rec_key = (record.type, record.hrn)
        if rec_key in self.records_by_type_hrn:
            self.logger.warning("SlabImporter.update_just_added_records_dict: duplicate (%s,%s)" % rec_key)
            return
        self.records_by_type_hrn[rec_key] = record

    def run (self, options):
        """Perform one full import pass; `options` is currently unused."""
        config = Config()
        slabdriver = SlabDriver(config)

        # create the senslab-specific slice table on first run
        if not slabdriver.db.exists('slice_senslab'):
            slabdriver.db.createtable('slice_senslab')

        ######## retrieve all existing SFA objects
        all_records = dbsession.query(RegRecord).all()

        # hash by (type, hrn): used to know if a record is already known to SFA
        self.records_by_type_hrn = \
            dict([((record.type, record.hrn), record) for record in all_records])
        # hash by (type, pointer); pointer == -1 means "no testbed object"
        self.records_by_type_pointer = \
            dict([((str(record.type), record.pointer), record)
                  for record in all_records if record.pointer != -1])

        # records not seen again during this pass get deleted at the end
        for record in all_records:
            record.stale = True

        nodes_listdict = slabdriver.GetNodes()
        nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
        sites_listdict = slabdriver.GetSites()
        ldap_person_listdict = slabdriver.GetPersons()
        slices_listdict = slabdriver.GetSlices()

        # BUGFIX: initialize so a failed build leaves an empty dict instead of
        # an unbound name (original relied on a later bare except to hide this)
        slices_by_userid = {}
        try:
            slices_by_userid = dict([(slice_rec.record_id_user, slice_rec)
                                     for slice_rec in slices_listdict])
        except TypeError:
            # GetSlices may return a non-iterable when there are no slices
            self.logger.warning("SlabImporter: no slices returned by driver")

        for site in sites_listdict:
            site_hrn = _get_site_hrn(site)
            site_record = self.find_record_by_type_hrn('authority', site_hrn)
            if not site_record:
                try:
                    urn = hrn_to_urn(site_hrn, 'authority')
                    if not self.auth_hierarchy.auth_exists(urn):
                        self.auth_hierarchy.create_auth(urn)
                    auth_info = self.auth_hierarchy.get_auth_info(urn)
                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
                                               pointer='-1',
                                               authority=get_authority(site_hrn))
                    site_record.just_created()
                    dbsession.add(site_record)
                    dbsession.commit()
                    self.logger.info("SlabImporter: imported authority (site) : %s" % site_record)
                    self.update_just_added_records_dict(site_record)
                except Exception:
                    # if the site import fails then there is no point in trying to import the
                    # site's child records (node, slices, persons), so skip them.
                    self.logger.log_exc("SlabImporter: failed to import site. Skipping child records")
                    continue
            else:
                # xxx update the record ...
                pass
            site_record.stale = False

            # import node records in site
            for node_id in site['node_ids']:
                try:
                    node = nodes_by_id[node_id]
                except KeyError:
                    self.logger.warning("SlabImporter: cannot find node_id %s - ignored" % node_id)
                    continue
                site_name = site['name']
                hrn = self.hostname_to_hrn(slabdriver.root_auth, site_name, node['hostname'])
                # xxx this sounds suspicious — hrn silently truncated to 64 chars
                if len(hrn) > 64:
                    hrn = hrn[:64]
                node_record = self.find_record_by_type_hrn('node', hrn)
                if not node_record:
                    try:
                        pkey = Keypair(create=True)
                        urn = hrn_to_urn(hrn, 'node')
                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
                        node_record = RegNode(hrn=hrn, gid=node_gid,
                                              pointer=node['node_id'],
                                              authority=get_authority(hrn))
                        node_record.just_created()
                        dbsession.add(node_record)
                        dbsession.commit()
                        self.logger.info("SlabImporter: imported node: %s" % node_record)
                        self.update_just_added_records_dict(node_record)
                    except Exception:
                        self.logger.log_exc("SlabImporter: failed to import node")
                else:
                    # xxx update the record ...
                    pass
                # BUGFIX: guard — node_record is still None when the import failed
                if node_record:
                    node_record.stale = False

        # import persons
        for person in ldap_person_listdict:
            person_hrn = person['hrn']
            slice_hrn = self.slicename_to_hrn(person['hrn'])

            # xxx suspicious again — silent truncation
            if len(person_hrn) > 64:
                person_hrn = person_hrn[:64]
            person_urn = hrn_to_urn(person_hrn, 'user')

            user_record = self.find_record_by_type_hrn('user', person_hrn)
            slice_record = self.find_record_by_type_hrn('slice', slice_hrn)

            # return a tuple pubkey (raw public key string) and pkey (a Keypair object)
            def init_person_key(person, slab_key):
                pubkey = None
                if person['pkey']:
                    pubkey = slab_key
                    try:
                        pkey = convert_public_key(pubkey)
                    except Exception:
                        self.logger.warn('SlabImporter: unable to convert public key for %s' % person_hrn)
                        pkey = Keypair(create=True)
                else:
                    # the user has no keys. Creating a random keypair for the user's gid
                    self.logger.warn("SlabImporter: person %s does not have a PL public key" % person_hrn)
                    pkey = Keypair(create=True)
                return (pubkey, pkey)

            try:
                slab_key = person['pkey']
                # new person
                if not user_record:
                    (pubkey, pkey) = init_person_key(person, slab_key)
                    person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
                    if person['email']:
                        person_gid.set_email(person['email'])
                        user_record = RegUser(hrn=person_hrn, gid=person_gid,
                                              pointer='-1',
                                              authority=get_authority(person_hrn),
                                              email=person['email'])
                    else:
                        user_record = RegUser(hrn=person_hrn, gid=person_gid,
                                              pointer='-1',
                                              authority=get_authority(person_hrn))
                    if pubkey:
                        user_record.reg_keys = [RegKey(pubkey)]
                    else:
                        self.logger.warning("No key found for user %s" % user_record)
                    user_record.just_created()
                    dbsession.add(user_record)
                    dbsession.commit()
                    self.logger.info("SlabImporter: imported person: %s" % user_record)
                    self.update_just_added_records_dict(user_record)
                else:
                    # if user's primary key has changed then we need to update the
                    # user's gid by forcing an update here
                    sfa_keys = user_record.reg_keys
                    # BUGFIX: the original compared `slab_key is not sfa_keys`
                    # (string vs list identity), which is always True and
                    # regenerated the gid on every import pass.
                    # Compare against the stored key strings instead.
                    # TODO(review): confirm RegKey exposes the raw key as `.key`
                    known_keys = [reg_key.key for reg_key in sfa_keys] if sfa_keys else []
                    new_key = slab_key not in known_keys
                    if new_key:
                        (pubkey, pkey) = init_person_key(person, slab_key)
                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
                        if not pubkey:
                            user_record.reg_keys = []
                        else:
                            user_record.reg_keys = [RegKey(pubkey)]
                        self.logger.info("SlabImporter: updated person: %s" % user_record)
                    if person['email']:
                        user_record.email = person['email']
                    dbsession.commit()
                user_record.stale = False
            except Exception:
                self.logger.log_exc("SlabImporter: failed to import person %s" % (person))

            # sanity check: the user should own a senslab slice entry
            # (lookup result only used to emit a warning, as in the original)
            try:
                _user_slice = slices_by_userid[user_record.record_id]
            except Exception:
                self.logger.warning("SlabImporter: cannot locate slices_by_userid[user_record.record_id] %s - ignored" % user_record)

            if not slice_record:
                try:
                    pkey = Keypair(create=True)
                    urn = hrn_to_urn(slice_hrn, 'slice')
                    slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
                    slice_record = RegSlice(hrn=slice_hrn, gid=slice_gid,
                                            pointer='-1',
                                            authority=get_authority(slice_hrn))
                    slice_record.just_created()
                    dbsession.add(slice_record)
                    dbsession.commit()

                    # the serial record_id only exists after commit - fetch it back
                    sl_rec = dbsession.query(RegSlice).filter(RegSlice.hrn.match(slice_hrn)).all()
                    slab_slice = SlabSliceDB(slice_hrn=slice_hrn,
                                             record_id_slice=sl_rec[0].record_id,
                                             record_id_user=user_record.record_id)
                    slab_dbsession.add(slab_slice)
                    slab_dbsession.commit()
                    self.logger.info("SlabImporter: imported slice: %s" % slice_record)
                    self.update_just_added_records_dict(slice_record)
                except Exception:
                    self.logger.log_exc("SlabImporter: failed to import slice")
            # no slice update upon import in senslab
            else:
                # xxx update the record ...
                self.logger.warning("Slice update not yet implemented")

            # record current users affiliated with the slice
            # BUGFIX: guard — slice_record is still None when creation failed
            if slice_record:
                slice_record.reg_researchers = [user_record]
                dbsession.commit()
                slice_record.stale = False

        ### remove stale records
        # special records must be preserved
        system_hrns = [slabdriver.hrn, slabdriver.root_auth, slabdriver.hrn + '.slicemanager']
        for record in all_records:
            if record.hrn in system_hrns:
                record.stale = False
            if record.peer_authority:
                record.stale = False

        for record in all_records:
            try:
                stale = record.stale
            except AttributeError:
                stale = True
                self.logger.warning("stale not found with %s" % record)
            if stale:
                self.logger.info("SlabImporter: deleting stale record: %s" % record)
                dbsession.delete(record)
                dbsession.commit()
+
+
+
\ No newline at end of file
# an attempt to document what a driver class should provide,
# and implement reasonable defaults
#
-
+import sys
class Driver:
def __init__ (self, config):
# this constraint, based on the principle that SFA should not rely on the
# testbed database to perform such a core operation (i.e. getting rights right)
def augment_records_with_testbed_info (self, sfa_records):
+ print >>sys.stderr, " \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
return sfa_records
# incoming record, as provided by the client to the Register API call
--- /dev/null
<?xml version="1.0" encoding="UTF-8"?>
<!-- RelaxNG schema for senslab RSpecs: either an advertisement ("network")
     or a "request". The original "leases" define was not well-formed XML
     (self-closed slot attribute, stray </attribute>, missing closers);
     it is repaired below. -->
<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
  <start>
    <ref name="RSpec"/>
  </start>
  <define name="RSpec">
    <element name="RSpec">
      <attribute name="type">
        <data type="NMTOKEN"/>
      </attribute>
      <choice>
        <ref name="network"/>
        <ref name="request"/>
      </choice>
    </element>
  </define>
  <define name="network">
    <element name="network">
      <attribute name="name">
        <data type="NMTOKEN"/>
      </attribute>
      <optional>
        <attribute name="slice">
          <data type="NMTOKEN"/>
        </attribute>
      </optional>
      <optional>
        <ref name="sliver_defaults"/>
      </optional>
      <oneOrMore>
        <ref name="site"/>
      </oneOrMore>
    </element>
  </define>
  <define name="sliver_defaults">
    <element name="sliver_defaults">
      <ref name="sliver_elements"/>
    </element>
  </define>
  <define name="site">
    <element name="site">
      <attribute name="id">
        <data type="ID"/>
      </attribute>
      <element name="name">
        <text/>
      </element>
      <zeroOrMore>
        <ref name="node"/>
      </zeroOrMore>
    </element>
  </define>
  <define name="node">
    <element name="node">
      <attribute name="node_id">
        <data type="ID"/>
      </attribute>
      <element name="hostname">
        <text/>
      </element>
      <attribute name="reservable">
        <data type="boolean"/>
      </attribute>
      <element name="ip_address">
        <text/>
      </element>
      <optional>
        <element name="urn">
          <text/>
        </element>
      </optional>
      <optional>
        <ref name="leases"/>
      </optional>
      <optional>
        <ref name="sliver"/>
      </optional>
    </element>
  </define>
  <define name="request">
    <element name="request">
      <attribute name="name">
        <data type="NMTOKEN"/>
      </attribute>
      <optional>
        <ref name="sliver_defaults"/>
      </optional>
      <oneOrMore>
        <ref name="sliver"/>
      </oneOrMore>
    </element>
  </define>
  <define name="sliver">
    <element name="sliver">
      <optional>
        <attribute name="nodeid">
          <data type="ID"/>
        </attribute>
      </optional>
      <ref name="sliver_elements"/>
    </element>
  </define>
  <define name="sliver_elements">
    <interleave>
      <optional>
        <element name="capabilities">
          <text/>
        </element>
      </optional>
      <optional>
        <element name="delegations">
          <text/>
        </element>
      </optional>
      <optional>
        <element name="program">
          <text/>
        </element>
      </optional>
    </interleave>
  </define>
  <define name="leases">
    <element name="leases">
      <zeroOrMore>
        <!-- NOTE(review): repeating attributes via zeroOrMore/group is
             dubious RelaxNG; a per-lease child element would be cleaner.
             Kept the intent of the original structure. -->
        <group>
          <attribute name="slot">
            <data type="dateTime"/>
          </attribute>
          <attribute name="slice">
            <data type="NMTOKEN"/>
          </attribute>
        </group>
      </zeroOrMore>
    </element>
  </define>
</grammar>
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
+import sys
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
from sfa.rspecs.rspec import RSpec
hrn, type = urn_to_hrn(slice_xrn)
self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
-
+ print >>sys.stderr, " \r\n \r\n Createsliver.py call %s\ttarget-hrn: %s\tmethod-name: %s "%(self.api.interface, hrn, self.name)
# Find the valid credentials
valid_creds = self.api.auth.checkCredentials(creds, 'createsliver', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
import zlib
-
+import sys
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
(hrn, _) = urn_to_hrn(xrn)
-
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources.pyeuuuuuu call : hrn %s options %s" %( hrn,options )
# Find the valid credentials
valid_creds = self.api.auth.checkCredentials(creds, 'listnodes', hrn)
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources :origin_hrn %s sansvqalid credss %s " %(origin_hrn, Credential(string=creds[0]).get_gid_caller().get_hrn())
if not origin_hrn:
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources.py000 call : hrn %s self.api.interface %s origin_hrn %s \r\n \r\n \r\n " %(hrn ,self.api.interface,origin_hrn)
rspec = self.api.manager.ListResources(self.api, creds, options)
# filter rspec through sfatables
chain_name = 'OUTGOING'
elif self.api.interface in ['slicemgr']:
chain_name = 'FORWARD-OUTGOING'
- self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+ self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+ print >>sys.stderr, " \r\n \r\n \t Listresources.py001 call : chain_name %s hrn %s origine_hrn %s " %(chain_name, hrn, origin_hrn)
filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
from types import StringTypes
from collections import defaultdict
+import sys
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
# slice belongs to out local plc or a myplc peer. We will assume it
# is a local site, unless we find out otherwise
peer = None
-
+ print>>sys.stderr, " \r\n \r\n \tplslices.py get_peer slice_authority "
# get this slice's authority (site)
slice_authority = get_authority(hrn)
# get this site's authority (sfa root authority or sub authority)
site_authority = get_authority(slice_authority).lower()
-
+ print>>sys.stderr, " \r\n \r\n \tplslices.py get_peer slice_authority %s site_authority %s" %(slice_authority,site_authority)
# check if we are already peered with this site_authority, if so
peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
for peer_record in peers:
--- /dev/null
+from sfa.rspecs.sfa_rspec import sfa_rspec_version
+from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version
+
# rspec flavours this aggregate advertises
ad_rspec_versions = [pg_rspec_ad_version, sfa_rspec_version]

# requests are accepted in the same versions we advertise
request_rspec_versions = ad_rspec_versions

# fall back to SFA v1 when the caller does not specify a version
default_rspec_version = {'type': 'SFA', 'version': '1'}

supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
                    'request_rspec_versions': request_rspec_versions,
                    'default_ad_rspec': default_rspec_version}
+
--- /dev/null
+
+
+
import sys

import ldap

from sfa.trust.auth import *
from sfa.trust.certificate import *
from sfa.trust.gid import *
from sfa.trust.hierarchy import *
from sfa.util.config import *
+
class LDAPapi:
    """Thin read-only wrapper around the senslab LDAP directory: maps ldap
    entries under ou=people into SFA-style user record dicts."""

    # LDAP server hosting the senslab user accounts
    LDAP_SERVER_IP = "192.168.0.251"

    def __init__(self, record_filter=None):
        self.ldapserv = ldap.open(self.LDAP_SERVER_IP)
        self.senslabauth = Hierarchy()
        config = Config()
        # our root authority name (SFA_REGISTRY_ROOT_AUTH)
        self.authname = config.SFA_REGISTRY_ROOT_AUTH
        authinfo = self.senslabauth.get_auth_info(self.authname)

        self.auth = Auth()
        gid = authinfo.get_gid_object()
        # fields exposed in the record dicts returned by ldapFind
        self.ldapdictlist = ['type',
                             'pkey',
                             'uid',
                             'serial',
                             'authority',
                             'peer_authority',
                             'pointer',
                             'hrn']

    def ldapFind(self, record_filter=None, columns=None):
        """Search the directory and return a list of user record dicts.

        record_filter may carry either 'authority' (only our own root
        authority is answered) or 'hrn' (a single hrn or a list).
        Returns [] for filters we cannot handle.
        """
        results = []

        # BUGFIX: the original crashed with a TypeError on a None filter
        if not record_filter:
            return results

        if 'authority' in record_filter:
            if record_filter['authority'] == self.authname:
                # which is SFA_REGISTRY_ROOT_AUTH:
                # request all records under our authority, ie all ldap entries
                ldapfilter = "cn=*"
            else:
                # NOT SFA_REGISTRY_ROOT_AUTH: we know nothing about it
                return []
        else:
            if 'hrn' not in record_filter:
                sys.stderr.write("find : don't know how to handle filter %s\n" % record_filter)
                return []
            h = record_filter['hrn']
            hrns = h if isinstance(h, list) else [h]

            ldapfilter = "(|"
            for hrn in hrns:
                splited_hrn = hrn.split(".")
                if splited_hrn[0] != self.authname:
                    sys.stderr.write("i know nothing about %s my authname is %s not %s\n"
                                     % (hrn, self.authname, splited_hrn[0]))
                else:
                    login = splited_hrn[1]
                    ldapfilter += "(uid=%s)" % login
            ldapfilter += ")"

        rindex = self.ldapserv.search("ou=people,dc=senslab,dc=info",
                                      ldap.SCOPE_SUBTREE, ldapfilter,
                                      ['mail', 'givenName', 'sn', 'uid', 'sshPublicKey'])
        ldapresponse = self.ldapserv.result(rindex, 1)
        for ldapentry in ldapresponse[1]:
            attrs = ldapentry[1]

            # HACK inherited from the original code: map ldap account
            # 'savakian' onto 'avakian' — TODO confirm this is still needed
            tmpname = attrs['uid'][0]
            if attrs['uid'][0] == "savakian":
                tmpname = 'avakian'

            hrn = self.authname + "." + tmpname

            # the directory stores the literal string "unknown" for no email
            tmpemail = attrs['mail'][0]
            if attrs['mail'][0] == "unknown":
                tmpemail = None

            parent_hrn = get_authority(hrn)
            parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)

            results.append({
                'type': 'user',
                'pkey': attrs['sshPublicKey'][0],
                'uid': tmpname,
                'email': tmpemail,
                'first_name': attrs['givenName'][0],
                'last_name': attrs['sn'][0],
                'serial': 'none',
                'authority': self.authname,
                'peer_authority': '',
                'pointer': -1,
                'hrn': hrn,
            })
        return results
--- /dev/null
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+import datetime
+from time import gmtime, strftime
+from sfa.senslab.parsing import *
+#from sfa.senslab.SenslabImportUsers import *
+import urllib
+import urllib2
+from sfa.util.config import Config
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import hrn_to_urn, get_authority,Xrn,get_leaf
+
+from sfa.util.config import Config
+
# address of the OAR REST API server
#OARIP='10.127.255.254'
OARIP = '192.168.0.109'

# known GET entry points (informational)
OARrequests_list = ["GET_version", "GET_timezone", "GET_jobs", "GET_jobs_table",
                    "GET_jobs_details", "GET_resources_full", "GET_resources"]

OARrequests_uri_list = ['/oarapi/version.json', '/oarapi/timezone.json',
                        '/oarapi/jobs.json', '/oarapi/jobs/details.json',
                        '/oarapi/resources/full.json', '/oarapi/resources.json']

# GET request name -> uri; the literal "id" segment gets substituted
# with an actual id by the caller
OARrequests_get_uri_dict = {
    'GET_version': '/oarapi/version.json',
    'GET_timezone': '/oarapi/timezone.json',
    'GET_jobs': '/oarapi/jobs.json',
    'GET_jobs_id': '/oarapi/jobs/id.json',
    'GET_jobs_id_resources': '/oarapi/jobs/id/resources.json',
    'GET_resources_id': '/oarapi/resources/id.json',
    'GET_jobs_table': '/oarapi/jobs/table.json',
    'GET_jobs_details': '/oarapi/jobs/details.json',
    'GET_resources_full': '/oarapi/resources/full.json',
    'GET_resources': '/oarapi/resources.json',
    'GET_sites': '/oarapi/resources/full.json',
}

OARrequest_post_uri_dict = {
    'POST_job': {'uri': '/oarapi/jobs.json'},
    'DELETE_jobs_id': {'uri': '/oarapi/jobs/id.json'},
}

POSTformat = {
    #'yaml': {'content':"text/yaml", 'object':yaml}
    'json': {'content': "application/json", 'object': json},
    #'http': {'content':"applicaton/x-www-form-urlencoded",'object': html},
}

# default fields for a job submission POST
OARpostdatareqfields = {'resource': "/nodes=", 'command': "sleep",
                        'workdir': "/home/", 'walltime': ""}
+
class OARrestapi:
    """HTTP client for the OAR REST API: GET for versions/jobs/resources,
    POST for job submission/deletion. Responses are JSON."""

    def __init__(self):
        self.oarserver = {}
        self.oarserver['ip'] = OARIP
        self.oarserver['port'] = 80
        self.oarserver['uri'] = None
        self.oarserver['postformat'] = 'json'

        # job states as reported by OAR
        self.jobstates = ["Terminated", "Running", "Error", "Waiting",
                          "Launching", "Hold"]

        self.parser = OARGETParser(self)

    def GETRequestToOARRestAPI(self, request, strval=None, username=None):
        """Issue a GET; `strval` substitutes the 'id' placeholder in the uri,
        `username` is forwarded in the X-REMOTE_IDENT header.
        Returns the decoded JSON reply."""
        self.oarserver['uri'] = OARrequests_get_uri_dict[request]
        headers = {}
        data = json.dumps({})
        if strval:
            self.oarserver['uri'] = self.oarserver['uri'].replace("id", str(strval))
        if username:
            headers['X-REMOTE_IDENT'] = username
        try:
            # OAR appears to require an explicit content-length even on GET
            headers['content-length'] = '0'
            conn = httplib.HTTPConnection(self.oarserver['ip'], self.oarserver['port'])
            conn.request("GET", self.oarserver['uri'], data, headers)
            resp = (conn.getresponse()).read()
            conn.close()
        except Exception:
            # NOTE(review): ServerError is not defined/imported in this
            # module — confirm where it is expected to come from
            raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
        try:
            return json.loads(resp)
        except ValueError:
            # BUGFIX: original referenced the undefined name 'js' here
            raise ServerError("Failed to parse Server Response:" + resp)

    def POSTRequestToOARRestAPI(self, request, datadict, username=None):
        """Issue a POST carrying `datadict` as JSON; a 'strval' entry in
        datadict substitutes the 'id' placeholder in the uri and is removed
        before sending. Returns the decoded JSON reply, or None on failure."""
        try:
            self.oarserver['uri'] = OARrequest_post_uri_dict[request]['uri']
        except KeyError:
            sys.stderr.write("POSTRequestToOARRestAPI: request %s not in OARrequest_post_uri_dict\n"
                             % request)
            return
        if datadict and 'strval' in datadict:
            self.oarserver['uri'] = self.oarserver['uri'].replace("id", str(datadict['strval']))
            del datadict['strval']

        data = json.dumps(datadict)
        headers = {'X-REMOTE_IDENT': username,
                   'content-type': POSTformat['json']['content'],
                   'content-length': str(len(data))}
        try:
            conn = httplib.HTTPConnection(self.oarserver['ip'], self.oarserver['port'])
            conn.request("POST", self.oarserver['uri'], data, headers)
            resp = (conn.getresponse()).read()
            conn.close()
        except Exception:
            sys.stderr.write("POSTRequestToOARRestAPI ERROR: data %s headers %s uri %s\n"
                             % (data, headers, self.oarserver['uri']))
            # BUGFIX: original fell through and hit a NameError on 'resp'
            return None

        try:
            answer = json.loads(resp)
            return answer
        except ValueError:
            # BUGFIX: original referenced 'answer' here, which is undefined
            # when json.loads raised
            raise ServerError("Failed to parse Server Response:" + resp)
+
+
+class OARGETParser:
+
+ #Insert a new node into the dictnode dictionary
+ def AddNodeId(self,dictnode,value):
+ #Inserts new key. The value associated is a tuple list.
+ node_id = int(value)
+ dictnode[node_id] = [('node_id',node_id) ]
+ return node_id
+
+ def AddNodeNetworkAddr(self,tuplelist,value):
+ tuplelist.append(('hostname',str(value)))
+
+
+ def AddNodeSite(self,tuplelist,value):
+ tuplelist.append(('site',str(value)))
+
+
+ def AddNodeRadio(self,tuplelist,value):
+ tuplelist.append(('radio',str(value)))
+
+
+ def AddMobility(self,tuplelist,value):
+ if value :
+ tuplelist.append(('mobile',int(value)))
+ return 0
+
+
+ def AddPosX(self,tuplelist,value):
+ tuplelist.append(('posx',value))
+
+
+ def AddPosY(self,tuplelist,value):
+ tuplelist.append(('posy',value))
+
+ def AddBootState(self,tuplelist,value):
+ tuplelist.append(('boot_state',str(value)))
+
+ def ParseVersion(self) :
+ #print self.raw_json
+ #print >>sys.stderr, self.raw_json
+ if 'oar_version' in self.raw_json :
+ self.version_json_dict.update(api_version=self.raw_json['api_version'] ,
+ apilib_version=self.raw_json['apilib_version'],
+ api_timezone=self.raw_json['api_timezone'],
+ api_timestamp=self.raw_json['api_timestamp'],
+ oar_version=self.raw_json['oar_version'] )
+ else :
+ self.version_json_dict.update(api_version=self.raw_json['api'] ,
+ apilib_version=self.raw_json['apilib'],
+ api_timezone=self.raw_json['api_timezone'],
+ api_timestamp=self.raw_json['api_timestamp'],
+ oar_version=self.raw_json['oar'] )
+
+ print self.version_json_dict['apilib_version']
+
+
+ def ParseTimezone(self) :
+ api_timestamp=self.raw_json['api_timestamp']
+ #readable_time = strftime("%Y-%m-%d %H:%M:%S", gmtime(float(api_timestamp)))
+
+ return api_timestamp
+
+    def ParseJobs(self) :
+        # stub: job-list parsing not implemented yet; only resets the list
+        self.jobs_list = []
+        print " ParseJobs "
+
+    def ParseJobsTable(self) :
+        # stub: not implemented
+        print "ParseJobsTable"
+
+    def ParseJobsDetails (self):
+        # stub: only traces the raw JSON answer to stderr
+        print >>sys.stderr,"ParseJobsDetails %s " %(self.raw_json)
+
+
+    def ParseJobsIds(self):
+        """Extract the fields listed in job_resources from a single-job answer.
+
+        Returns a dict mapping each field of job_resources to its value
+        in self.raw_json.  NOTE(review): when any field is missing the
+        KeyError is swallowed and the method implicitly returns None --
+        callers must cope with both a dict and None.
+        """
+        # subset of fields actually consumed by the driver
+        job_resources =['assigned_network_address', 'assigned_resources','Job_Id', 'scheduledStart','state','job_user', 'startTime','walltime','message']
+        # full field list of an OAR job answer, kept for reference
+        job_resources_full = ['Job_Id', 'scheduledStart', 'resubmit_job_id', 'owner', 'submissionTime', 'message', 'id', 'jobType', 'queue', 'launchingDirectory', 'exit_code', 'state', 'array_index', 'events', 'assigned_network_address', 'cpuset_name', 'initial_request', 'job_user', 'assigned_resources', 'array_id', 'job_id', 'resources_uri', 'dependencies', 'api_timestamp', 'startTime', 'reservation', 'properties', 'types', 'walltime', 'name', 'uri', 'wanted_resources', 'project', 'command']
+
+        job_info = self.raw_json
+
+        values=[]
+        try:
+            for k in job_resources:
+                values.append(job_info[k])
+            return dict(zip(job_resources,values))
+
+        except KeyError:
+            print>>sys.stderr, " \r\n \t ParseJobsIds Key Error"
+
+
+
+
+    def ParseJobsIdResources(self):
+        # stub: not implemented
+        print>>sys.stderr, "ParseJobsIdResources"
+
+    def ParseResources(self) :
+        """Parse the /resources answer: nodes only, no site information."""
+        print>>sys.stderr, " \r\n \t\t\t ParseResources__________________________ "
+        #resources are listed inside the 'items' list from the json
+        self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+
+
+    def ParseDeleteJobs(self):
+        # nothing to parse on a job-deletion answer
+        return
+
+    def ParseResourcesFull(self ) :
+        """Parse /resources/full: nodes and sites; returns the node dict."""
+        print>>sys.stderr, " \r\n \t\t\t ParseResourcesFull_____________________________ "
+        #print self.raw_json[1]
+        #resources are listed inside the 'items' list from the json
+        # apilib 0.2.10 returns the resource list directly, later
+        # versions wrap it in an 'items' list
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+            self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.node_dictlist
+
+    def ParseResourcesFullSites(self ) :
+        """Same parsing as ParseResourcesFull but returns the site dict."""
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+            self.raw_json = self.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.site_dict
+
+
+    # Dispatch table: OAR resource JSON key -> unbound handler method that
+    # appends the corresponding (name, value) tuple to a node's tuple list.
+    # Handlers are called as self.resources_fulljson_dict[k](self, ...).
+    resources_fulljson_dict= {
+        'resource_id' : AddNodeId,
+        'network_address' : AddNodeNetworkAddr,
+        'site': AddNodeSite,
+        'radio': AddNodeRadio,
+        'mobile': AddMobility,
+        'posx': AddPosX,
+        'posy': AddPosY,
+        'state':AddBootState,
+        }
+
+
+    #Parse nodes properties from OAR
+    #Put them into a dictionary with key = node id and value is a dictionary
+    #of the node properties and properties'values.
+    def ParseNodes(self):
+        # node_id is None while we wait for the id-producing key of the
+        # current resource entry; it is reset once the last property of
+        # a node has been stored (handler returns 0).
+        # NOTE(review): this relies on the iteration order of the JSON
+        # dict -- the id-producing key must be seen before the other
+        # properties of the same node.  Dict order is arbitrary in
+        # Python 2; confirm the OAR answers keep this working.
+        node_id = None
+        #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes self.raw_json %s" %(self.raw_json)
+        for dictline in self.raw_json:
+            #print >>sys.stderr, " \r\n \r\n \t\t OARrestapi.py ParseNodes dictline %s hey" %(dictline)
+            for k in dictline:
+                if k in self.resources_fulljson_dict:
+                    # dictionary is empty and/or a new node has to be inserted
+                    if node_id is None :
+                        node_id = self.resources_fulljson_dict[k](self,self.node_dictlist, dictline[k])
+                    else:
+                        ret = self.resources_fulljson_dict[k](self,self.node_dictlist[node_id], dictline[k])
+
+                        #If last property has been inserted in the property tuple list, reset node_id
+                        if ret == 0:
+                            #Turn the property tuple list (=dict value) into a dictionary
+                            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+                            node_id = None
+
+                else:
+                    pass
+
+    # Build the SFA hrn of a node from root auth, site login base and hostname.
+    def hostname_to_hrn(self, root_auth, login_base, hostname):
+        return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+ #Retourne liste de dictionnaires contenant attributs des sites
+    # Returns a list of dictionaries containing the sites' attributes.
+    def ParseSites(self):
+        """Build self.site_dict from the already-parsed node_dictlist.
+
+        Also decorates every node with its 'hrn'.  Each site entry
+        carries the list of its node_ids plus a set of hard-coded
+        PLC-style fields expected by the rest of the SFA stack.
+        """
+        nodes_per_site = {}
+        config = Config()
+        # Create a list of nodes per site_id
+        for node_id in self.node_dictlist.keys():
+            node = self.node_dictlist[node_id]
+            if node['site'] not in nodes_per_site:
+                nodes_per_site[node['site']] = []
+                nodes_per_site[node['site']].append(node['node_id'])
+            else:
+                if node['node_id'] not in nodes_per_site[node['site']]:
+                    nodes_per_site[node['site']].append(node['node_id'])
+
+        #Create a site dictionary with key is site_login_base (name of the site)
+        # and value is a dictionary of properties, including the list of the node_ids
+        for node_id in self.node_dictlist.keys():
+            node = self.node_dictlist[node_id]
+            node.update({'hrn':self.hostname_to_hrn(self.interface_hrn, node['site'],node['hostname'])})
+            #node['hrn'] = self.hostname_to_hrn(self.interface_hrn, node['site_login_base'],node['hostname'])
+            self.node_dictlist.update({node_id:node})
+            #if node_id is 1:
+            #print>>sys.stderr, " \r\n \r\n \t \t\t\t OARESTAPI Parse Sites self.node_dictlist %s " %(self.node_dictlist)
+            if node['site'] not in self.site_dict:
+                # NOTE(review): the 'url' value "http,//www.senslab.info"
+                # looks like a typo'd URL, kept as-is because it is a
+                # runtime string -- confirm before fixing.
+                self.site_dict[node['site']] = {'site':node['site'],
+                        'node_ids':nodes_per_site[node['site']],
+                        'latitude':"48.83726",
+                        'longitude':"- 2.10336",'name':config.SFA_REGISTRY_ROOT_AUTH,
+                        'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                        'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                        'abbreviated_name':"senslab", 'address_ids': [],
+                        'url':"http,//www.senslab.info", 'person_ids':[],
+                        'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
+                        'date_created': None, 'peer_id': None }
+            #if node['site_login_base'] not in self.site_dict.keys():
+                #self.site_dict[node['site_login_base']] = {'login_base':node['site_login_base'],
+                                                        #'node_ids':nodes_per_site[node['site_login_base']],
+                                                        #'latitude':"48.83726",
+                                                        #'longitude':"- 2.10336",'name':"senslab",
+                                                        #'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                                                        #'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                                                        #'abbreviated_name':"senslab", 'address_ids': [],
+                                                        #'url':"http,//www.senslab.info", 'person_ids':[],
+                                                        #'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
+                                                        #'date_created': None, 'peer_id': None }
+
+
+
+
+    # Maps a symbolic request name to the OAR REST URI to hit and the
+    # parse method applied to the JSON answer (see SendRequest).
+    OARrequests_uri_dict = {
+        'GET_version': {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
+        'GET_timezone':{'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
+        'GET_jobs': {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
+        'GET_jobs_id': {'uri':'/oarapi/jobs/id.json','parse_func': ParseJobsIds},
+        'GET_jobs_id_resources': {'uri':'/oarapi/jobs/id/resources.json','parse_func': ParseJobsIdResources},
+        'GET_jobs_table': {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
+        'GET_jobs_details': {'uri':'/oarapi/jobs/details.json','parse_func': ParseJobsDetails},
+        'GET_resources_full': {'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFull},
+        'GET_sites':{'uri':'/oarapi/resources/full.json','parse_func': ParseResourcesFullSites},
+        'GET_resources':{'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
+        'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json' ,'parse_func': ParseDeleteJobs}
+        }
+
+
+    def __init__(self, srv ):
+        """Initialise the parser state and probe the OAR API version.
+
+        srv -- object exposing GETRequestToOARRestAPI (the OAR server
+        proxy); version info is fetched immediately so that later
+        parsing can branch on apilib_version.
+        """
+        self.version_json_dict= { 'api_version' : None , 'apilib_version' :None, 'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
+        self.config = Config()
+        self.interface_hrn = self.config.SFA_INTERFACE_HRN
+        self.timezone_json_dict = { 'timezone': None, 'api_timestamp': None, }
+        self.jobs_json_dict = { 'total' : None, 'links' : [] , 'offset':None , 'items' : [] , }
+        # jobs/table/details answers share the same envelope shape
+        self.jobs_table_json_dict = self.jobs_json_dict
+        self.jobs_details_json_dict = self.jobs_json_dict
+        self.server = srv
+        # node_id -> property tuple list, later turned into a dict (ParseNodes)
+        self.node_dictlist = {}
+
+        self.site_dict = {}
+        self.SendRequest("GET_version")
+
+ def SendRequest(self,request, strval = None , username = None):
+ if request in OARrequests_get_uri_dict:
+ self.raw_json = self.server.GETRequestToOARRestAPI(request,strval,username)
+ #print>>sys.stderr, "\r\n OARGetParse __init__ : request %s result %s "%(request,self.raw_json)
+ return self.OARrequests_uri_dict[request]['parse_func'](self)
+ else:
+ print>>sys.stderr, "\r\n OARGetParse __init__ : ERROR_REQUEST " ,request
+
+
+
\ No newline at end of file
--- /dev/null
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+from collections import defaultdict
+
+def strip_dictionnary (dict_to_strip):
+    """Return a copy of dict_to_strip with '|' stripped from both ends
+    of every key (keys are stringified in the process).
+
+    Relies on keys() and values() of the same dict enumerating in the
+    same order, which Python guarantees when the dict is unmodified.
+    """
+    stripped_filter = []
+    stripped_filterdict = {}
+    for f in dict_to_strip :
+        stripped_filter.append(str(f).strip('|'))
+
+    stripped_filterdict = dict(zip(stripped_filter, dict_to_strip.values()))
+
+    return stripped_filterdict
+
+
+def filter_return_fields( dict_to_filter, return_fields):
+    """Return a new dict containing only the keys of dict_to_filter
+    listed in return_fields; absent fields are silently skipped."""
+    filtered_dict = {}
+    print>>sys.stderr, " \r\n \t \tfilter_return_fields return fields %s " %(return_fields)
+    for field in return_fields:
+        #print>>sys.stderr, " \r\n \t \tfield %s " %(field)
+        if field in dict_to_filter:
+            filtered_dict[field] = dict_to_filter[field]
+    print>>sys.stderr, " \r\n \t\t filter_return_fields filtered_dict %s " %(filtered_dict)
+    return filtered_dict
+
+
+
+def parse_filter(list_to_filter, param_filter, type_of_list, return_fields=None) :
+    """Filter a list of record dicts by id/name or by an arbitrary dict.
+
+    list_to_filter -- list of dicts (records) to filter
+    param_filter   -- list of ids (int) / names (str), or a dict of
+                      field -> wanted value(s); None means no filtering
+    type_of_list   -- one of the list_type keys below; selects which
+                      record field an int or str filter is matched against
+    return_fields  -- optional list of fields to project each match onto
+
+    Returns the (possibly projected) matching records.
+    NOTE(review): when nothing matches the function falls through and
+    implicitly returns None rather than [] -- callers must treat both
+    as empty.
+    """
+    # per-record-type mapping: filter value type -> record field to compare
+    list_type = { 'persons': {'str': 'hrn','int':'record_id'},\
+     'keys':{'int':'key_id'},\
+     'site':{'str':'login_base','int':'site_id'},\
+     'node':{'str':'hostname','int':'node_id'},\
+     'slice':{'str':'slice_hrn','int':'record_id_slice'},\
+     'peers':{'str':'hrn'}}
+
+    print>>sys.stderr, " \r\n ___ parse_filter param_filter %s type %s return fields %s " %(param_filter,type_of_list, return_fields)
+    if param_filter is None and return_fields is None:
+        return list_to_filter
+
+    if type_of_list not in list_type:
+        print>>sys.stderr, " \r\n type_of_list Error parse_filter %s " %(type_of_list)
+        return []
+
+    return_filtered_list= []
+
+    for item in list_to_filter:
+        tmp_item = {}
+
+        if type(param_filter) is list :
+            #print>>sys.stderr, " \r\n p_filter LIST %s " %(param_filter)
+
+            for p_filter in param_filter:
+                #print>>sys.stderr, " \r\n p_filter %s \t item %s " %(p_filter,item)
+                if type(p_filter) is int:
+                    if item[list_type[type_of_list]['int']] == p_filter :
+                        if return_fields:
+                            tmp_item = filter_return_fields(item,return_fields)
+                        else:
+                            tmp_item = item
+                        return_filtered_list.append(tmp_item)
+                    #print>>sys.stderr, " \r\n 1tmp_item",tmp_item
+
+                if type(p_filter) is str:
+                    if item[list_type[type_of_list]['str']] == str(p_filter) :
+                        print>>sys.stderr, " \r\n p_filter %s \t item %s "%(p_filter,item[list_type[type_of_list]['str']])
+                        if return_fields:
+                            tmp_item = filter_return_fields(item,return_fields)
+                        else:
+                            tmp_item = item
+                        return_filtered_list.append(tmp_item)
+                    #print>>sys.stderr, " \r\n 2tmp_item",tmp_item
+
+
+        elif type(param_filter) is dict:
+            #stripped_filterdict = strip_dictionnary(param_filter)
+            #tmp_copy = {}
+            #tmp_copy = item.copy()
+            #print>>sys.stderr, " \r\n \t\t ________tmp_copy %s " %(tmp_copy)
+            #key_list = tmp_copy.keys()
+            #for key in key_list:
+                #print>>sys.stderr, " \r\n \t\t key %s " %(key)
+                #if key not in stripped_filterdict:
+                    #del tmp_copy[key]
+
+            # if the item matches the filter, returns it
+            founditem = []
+            # NOTE(review): 'check' is computed but never used.
+            check = [ True for k in param_filter.keys() if 'id' in k ]
+            dflt= defaultdict(str,param_filter)
+
+
+
+            #founditem = [ item for k in dflt if item[k] in dflt[k]]
+            # NOTE(review): 'item[k] in dflt[k]' does a SUBSTRING match
+            # when both sides are strings (e.g. 'ab' matches 'abc') and
+            # raises KeyError when the item lacks the key -- confirm
+            # this is the intended matching semantics.
+            for k in dflt:
+                if item[k] in dflt[k]:
+                    founditem = [item]
+
+            if founditem:
+                if return_fields:
+                    print>>sys.stderr, " \r\n \r\n parsing.py param_filter dflt %s founditem %s " %(dflt, founditem)
+                    tmp_item = filter_return_fields(founditem[0],return_fields)
+                else:
+                    tmp_item = founditem[0]
+                return_filtered_list.append(tmp_item)
+
+            #print>>sys.stderr, " \r\n tmp_copy %s param_filter %s cmp = %s " %(tmp_copy, param_filter,cmp(tmp_copy, stripped_filterdict))
+
+            #if cmp(tmp_copy, stripped_filterdict) == 0:
+                #if return_fields:
+                    #tmp_item = filter_return_fields(item,return_fields)
+                #else:
+
+                    #tmp_item = item
+                #return_filtered_list.append(tmp_item)
+    if return_filtered_list :
+        return return_filtered_list
+
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+#
+# sfa starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description: starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
+# Start each enabled SFA daemon (registry, aggregate, slice manager,
+# flash policy server) according to the flags sourced from sfa_config.
+start() {
+
+    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
+        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
+    fi
+
+    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
+        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
+    fi
+
+    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
+        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
+    fi
+
+    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
+        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
+    fi
+
+    # NOTE(review): $? here reflects only the LAST action (or the last
+    # [ ... ] test when every service is disabled) -- earlier failures
+    # are not captured.
+    RETVAL=$?
+    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py
+
+}
+
+# Stop the SFA daemons and drop the subsys lock on success.
+stop() {
+    action $"Shutting down SFA" killproc sfa-server.py
+    RETVAL=$?
+
+    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
+}
+
+
+# Standard SysV entry-point dispatch on the first argument.
+case "$1" in
+    start) start ;;
+    stop) stop ;;
+    # NOTE(review): no reload() function is defined in this script, so
+    # 'reload force' fails with "command not found" -- confirm intent.
+    reload) reload force ;;
+    restart) stop; start ;;
+    condrestart)
+        # restart only if the service was running (subsys lock present)
+        if [ -f /var/lock/subsys/sfa-server.py ]; then
+            stop
+            start
+        fi
+        ;;
+    status)
+        status sfa-server.py
+        RETVAL=$?
+        ;;
+    *)
+        echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+        exit 1
+        ;;
+esac
+
+exit $RETVAL
+
--- /dev/null
+
+#!/usr/bin/python
+
+# import modules used here -- sys is a very standard one
+import sys
+import httplib
+import json
+
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.senslab.OARrestapi import *
+#from sfa.senslab.slabdriver import SlabDriver
+from sfa.util.config import Config
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
+from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+#from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.sliver import Sliver
+#from sfa.rspecs.elements.login import Login
+#from sfa.rspecs.elements.location import Location
+#from sfa.rspecs.elements.interface import Interface
+#from sfa.rspecs.elements.services import Services
+#from sfa.rspecs.elements.pltag import PLTag
+
+from sfa.rspecs.version_manager import VersionManager
+#from sfa.plc.vlink import get_tc_rate
+from sfa.util.sfatime import datetime_to_epoch
+
+# Build a node hrn of the form <root_auth>.<login_base>_<hostname>.
+def hostname_to_hrn(root_auth,login_base,hostname):
+    return PlXrn(auth=root_auth,hostname=login_base+'_'+hostname).get_hrn()
+
+class SlabAggregate:
+    """Builds advertisement/manifest RSpecs for the senslab testbed
+    from the state exposed by a SlabDriver (OAR/LDAP backed)."""
+
+    # class-level caches; shared across instances as written
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+
+    prepared=False
+
+    user_options = {}
+
+    def __init__(self ,driver):
+        #self.OARImporter = OARapi()
+        # driver: the SlabDriver instance used for all testbed queries
+        self.driver = driver
+        #self.api = api
+        print >>sys.stderr,"\r\n \r\n \t\t_____________INIT Slabaggregate api : %s" %(driver)
+
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers "
+        slices = self.driver.GetSlices({'slice_hrn':str(slice_name)})
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers slices %s " %(slices)
+        if not slices:
+            return (slice, slivers)
+        slice = slices[0]
+
+        # sort slivers by node id , if there is a job
+        #and therfore, node allocated to this slice
+        # NOTE(review): identity test 'is not -1' on an int works only
+        # via CPython small-int caching; '!=' is what is meant.
+        if slice['oar_job_id'] is not -1:
+            try:
+
+                for node_id in slice['node_ids']:
+                    #node_id = self.driver.root_auth + '.' + node_id
+                    sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['record_id_slice'], node_id),
+                                     'name': slice['slice_hrn'],
+                                     'type': 'slab-vm',
+                                     'tags': []})
+                    slivers[node_id]= sliver
+            except KeyError:
+                print>>sys.stderr, " \r\n \t\t get_slice_and_slivers KeyError "
+        ## sort sliver attributes by node id
+        ##tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        ##for tag in tags:
+            ### most likely a default/global sliver attribute (node_id == None)
+            ##if tag['node_id'] not in slivers:
+                ##sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 ##'name': 'slab-vm',
+                                 ##'tags': []})
+                ##slivers[tag['node_id']] = sliver
+            ##slivers[tag['node_id']]['tags'].append(tag)
+        print >>sys.stderr,"\r\n \r\n \t\t_____________ Slabaggregate api get_slice_and_slivers slivers %s " %(slivers)
+        return (slice, slivers)
+
+
+
+    def get_nodes(self,slice=None,slivers=[], options={}):
+        """Return all testbed nodes as rspec Node objects (no slice
+        filtering despite the parameters; slice/slivers are unused)."""
+        # NOTE: mutable defaults ([], {}) are shared across calls but
+        # are never mutated here.
+        filtre = {}
+
+        nodes = self.driver.GetNodes(filtre)
+
+
+        interface_ids = []
+        tag_ids = []
+        nodes_dict = {}
+        for node in nodes:
+            nodes_dict[node['node_id']] = node
+
+        rspec_nodes = []
+        for node in nodes:
+
+            # hostname is rewritten to the node's hrn before being used below
+            node['hostname'] = hostname_to_hrn( self.driver.root_auth,node['site_login_base'], node['hostname'])
+            rspec_node = Node()
+
+            rspec_node['component_id'] = hostname_to_urn(self.driver.root_auth, node['site_login_base'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = hrn_to_urn(self.driver.root_auth, 'authority+sa')
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.root_auth, node['site_login_base']), 'authority+sa')
+            rspec_node['boot_state'] = node['boot_state']
+            if node['posx'] and node['posy']:
+                location = Location({'longitude':node['posx'], 'latitude': node['posy']})
+                rspec_node['location'] = location
+
+            rspec_node['exclusive'] = 'True'
+            rspec_node['hardware_types']= [HardwareType({'name': 'senslab sensor node'})]
+
+            rspec_node['interfaces'] = []
+
+
+            rspec_node['tags'] = []
+
+            rspec_nodes.append(rspec_node)
+        return (rspec_nodes)
+
+    def get_nodes_and_links(self, slice=None,slivers=[], options={}):
+        # NT: the semantic of this function is not clear to me :
+        # if slice is not defined, then all the nodes should be returned
+        # if slice is defined, we should return only the nodes that are part of this slice
+        # but what is the role of the slivers parameter ?
+        # So i assume that slice['node_ids'] will be the same as slivers for us
+        filter = {}
+        tags_filter = {}
+
+        if slice :
+            if 'node_ids' in slice and slice['node_ids']:
+                #first case, a non empty slice was provided
+                filter['hostname'] = slice['node_ids']
+                tags_filter=filter.copy()
+                nodes = self.driver.GetNodes(filter['hostname'])
+            else :
+                #second case, a slice was provided, but is empty
+                nodes={}
+        else :
+            #third case, no slice was provided
+            nodes = self.driver.GetNodes()
+
+        #geni_available = options.get('geni_available')
+        #if geni_available:
+            #filter['boot_state'] = 'boot'
+        print>>sys.stderr, "\r\n \r\n \t get_nodes_and_links filter %s \r\n \r\n \t slivers %s" %(filter, slivers)
+        #filter.update({'peer_id': None})
+        #nodes = self.driver.GetNodes(filter['hostname'])
+        #print>>sys.stderr, "\r\n \r\n \t get_nodes_and_links nodes %s" %(nodes)
+
+        #site_ids = []
+        #interface_ids = []
+        #tag_ids = []
+        nodes_dict = {}
+        for node in nodes:
+            #site_ids.append(node['site_id'])
+            #interface_ids.extend(node['interface_ids'])
+            #tag_ids.extend(node['node_tag_ids'])
+            nodes_dict[node['node_id']] = node
+
+        # get sites
+        #sites_dict = self.get_sites({'site_id': site_ids})
+        # get interfaces
+        #interfaces = self.get_interfaces({'interface_id':interface_ids})
+        # get tags
+        #node_tags = self.get_node_tags(tags_filter)
+
+        #links = self.get_links(sites_dict, nodes_dict, interfaces)
+
+        rspec_nodes = []
+        for node in nodes:
+            # skip whitelisted nodes
+            #if node['slice_ids_whitelist']:
+                #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    #continue
+            rspec_node = Node()
+            # xxx how to retrieve site['login_base']
+            #site_id=node['site_id']
+            #site=sites_dict[site_id]
+            rspec_node['component_id'] = hostname_to_urn(self.driver.root_auth, node['site'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = hrn_to_urn(self.driver.root_auth, 'authority+sa')
+            #rspec_node['component_manager_id'] = Xrn(self.driver.root_auth, 'authority+sa').get_urn()
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.root_auth, node['site']), 'authority+sa')
+            # do not include boot state (<available> element) in the manifest rspec
+            if not slice:
+                rspec_node['boot_state'] = node['boot_state']
+            rspec_node['exclusive'] = 'true'
+            rspec_node['hardware_types'] = [HardwareType({'name': 'slab-sensor'})]
+            # only doing this because protogeni rspec needs
+            # to advertise available initscripts
+            rspec_node['pl_initscripts'] = None
+            # add site/interface info to nodes.
+            # assumes that sites, interfaces and tags have already been prepared.
+            #site = sites_dict[node['site_id']]
+            #if site['longitude'] and site['latitude']:
+                #location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
+                #rspec_node['location'] = location
+            if node['posx'] and node['posy']:
+                location = Location({'longitude':node['posx'], 'latitude': node['posy']})
+                rspec_node['location'] = location
+            rspec_node['interfaces'] = []
+            #if_count=0
+            #for if_id in node['interface_ids']:
+                #interface = Interface(interfaces[if_id])
+                #interface['ipv4'] = interface['ip']
+                #interface['component_id'] = PlXrn(auth=self.driver.hrn,
+                                                 #interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
+                # interfaces in the manifest need a client id
+                #if slice:
+                    #interface['client_id'] = "%s:%s" % (node['node_id'], if_id)
+                #rspec_node['interfaces'].append(interface)
+                #if_count+=1
+
+            #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+            rspec_node['tags'] = []
+            # NOTE(review): membership is tested on node['hrn'] but the
+            # slivers dict built in get_slice_and_slivers is keyed on
+            # node_id -- this lookup mixes the two key spaces; confirm.
+            if node['hrn'] in slivers:
+                # add sliver info
+                sliver = slivers[node['node_id']]
+                rspec_node['sliver_id'] = sliver['sliver_id']
+                rspec_node['client_id'] = node['hostname']
+                rspec_node['slivers'] = [sliver]
+
+                # slivers always provide the ssh service
+                #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+                #service = Services({'login': login})
+                #rspec_node['services'] = [service]
+            rspec_nodes.append(rspec_node)
+        return (rspec_nodes)
+
+#from plc/aggregate.py
+    def get_rspec(self, slice_xrn=None, version = None, options={}):
+        """Build and return the XML RSpec: an advertisement when
+        slice_xrn is None, otherwise a manifest for that slice."""
+        rspec = None
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** version %s version_manager %s options %s \r\n" %(version,version_manager,options)
+
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+        slice, slivers = self.get_slice_and_slivers(slice_xrn)
+        #at this point sliver my be {} if no senslab job is running for this user/slice.
+        rspec = RSpec(version=rspec_version, user_options=options)
+        #if slice and 'expires' in slice:
+            #rspec.xml.set('expires',  datetime_to_epoch(slice['expires']))
+        # add sliver defaults
+        #nodes, links = self.get_nodes_and_links(slice, slivers)
+        nodes = self.get_nodes_and_links(slice,slivers)
+        rspec.version.add_nodes(nodes)
+
+        #rspec.version.add_links(links)
+        default_sliver = slivers.get(None, [])
+        if default_sliver:
+            default_sliver_attribs = default_sliver.get('tags', [])
+            print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** default_sliver_attribs %s \r\n" %(default_sliver_attribs)
+            for attrib in default_sliver_attribs:
+                print>>sys.stderr, " \r\n SlabAggregate \t\t get_rspec ************** attrib %s \r\n" %(attrib)
+                # NOTE(review): 'logger' is not imported in this module;
+                # reaching this line raises NameError -- confirm.
+                logger.info(attrib)
+                rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+
+        return rspec.toxml()
--- /dev/null
+import sys
+import subprocess
+import datetime
+from time import gmtime, strftime
+
+from sfa.util.faults import MissingSfaInfo , SliverDoesNotExist
+#from sfa.util.sfatime import datetime_to_string
+from sfa.util.sfalogging import logger
+#from sfa.storage.table import SfaTable
+from sfa.util.defaultdict import defaultdict
+
+from sfa.storage.record import Record
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+
+from sfa.trust.certificate import *
+from sfa.trust.credential import *
+from sfa.trust.gid import GID
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of
+# SlabDriver should be really only about talking to the senslab testbed
+
+## thierry : please avoid wildcard imports :)
+from sfa.senslab.OARrestapi import OARrestapi
+from sfa.senslab.LDAPapi import LDAPapi
+#from sfa.senslab.SenslabImportUsers import SenslabImportUsers
+from sfa.senslab.parsing import parse_filter
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession,SlabSliceDB
+from sfa.senslab.slabaggregate import SlabAggregate
+from sfa.senslab.slabslices import SlabSlices
+
+def list_to_dict(recs, key):
+    """
+    convert a list of dictionaries into a dictionary keyed on the
+    specified dictionary key
+
+    Records sharing the same key value collapse: the last one wins.
+    """
+    # print>>sys.stderr, " \r\n \t\t 1list_to_dict : rec %s \r\n \t\t list_to_dict key %s" %(recs,key)
+    keys = [rec[key] for rec in recs]
+    #print>>sys.stderr, " \r\n \t\t list_to_dict : rec %s \r\n \t\t list_to_dict keys %s" %(recs,keys)
+    return dict(zip(keys, recs))
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver(Driver):
+
+    def __init__(self, config):
+        """Wire the driver to its backends: OAR REST API, LDAP and the
+        senslab slice database, using the SFA config for naming."""
+        Driver.__init__ (self, config)
+        self.config=config
+        self.hrn = config.SFA_INTERFACE_HRN
+
+        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+
+
+        print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n"
+        # thierry - just to not break the rest of this code
+
+
+        #self.oar = OARapi()
+        self.oar = OARrestapi()
+        self.ldap = LDAPapi()
+        #self.users = SenslabImportUsers()
+        # strftime/strptime format used for job expiry timestamps
+        self.time_format = "%Y-%m-%d %H:%M:%S"
+        self.db = SlabDB(config)
+        #self.logger=sfa_logger()
+        # caching disabled for now
+        self.cache=None
+
+
+ def sliver_status(self,slice_urn,slice_hrn):
+ # receive a status request for slice named urn/hrn urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+ # shall return a structure as described in
+ # http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ # NT : not sure if we should implement this or not, but used by sface.
+ slices = self.GetSlices([slice_hrn])
+ if len(slices) is 0:
+ raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
+ sl = slices[0]
+ print >>sys.stderr, "\r\n \r\n_____________ Sliver status urn %s hrn %s slices %s \r\n " %(slice_urn,slice_hrn,slices)
+ if sl['oar_job_id'] is not -1:
+
+ # report about the local nodes only
+ nodes = self.GetNodes({'hostname':sl['node_ids']},
+ ['node_id', 'hostname','site_login_base','boot_state'])
+ if len(nodes) is 0:
+ raise SliverDoesNotExist("No slivers allocated ")
+
+
+ site_logins = [node['site_login_base'] for node in nodes]
+
+ result = {}
+ top_level_status = 'unknown'
+ if nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = slice_urn
+ result['slab_login'] = sl['job_user']
+
+ timestamp = float(sl['startTime']) + float(sl['walltime'])
+ result['slab_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+
+ resources = []
+ for node in nodes:
+ res = {}
+ res['slab_hostname'] = node['hostname']
+ res['slab_boot_state'] = node['boot_state']
+
+ sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], node['node_id'])
+ res['geni_urn'] = sliver_id
+ if node['boot_state'] == 'Alive':
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ print >>sys.stderr, "\r\n \r\n_____________ Sliver status resources %s res %s \r\n " %(resources,res)
+ return result
+
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        """Allocate slivers for a slice from a request RSpec.
+
+        Verifies/creates the slice and person records, reconciles the
+        requested nodes with the testbed, then returns the resulting
+        manifest RSpec for the slice.
+        """
+        aggregate = SlabAggregate(self)
+        #aggregate = SlabAggregate(self)
+        slices = SlabSlices(self)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None
+        #print>>sys.stderr, " \r\n \r\n create_sliver creds %s \r\n \r\n users %s " %(creds,users)
+
+        if not isinstance(creds, list):
+            creds = [creds]
+
+        #for cred in creds:
+            #cred_obj=Credential(string=cred)
+            #print >>sys.stderr," \r\n \r\n create_sliver cred %s " %(cred)
+            #GIDcall = cred_obj.get_gid_caller()
+            #GIDobj = cred_obj.get_gid_object()
+            #print >>sys.stderr," \r\n \r\n create_sliver GIDobj pubkey %s hrn %s " %(GIDobj.get_pubkey().get_pubkey_string(), GIDobj.get_hrn())
+            #print >>sys.stderr," \r\n \r\n create_sliver GIDcall pubkey %s hrn %s" %(GIDcall.get_pubkey().get_pubkey_string(),GIDobj.get_hrn())
+
+
+        #tmpcert = GID(string = users[0]['gid'])
+        #print >>sys.stderr," \r\n \r\n create_sliver tmpcer pubkey %s hrn %s " %(tmpcert.get_pubkey().get_pubkey_string(), tmpcert.get_hrn())
+
+        # the slice record travels inside the first user record
+        if users:
+            slice_record = users[0].get('slice_record', {})
+
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
+
+        # ensure site record exists
+        #site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        # ensure person records exists
+        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+        # ensure slice attributes exists
+        #slices.verify_slice_attributes(slice, requested_attributes, options=options)
+
+        # add/remove slice from nodes
+        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
+
+
+
+        # handle MyPLC peer association.
+        # only used by plc and ple.
+        #slices.handle_peer(site, slice, persons, peer)
+
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        """Remove the slice from its nodes; returns 1 in all cases
+        (including when the slice does not exist)."""
+        slices = self.GetSlices({'slice_hrn': slice_hrn})
+        if not slices:
+            return 1
+        slice = slices[0]
+
+        # determine if this is a peer slice
+        # xxx I wonder if this would not need to use PlSlices.get_peer instead
+        # in which case plc.peers could be deprecated as this here
+        # is the only/last call to this last method in plc.peers
+        # NOTE(review): 'peers' is not defined/imported in this module,
+        # so this line raises NameError -- the plc driver imports
+        # sfa.plc.peers for it; confirm the intended equivalent here.
+        peer = peers.get_peer(self, slice_hrn)
+        try:
+            if peer:
+                self.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.DeleteSliceFromNodes(slice_hrn, slice['node_ids'])
+        finally:
+            if peer:
+                self.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+
+
+
+
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        """Return the RSpec (advertisement or manifest) requested by
+        the caller, in the rspec version named in options."""
+        #cached_requested = options.get('cached', True)
+
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+
+        # look in cache first
+        #if cached_requested and self.cache and not slice_hrn:
+            #rspec = self.cache.get(version_string)
+            #if rspec:
+                #logger.debug("SlabDriver.ListResources: returning cached advertisement")
+                #return rspec
+
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = SlabAggregate(self)
+        # the caller's hrn is extracted from the first credential and
+        # forwarded to the aggregate through the options
+        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+        print>>sys.stderr, " \r\n \r\n \t SLABDRIVER get_rspec origin_hrn %s" %(origin_hrn)
+        options.update({'origin_hrn':origin_hrn})
+        print>>sys.stderr, " \r\n \r\n \t SLABDRIVER get_rspec options %s" %(options)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+                                     options=options)
+
+        # cache the result
+        #if self.cache and not slice_hrn:
+            #logger.debug("Slab.ListResources: stores advertisement in cache")
+            #self.cache.add(version_string, rspec)
+
+        return rspec
+
+
+    def list_slices (self, creds, options):
+        """Return the slice URNs known to the testbed."""
+        # look in cache first
+        #if self.cache:
+            #slices = self.cache.get('slices')
+            #if slices:
+                #logger.debug("PlDriver.list_slices returns from cache")
+                #return slices
+
+        # get data from db
+        print>>sys.stderr, " \r\n \t\t SLABDRIVER.PY list_slices"
+        slices = self.GetSlices()
+        # NOTE(review): slice['slice_hrn'] already looks like an hrn;
+        # feeding it to slicename_to_hrn again may double-prefix it --
+        # confirm against GetSlices' record shape.
+        slice_hrns = [slicename_to_hrn(self.hrn, slice['slice_hrn']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+        # cache the result
+        #if self.cache:
+            #logger.debug ("SlabDriver.list_slices stores value in cache")
+            #self.cache.add('slices', slice_urns)
+
+        return slice_urns
+
+ #No site or node register supported
+    def register(self, sfa_record, hrn, pub_key):
+        """Create the testbed-side object behind a new SFA registry record.
+
+        Only 'slice' and 'user' records are supported here: sites and
+        nodes are managed by OAR, not through SFA.  Returns the testbed
+        pointer (slice_id / person_id) of the created or pre-existing
+        object, or -1 for unsupported record types.
+        """
+        type = sfa_record['type']
+        slab_record = self.sfa_fields_to_slab_fields(type, hrn, sfa_record)
+        # BUGFIX: 'pointer' used to be left unbound for types other than
+        # slice/user, raising UnboundLocalError at the final return.
+        pointer = -1
+
+        if type == 'slice':
+            # BUGFIX: remember the slice hrn before filtering, since 'hrn'
+            # is not in the acceptable fields and used to be popped right
+            # before being read again below (KeyError).
+            slice_hrn = slab_record['hrn']
+            # keep only the fields AddSlice accepts
+            acceptable_fields = ['url', 'instantiation', 'name', 'description']
+            for key in slab_record.keys():
+                if key not in acceptable_fields:
+                    slab_record.pop(key)
+            print>>sys.stderr, " \r\n \t\t SLABDRIVER.PY register"
+            slices = self.GetSlices([slice_hrn])
+            if not slices:
+                pointer = self.AddSlice(slab_record)
+            else:
+                pointer = slices[0]['slice_id']
+
+        elif type == 'user':
+            persons = self.GetPersons([sfa_record['hrn']])
+            if not persons:
+                # creates the account (LDAP-backed)
+                pointer = self.AddPerson(dict(sfa_record))
+            else:
+                pointer = persons[0]['person_id']
+
+            # add this person to the site only if she is being added for
+            # the first time by sfa and does not already belong to a site
+            if not persons or not persons[0]['site_ids']:
+                login_base = get_leaf(sfa_record['authority'])
+                self.AddPersonToSite(pointer, login_base)
+
+            # every new account gets the basic 'user' role
+            self.AddRoleToPerson('user', pointer)
+            # register the user's public key, when provided
+            if pub_key:
+                self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': pub_key})
+
+        return pointer
+
+ #No site or node record update allowed
+ def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+ # Registry update: push changes of an SFA record down to the testbed.
+ # Only 'slice' and 'user' are handled; when new_key is given, every
+ # other stored key is considered stale and deleted. Returns True.
+ pointer = old_sfa_record['pointer']
+ type = old_sfa_record['type']
+
+ # new_key implemented for users only
+ if new_key and type not in [ 'user' ]:
+ raise UnknownSfaType(type)
+
+ #if (type == "authority"):
+ #self.shell.UpdateSite(pointer, new_sfa_record)
+
+ if type == "slice":
+ slab_record=self.sfa_fields_to_slab_fields(type, hrn, new_sfa_record)
+ # the slice name itself may not be changed
+ if 'name' in slab_record:
+ slab_record.pop('name')
+ self.UpdateSlice(pointer, slab_record)
+
+ elif type == "user":
+ # only a whitelist of personal fields may be updated
+ update_fields = {}
+ all_fields = new_sfa_record
+ for key in all_fields.keys():
+ if key in ['first_name', 'last_name', 'title', 'email',
+ 'password', 'phone', 'url', 'bio', 'accepted_aup',
+ 'enabled']:
+ update_fields[key] = all_fields[key]
+ self.UpdatePerson(pointer, update_fields)
+
+ if new_key:
+ # must check this key against the previous one if it exists
+ persons = self.GetPersons([pointer], ['key_ids'])
+ person = persons[0]
+ # NOTE(review): the next assignment is a dead store, immediately
+ # overwritten by the GetKeys call below
+ keys = person['key_ids']
+ keys = self.GetKeys(person['key_ids'])
+
+ # Delete all stale keys
+ key_exists = False
+ for key in keys:
+ if new_key != key['key']:
+ self.DeleteKey(key['key_id'])
+ else:
+ key_exists = True
+ if not key_exists:
+ self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+
+ #elif type == "node":
+ #self.UpdateNode(pointer, new_sfa_record)
+
+ return True
+
+
+ def remove (self, sfa_record):
+ # Registry delete: remove the testbed-side object behind an SFA
+ # record. Only 'user' and 'slice' are handled; always returns True.
+ type=sfa_record['type']
+ hrn=sfa_record['hrn']
+ record_id= sfa_record['record_id']
+ if type == 'user':
+ # the LDAP login is the leaf component of the hrn
+ username = hrn.split(".")[len(hrn.split(".")) -1]
+ #get user in ldap
+ persons = self.GetPersons(username)
+ # only delete this person if he has site ids. if he doesnt, it probably means
+ # he was just removed from a site, not actually deleted
+ if persons and persons[0]['site_ids']:
+ self.DeletePerson(username)
+ elif type == 'slice':
+ if self.GetSlices(hrn):
+ self.DeleteSlice(hrn)
+
+ #elif type == 'authority':
+ #if self.GetSites(pointer):
+ #self.DeleteSite(pointer)
+
+ return True
+
+    def GetPeers(self, auth=None, peer_filter=None, return_fields=None):
+        """Return the registry records of peer authorities.
+
+        Scans the SFA registry for records of type 'authority+sa' and
+        optionally narrows the result with peer_filter/return_fields
+        through parse_filter.  Returns a (possibly empty) list.
+        """
+        existing_records = {}
+        existing_hrns_by_types = {}
+        # BUGFIX: Query.all is a method; the original stored the bound
+        # method instead of calling it, so nothing was ever indexed.
+        all_records = dbsession.query(RegRecord).all()
+        for record in all_records:
+            existing_records[record.hrn] = record
+            # BUGFIX: the original wrote list.append(...)'s None return
+            # value back into the dict, destroying the hrn list.
+            existing_hrns_by_types.setdefault(record.type, []).append(record.hrn)
+
+        # BUGFIX: records_list was never initialised; the first append
+        # raised NameError, silently swallowed by a bare except.
+        records_list = []
+        try:
+            for hrn in existing_hrns_by_types['authority+sa']:
+                records_list.append(existing_records[hrn])
+        except KeyError:
+            # no peer authority registered yet
+            pass
+        print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers records_list %s " %(records_list)
+
+        if not peer_filter and not return_fields:
+            return records_list
+        return parse_filter(records_list, peer_filter, 'peers', return_fields)
+
+
+
+    def GetPersons(self, person_filter=None, return_fields=None):
+        """Look up user accounts in LDAP under this driver's authority.
+
+        person_filter/return_fields are applied through parse_filter.
+        Returns a (possibly empty) list of person dicts.
+        """
+        person_list = self.ldap.ldapFind({'authority': self.root_auth})
+        return_person_list = parse_filter(person_list, person_filter, 'persons', return_fields)
+        if return_person_list:
+            print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s " %(person_filter, return_fields)
+        # BUGFIX: the original fell off the end (returning None) when the
+        # filter matched nothing; callers iterate over the result, so
+        # always return a list.
+        return return_person_list or []
+
+    def GetTimezone(self):
+        """Return the current timestamp reported by the OAR server."""
+        return self.oar.parser.SendRequest("GET_timezone")
+
+
+ def DeleteJobs(self, job_id, username):
+ # Ask the OAR REST API to delete the given job, acting as 'username'.
+ # A falsy job_id means there is nothing to delete.
+ if not job_id:
+ return
+ reqdict = {}
+ reqdict['method'] = "delete"
+ reqdict['strval'] = str(job_id)
+ answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',reqdict,username)
+ print>>sys.stderr, "\r\n \r\n jobid DeleteJobs %s " %(answer)
+
+
+    def GetJobs(self, job_id=None, resources=True, return_fields=None, username=None):
+        """Fetch one OAR job and normalise its node list.
+
+        With resources=True the reserved resources are queried, otherwise
+        only the job description.  Returns None for jobs in 'Terminated'
+        or 'Error' state (or when job_id is missing / the answer has no
+        node list); otherwise the job dict with the node names exposed
+        under the generic 'node_ids' key.
+        """
+        # BUGFIX: 'req' and 'node_list_k' were left unbound when job_id
+        # was falsy, causing UnboundLocalError further down.
+        if not job_id:
+            return None
+        if resources:
+            req = "GET_jobs_id_resources"
+            node_list_k = 'reserved_resources'
+        else:
+            req = "GET_jobs_id"
+            node_list_k = 'assigned_network_address'
+
+        # Get job info from OAR
+        job_info = self.oar.parser.SendRequest(req, job_id, username)
+        print>>sys.stderr, "\r\n \r\n \t\t GetJobs %s " %(job_info)
+
+        if 'state' in job_info:
+            if job_info['state'] == 'Terminated':
+                return None
+            if job_info['state'] == 'Error':
+                print>>sys.stderr, "\r\n \r\n \t\t GetJobs ERROR message %s " %(job_info)
+                return None
+
+        # map of all known nodes, keyed on hostname
+        node_dict = {}
+        for node in self.GetNodes():
+            node_dict[node['hostname']] = node
+
+        try:
+            node_names = job_info[node_list_k]
+            # keep only node names that are known to GetNodes
+            job_info['node_ids'] = [node_dict[name]['hostname'] for name in node_names]
+            del job_info[node_list_k]
+            return job_info
+        except KeyError:
+            # OAR answer had no node list (or an unknown node name)
+            print>>sys.stderr, "\r\n \r\n \t\t GetJobs KEYERROR "
+
+
+
+
+
+
+    def GetNodes(self, node_filter=None, return_fields=None):
+        """Return the node records known to OAR.
+
+        Without a filter the full resource list is returned; otherwise
+        node_filter/return_fields are applied through parse_filter.
+        """
+        node_dict = self.oar.parser.SendRequest("GET_resources_full")
+        print>>sys.stderr, "\r\n \r\n \t\t SLABDRIVER.PY GetNodes "
+        all_nodes = node_dict.values()
+        if node_filter or return_fields:
+            return parse_filter(all_nodes, node_filter, 'node', return_fields)
+        return all_nodes
+
+
+    def GetSites(self, site_filter=None, return_fields=None):
+        """Return the site records known to OAR.
+
+        Without a filter the full site list is returned; otherwise
+        site_filter/return_fields are applied through parse_filter.
+        """
+        site_dict = self.oar.parser.SendRequest("GET_sites")
+        print>>sys.stderr, "\r\n \r\n \t\t SLABDRIVER.PY GetSites "
+        all_sites = site_dict.values()
+        if site_filter or return_fields:
+            return parse_filter(all_sites, site_filter, 'site', return_fields)
+        return all_sites
+
+ #TODO : filtrer au niveau de la query voir sqlalchemy
+ #http://docs.sqlalchemy.org/en/latest/orm/tutorial.html#returning-lists-and-scalars
+    def GetSlices(self, slice_filter=None, return_fields=None):
+        """Return senslab slice records, refreshing their OAR job state.
+
+        Reads the slice_senslab table, applies slice_filter/return_fields
+        through parse_filter, then for each slice with an attached OAR
+        job merges the job info into the record (or marks the job id -1
+        when the job reached 'Terminated').
+        """
+        sliceslist = slab_dbsession.query(SlabSliceDB).all()
+        return_slice_list = parse_filter(sliceslist, slice_filter, 'slice', return_fields)
+
+        if return_slice_list:
+            for sl in return_slice_list:
+                # the OAR login is the middle part of the slice hrn:
+                # <auth>.<login>_slice
+                login = sl.slice_hrn.split(".")[1].split("_")[0]
+                # BUGFIX: 'is not -1' compared object identity, which is
+                # unreliable for integers; compare by value instead.
+                if sl.oar_job_id != -1:
+                    rslt = self.GetJobs(sl.oar_job_id, resources=False, username=login)
+                    if rslt:
+                        sl.update(rslt)
+                        sl.update({'hrn': str(sl['slice_hrn'])})
+                    else:
+                        # an empty GetJobs answer means the job reached
+                        # 'Terminated'; reflect that in the slice record
+                        sl['oar_job_id'] = '-1'
+                        sl.update({'hrn': str(sl['slice_hrn'])})
+
+        return return_slice_list
+
+
+
+ def testbed_name (self): return "senslab2"
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version (self):
+ # GetVersion helper: advertise every rspec format the VersionManager
+ # knows, split into advertisement- and request-capable versions.
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed':self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions,
+ }
+
+
+
+
+
+
+ ##
+ # Convert SFA fields to senslab fields for use when registering or updating
+ # a registry record in the senslab database
+ #
+ # @param type type of record (user, slice, ...)
+ # @param hrn human readable name
+ # @param sfa_fields dictionary of SFA fields
+ # @param slab_fields dictionary of PLC fields (output)
+
+ def sfa_fields_to_slab_fields(self, type, hrn, record):
+ # Translate an SFA record into the field names the senslab testbed
+ # expects. Only 'slice' records are currently translated; all other
+ # types yield an empty dict (node/authority handling is disabled).
+
+ # NOTE(review): convert_ints is defined but never called below
+ def convert_ints(tmpdict, int_fields):
+ for field in int_fields:
+ if field in tmpdict:
+ tmpdict[field] = int(tmpdict[field])
+
+ slab_record = {}
+ #for field in record:
+ # slab_record[field] = record[field]
+
+ if type == "slice":
+ #instantion used in get_slivers ?
+ # NOTE(review): slab_record is empty here, so this test is
+ # always true and the default is always set
+ if not "instantiation" in slab_record:
+ slab_record["instantiation"] = "senslab-instantiated"
+ slab_record["hrn"] = hrn_to_pl_slicename(hrn)
+ print >>sys.stderr, "\r\n \r\n \t SLABDRIVER.PY sfa_fields_to_slab_fields slab_record %s hrn_to_pl_slicename(hrn) hrn %s " %(slab_record['hrn'], hrn)
+ if "url" in record:
+ slab_record["url"] = record["url"]
+ if "description" in record:
+ slab_record["description"] = record["description"]
+ if "expires" in record:
+ slab_record["expires"] = int(record["expires"])
+
+ #nodes added by OAR only and then imported to SFA
+ #elif type == "node":
+ #if not "hostname" in slab_record:
+ #if not "hostname" in record:
+ #raise MissingSfaInfo("hostname")
+ #slab_record["hostname"] = record["hostname"]
+ #if not "model" in slab_record:
+ #slab_record["model"] = "geni"
+
+ #One authority only
+ #elif type == "authority":
+ #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+
+ #if not "name" in slab_record:
+ #slab_record["name"] = hrn
+
+ #if not "abbreviated_name" in slab_record:
+ #slab_record["abbreviated_name"] = hrn
+
+ #if not "enabled" in slab_record:
+ #slab_record["enabled"] = True
+
+ #if not "is_public" in slab_record:
+ #slab_record["is_public"] = True
+
+ return slab_record
+
+
+
+
+ def AddSliceToNodes(self, slice_name, added_nodes, slice_user=None):
+ # Reserve the given nodes for the slice by submitting an OAR job,
+ # record the job id in the slice DB, then hand the job over to the
+ # senslab experiment wrapper (external java tool).
+
+ site_list = []
+ nodeid_list =[]
+ resource = ""
+ reqdict = {}
+ # build the OAR 'property' clause listing the numeric node ids
+ reqdict['property'] ="network_address in ("
+ for node in added_nodes:
+ #Get the ID of the node : remove the root auth and put the site in a separate list
+ s=node.split(".")
+ # NT: it's not clear for me if the nodenames will have the senslab prefix
+ # so lets take the last part only, for now.
+ lastpart=s[-1]
+ #if s[0] == self.root_auth :
+ # Again here it's not clear if nodes will be prefixed with <site>_, lets split and tanke the last part for now.
+ s=lastpart.split("_")
+ nodeid=s[-1]
+ reqdict['property'] += "'"+ nodeid +"', "
+ nodeid_list.append(nodeid)
+ #site_list.append( l[0] )
+ # drop the trailing ", " and close the clause
+ reqdict['property'] = reqdict['property'][0: len( reqdict['property'])-2] +")"
+ reqdict['resource'] ="network_address="+ str(len(nodeid_list))
+ reqdict['resource']+= ",walltime=" + str(00) + ":" + str(12) + ":" + str(20) #+2 min 20
+ reqdict['script_path'] = "/bin/sleep 620" #+20 sec
+ reqdict['type'] = "deploy"
+ reqdict['directory']= ""
+ reqdict['name']= "TestSandrine"
+ # schedule the reservation a bit in the future, relative to the
+ # OAR server's own clock
+ timestamp = self.GetTimezone()
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes slice_name %s added_nodes %s username %s reqdict %s " %(slice_name,added_nodes,slice_user, reqdict)
+ readable_time = strftime(self.time_format, gmtime(float(timestamp)))
+ print >>sys.stderr," \r\n \r\n \t\t\t\t AVANT ParseTimezone readable_time %s timestanp %s " %(readable_time, timestamp )
+ timestamp = timestamp+ 3620 #Add 3 min to server time
+ readable_time = strftime(self.time_format, gmtime(float(timestamp)))
+
+ print >>sys.stderr," \r\n \r\n \t\t\t\tAPRES ParseTimezone readable_time %s timestanp %s " %(readable_time , timestamp)
+ reqdict['reservation'] = readable_time
+
+ # first step : start the OAR job
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes reqdict %s \r\n site_list %s" %(reqdict,site_list)
+ #OAR = OARrestapi()
+ answer = self.oar.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s " %(answer)
+ # remember the OAR job id in the slice record
+ self.db.update('slice',['oar_job_id'], [answer['id']], 'slice_hrn', slice_name)
+
+ jobid=answer['id']
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s added_nodes %s slice_user %s" %(jobid,added_nodes,slice_user)
+ # second step : configure the experiment
+ # we need to store the nodes in a yaml (well...) file like this :
+ # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
+ f=open('/tmp/sfa/'+str(jobid)+'.json','w')
+ f.write('[')
+ f.write(str(added_nodes[0].strip('node')))
+ for node in added_nodes[1:len(added_nodes)] :
+ f.write(','+node.strip('node'))
+ f.write(']')
+ f.close()
+
+ # third step : call the senslab-experiment wrapper
+ #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar "+str(jobid)+" "+slice_user
+ javacmdline="/usr/bin/java"
+ jarname="/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
+ #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", str(jobid), slice_user])
+ output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), slice_user],stdout=subprocess.PIPE).communicate()[0]
+
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes wrapper returns %s " %(output)
+ return
+
+
+
+
+    def DeleteSliceFromNodes(self, slice_name, deleted_nodes):
+        """Release the given nodes from the slice.
+
+        Currently a no-op for senslab: job deletion is handled through
+        DeleteJobs instead.
+        """
+        return
+
+
+
+    def fill_record_sfa_info(self, records):
+        """Annotate testbed records, in place, with SFA-level information.
+
+        For slices: researcher/PI hrns, geni_urn and geni_creator.
+        For authorities: PI/operator/owner hrns, from testbed roles.
+        For nodes/users: dns, email, geni_urn, geni_certificate.
+        """
+
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        # collect the person and site ids referenced by the records
+        person_ids = []
+        site_ids = []
+        for record in records:
+            person_ids.extend(record.get("person_ids", []))
+            site_ids.extend(record.get("site_ids", []))
+            if 'site_id' in record:
+                site_ids.append(record['site_id'])
+
+        # get all pis from the sites we've encountered
+        # and store them in a dictionary keyed on site_id
+        site_pis = {}
+        if site_ids:
+            pi_filter = {'|roles': ['pi'], '|site_ids': site_ids}
+            pi_list = self.GetPersons(pi_filter, ['person_id', 'site_ids'])
+            for pi in pi_list:
+                # we will also need the pis' hrns below
+                person_ids.append(pi['person_id'])
+                # keep track of the sites each pi belongs to
+                for site_id in pi['site_ids']:
+                    site_pis.setdefault(site_id, []).append(pi)
+
+        # index every SFA registry record by (type, pointer)
+        # BUGFIX: Query.all is a method; the original forgot the call
+        # parentheses and iterated over the bound method object
+        existing_records = {}
+        for record in dbsession.query(RegRecord).all():
+            existing_records[(record.type, record.pointer)] = record
+
+        # resolve the user records; BUGFIX: skip ids with no registry
+        # record instead of appending None (the original would then
+        # crash on person['pointer'])
+        person_list = []
+        for p_id in person_ids:
+            rec = existing_records.get(('user', p_id))
+            if rec is not None:
+                person_list.append(rec)
+
+        # pointers are not unique, so group the records by pointer
+        persons = defaultdict(list)
+        for person in person_list:
+            persons[person['pointer']].append(person)
+
+        # testbed-side user info, keyed on person_id
+        slab_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
+        slab_persons = list_to_dict(slab_person_list, 'person_id')
+
+        # fill sfa info
+        for record in records:
+            sfa_info = {}
+            type = record['type']
+            if type == "slice":
+                # all slice users are researchers
+                record['PI'] = []
+                record['researcher'] = []
+                for person_id in record.get('person_ids', []):
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['researcher'].extend(hrns)
+
+                # pis at the slice's site
+                slab_pis = site_pis[record['site_id']]
+                pi_ids = [pi['person_id'] for pi in slab_pis]
+                for person_id in pi_ids:
+                    hrns = [person['hrn'] for person in persons[person_id]]
+                    record['PI'].extend(hrns)
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                record['geni_creator'] = record['PI']
+
+            elif type == "authority":
+                record['PI'] = []
+                record['operator'] = []
+                record['owner'] = []
+                for pointer in record['person_ids']:
+                    if pointer not in persons or pointer not in slab_persons:
+                        # no sfa or testbed record for this user
+                        continue
+                    hrns = [person['hrn'] for person in persons[pointer]]
+                    roles = slab_persons[pointer]['roles']
+                    if 'pi' in roles:
+                        record['PI'].extend(hrns)
+                    if 'tech' in roles:
+                        record['operator'].extend(hrns)
+                    if 'admin' in roles:
+                        record['owner'].extend(hrns)
+                # xxx TODO: OrganizationName
+
+            elif type == "node":
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+
+            elif type == "user":
+                sfa_info['email'] = record.get("email", "")
+                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                sfa_info['geni_certificate'] = record['gid']
+                # xxx TODO: PostalAddress, Phone
+
+            record.update(sfa_info)
+
+    def augment_records_with_testbed_info(self, sfa_records):
+        """Driver-API entry point: delegate to fill_record_info."""
+        return self.fill_record_info(sfa_records)
+
+    def fill_record_info(self, records):
+        """
+        Given a SFA record, fill in the senslab specific and SFA specific
+        fields in the record.
+
+        Slice records get PI/researcher/oar_job_id/person_ids filled in;
+        for user records the user's slices are appended to the list as
+        extra slice records.  Works in place; returns None.
+        """
+        if not isinstance(records, list):
+            records = [records]
+
+        try:
+            # BUGFIX: iterate over a snapshot — the original iterated an
+            # alias ('parkour') of the very list it appends to below
+            for record in list(records):
+
+                if str(record['type']) == 'slice':
+                    # index the SFA registry records by id
+                    # BUGFIX: Query.all is a method; the original stored
+                    # the bound method instead of calling it
+                    existing_records_by_id = {}
+                    for rec in dbsession.query(RegRecord).all():
+                        existing_records_by_id[rec.record_id] = rec
+
+                    recslice = self.db.find('slice', str(record['hrn']))
+                    if isinstance(recslice, list) and len(recslice) == 1:
+                        recslice = recslice[0]
+                    # BUGFIX: the original extracted the hrn string and
+                    # then indexed it again with ['hrn'] (TypeError);
+                    # read the hrn attribute once instead
+                    recuser_hrn = existing_records_by_id[recslice['record_id_user']].hrn
+
+                    record.update({'PI': [recuser_hrn],
+                                   'researcher': [recuser_hrn],
+                                   'name': record['hrn'],
+                                   'oar_job_id': recslice['oar_job_id'],
+                                   'node_ids': [],
+                                   'person_ids': [recslice['record_id_user']]})
+
+                elif str(record['type']) == 'user':
+                    # expose the user's slices as additional records
+                    recslice = self.db.find('slice', record_filter={'record_id_user': record['record_id']})
+                    for rec in recslice:
+                        rec.update({'type': 'slice'})
+                        rec.update({'hrn': rec['slice_hrn'], 'record_id': rec['record_id_slice']})
+                        records.append(rec)
+
+        except TypeError:
+            print >>sys.stderr, "\r\n \t\t SLABDRIVER fill_record_info EXCEPTION RECORDS : %s" %(records)
+            return
+
+ #def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
+ ## get a list of the HRNs tht are members of the old and new records
+ #if oldRecord:
+ #oldList = oldRecord.get(listName, [])
+ #else:
+ #oldList = []
+ #newList = record.get(listName, [])
+
+ ## if the lists are the same, then we don't have to update anything
+ #if (oldList == newList):
+ #return
+
+ ## build a list of the new person ids, by looking up each person to get
+ ## their pointer
+ #newIdList = []
+ #table = SfaTable()
+ #records = table.find({'type': 'user', 'hrn': newList})
+ #for rec in records:
+ #newIdList.append(rec['pointer'])
+
+ ## build a list of the old person ids from the person_ids field
+ #if oldRecord:
+ #oldIdList = oldRecord.get("person_ids", [])
+ #containerId = oldRecord.get_pointer()
+ #else:
+ ## if oldRecord==None, then we are doing a Register, instead of an
+ ## update.
+ #oldIdList = []
+ #containerId = record.get_pointer()
+
+ ## add people who are in the new list, but not the oldList
+ #for personId in newIdList:
+ #if not (personId in oldIdList):
+ #addFunc(self.plauth, personId, containerId)
+
+ ## remove people who are in the old list, but not the new list
+ #for personId in oldIdList:
+ #if not (personId in newIdList):
+ #delFunc(self.plauth, personId, containerId)
+
+ #def update_membership(self, oldRecord, record):
+ #print >>sys.stderr, " \r\n \r\n ***SLABDRIVER.PY update_membership record ", record
+ #if record.type == "slice":
+ #self.update_membership_list(oldRecord, record, 'researcher',
+ #self.users.AddPersonToSlice,
+ #self.users.DeletePersonFromSlice)
+ #elif record.type == "authority":
+ ## xxx TODO
+ #pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
--- /dev/null
+import sys
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+import sfa.storage.model as model
+
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.orm import relationship, backref
+
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+#Dict holding the columns names of the table as keys
+#and their type, used for creation of the table
+# NOTE(review): this raw-SQL schema description appears to be legacy;
+# the table is now declared through the SlabSliceDB ORM mapping below
+slice_table = {'record_id_user':'integer PRIMARY KEY references X ON DELETE CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1', 'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}
+
+#Dict with all the specific senslab tables
+tablenames_dict = {'slice_senslab': slice_table}
+
+##############################
+
+
+
+SlabBase = declarative_base()
+
+
+
+
+class SlabSliceDB(SlabBase):
+    """ORM mapping for the senslab-specific 'slice_senslab' table.
+
+    Associates a slice (record_id_slice / slice_hrn) with its owning
+    user (record_id_user) and the current OAR job (-1 when none).
+    """
+    __tablename__ = 'slice_senslab'
+    record_id_user = Column(Integer, primary_key=True)
+    oar_job_id = Column(Integer, default=-1)
+    record_id_slice = Column(Integer)
+    slice_hrn = Column(String, nullable=False)
+
+    def __init__(self, slice_hrn=None, oar_job_id=None, record_id_slice=None, record_id_user=None):
+        # Only assign the fields that were explicitly provided.
+        # NOTE(review): these are truthiness tests, so a legitimate 0
+        # would be skipped as well as None — kept as originally designed.
+        if record_id_slice:
+            self.record_id_slice = record_id_slice
+        # BUGFIX: slice_hrn used to be tested and assigned twice
+        if slice_hrn:
+            self.slice_hrn = slice_hrn
+        if oar_job_id:
+            self.oar_job_id = oar_job_id
+        if record_id_user:
+            self.record_id_user = record_id_user
+
+    def __repr__(self):
+        result = "<Record id user =%s, slice hrn=%s, oar_job id=%s,Record id slice =%s" % \
+            (self.record_id_user, self.slice_hrn, self.oar_job_id, self.record_id_slice)
+        result += ">"
+        return result
+
+
+
+
+class SlabDB:
+    """Wrapper around the senslab-specific PostgreSQL database.
+
+    Connects through SQLAlchemy (unix socket first, TCP fallback) and
+    exposes table existence/creation checks plus a simple find() over
+    the slice_senslab table.
+    """
+    def __init__(self, config):
+        self.sl_base = SlabBase
+        dbname = "slab_sfa"
+        # the session is created lazily on demand, see session()
+        self.slab_session = None
+        # the former PostgreSQL.py used psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, nor if it's needed at all
+        # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+        # try a unix socket first - omitting the hostname does the trick
+        unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
+        # the TCP fallback method
+        tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
+        for url in [ unix_url, tcp_url ] :
+            try:
+                self.slab_engine = create_engine (url,echo_pool=True,echo=True)
+                self.check()
+                self.url=url
+                return
+            except:
+                # try the next connection method
+                pass
+        self.slab_engine=None
+        raise Exception,"Could not connect to database"
+
+    def check(self):
+        """Cheap liveness probe; raises if the engine cannot reach the DB."""
+        self.slab_engine.execute("select 1").scalar()
+
+    def session(self):
+        """Return the shared session, creating it on first use."""
+        if self.slab_session is None:
+            Session = sessionmaker()
+            self.slab_session = Session(bind=self.slab_engine)
+        return self.slab_session
+
+    #Close connection to database
+    def close(self):
+        # NOTE(review): self.connection is never assigned by this class
+        # (psycopg2 legacy); calling close() will raise AttributeError —
+        # confirm whether this method is still used anywhere
+        if self.connection is not None:
+            self.connection.close()
+            self.connection = None
+
+    def exists(self, tablename):
+        """
+        Check whether the table named tablename exists in the database.
+        """
+        try:
+            metadata = MetaData(bind=self.slab_engine)
+            table = Table(tablename, metadata, autoload=True)
+            return True
+        except NoSuchTableError:
+            print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES EXISTS NOPE! tablename %s " %(tablename)
+            return False
+
+    def createtable(self, tablename):
+        """
+        Create all tables declared on the SlabBase metadata (tablename is
+        kept for interface compatibility but the whole schema is created).
+        """
+        # BUGFIX: use the instance engine — the module-level 'slab_engine'
+        # does not exist yet while this module is still being imported
+        SlabBase.metadata.create_all(self.slab_engine)
+        return
+
+    def find(self, name=None, filter_dict=None, record_filter=None):
+        """Query the slice_senslab table.
+
+        filter_dict (or the backward-compatible record_filter keyword,
+        which some callers use) maps column names to required values.
+        Returns the list of matching SlabSliceDB objects.
+        """
+        # BUGFIX: the original indexed filter_dict with an undefined name
+        # ('l' instead of 'k'), built a string Query.filter cannot
+        # evaluate, and never returned anything.
+        filters = filter_dict if isinstance(filter_dict, dict) else record_filter
+        query = slab_dbsession.query(SlabSliceDB)
+        if filters:
+            query = query.filter_by(**filters)
+        return query.all()
+
+
+
+
+from sfa.util.config import Config
+
+# module-level singletons shared by the rest of the senslab code:
+# the DB wrapper, its engine, and one shared session
+slab_alchemy= SlabDB(Config())
+slab_engine=slab_alchemy.slab_engine
+slab_dbsession=slab_alchemy.session()
\ No newline at end of file
--- /dev/null
+from types import StringTypes
+from collections import defaultdict
+import sys
+from sfa.util.xrn import get_leaf, get_authority, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_slicename
+from sfa.util.policy import Policy
+from sfa.rspecs.rspec import RSpec
+from sfa.plc.vlink import VLink
+from sfa.util.xrn import Xrn
+from sfa.util.sfalogging import logger
+
+MAXINT = 2L**31-1
+
+class SlabSlices:
+
+ rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+ #def __init__(self, api, ttl = .5, origin_hrn=None):
+ #self.api = api
+ ##filepath = path + os.sep + filename
+ #self.policy = Policy(self.api)
+ #self.origin_hrn = origin_hrn
+ #self.registry = api.registries[api.hrn]
+ #self.credential = api.getCredential()
+ #self.nodes = []
+ #self.persons = []
+
+
+ def __init__(self, driver):
+ # keep a reference to the SlabDriver that instantiated this helper
+ self.driver = driver
+ #def get_slivers(self, xrn, node=None):
+ #hrn, type = urn_to_hrn(xrn)
+
+ #slice_name = hrn_to_pl_slicename(hrn)
+ ## XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+ ## of doing all of this?
+ ##return self.api.driver.GetSliceTicket(self.auth, slice_name)
+
+ ## from PLCAPI.GetSlivers.get_slivers()
+ #slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+ #slices = self.api.driver.GetSlices(slice_name, slice_fields)
+ ## Build up list of users and slice attributes
+ #person_ids = set()
+ #all_slice_tag_ids = set()
+ #for slice in slices:
+ #person_ids.update(slice['person_ids'])
+ #all_slice_tag_ids.update(slice['slice_tag_ids'])
+ #person_ids = list(person_ids)
+ #all_slice_tag_ids = list(all_slice_tag_ids)
+ ## Get user information
+ #all_persons_list = self.api.driver.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+ #all_persons = {}
+ #for person in all_persons_list:
+ #all_persons[person['person_id']] = person
+
+ ## Build up list of keys
+ #key_ids = set()
+ #for person in all_persons.values():
+ #key_ids.update(person['key_ids'])
+ #key_ids = list(key_ids)
+ ## Get user account keys
+ #all_keys_list = self.api.driver.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
+ #all_keys = {}
+ #for key in all_keys_list:
+ #all_keys[key['key_id']] = key
+ ## Get slice attributes
+ #all_slice_tags_list = self.api.driver.GetSliceTags(all_slice_tag_ids)
+ #all_slice_tags = {}
+ #for slice_tag in all_slice_tags_list:
+ #all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+
+ #slivers = []
+ #for slice in slices:
+ #keys = []
+ #for person_id in slice['person_ids']:
+ #if person_id in all_persons:
+ #person = all_persons[person_id]
+ #if not person['enabled']:
+ #continue
+ #for key_id in person['key_ids']:
+ #if key_id in all_keys:
+ #key = all_keys[key_id]
+ #keys += [{'key_type': key['key_type'],
+ #'key': key['key']}]
+ #attributes = []
+ ## All (per-node and global) attributes for this slice
+ #slice_tags = []
+ #for slice_tag_id in slice['slice_tag_ids']:
+ #if slice_tag_id in all_slice_tags:
+ #slice_tags.append(all_slice_tags[slice_tag_id])
+ ## Per-node sliver attributes take precedence over global
+ ## slice attributes, so set them first.
+ ## Then comes nodegroup slice attributes
+ ## Followed by global slice attributes
+ #sliver_attributes = []
+
+ #if node is not None:
+ #for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+ #sliver_attributes.append(sliver_attribute['tagname'])
+ #attributes.append({'tagname': sliver_attribute['tagname'],
+ #'value': sliver_attribute['value']})
+
+ ## set nodegroup slice attributes
+ #for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+ ## Do not set any nodegroup slice attributes for
+ ## which there is at least one sliver attribute
+ ## already set.
+ #if slice_tag not in slice_tags:
+ #attributes.append({'tagname': slice_tag['tagname'],
+ #'value': slice_tag['value']})
+
+ #for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+ ## Do not set any global slice attributes for
+ ## which there is at least one sliver attribute
+ ## already set.
+ #if slice_tag['tagname'] not in sliver_attributes:
+ #attributes.append({'tagname': slice_tag['tagname'],
+ #'value': slice_tag['value']})
+
+ ## XXX Sanity check; though technically this should be a system invariant
+ ## checked with an assertion
+ #if slice['expires'] > MAXINT: slice['expires']= MAXINT
+
+ #slivers.append({
+ #'hrn': hrn,
+ #'name': slice['name'],
+ #'slice_id': slice['slice_id'],
+ #'instantiation': slice['instantiation'],
+ #'expires': slice['expires'],
+ #'keys': keys,
+ #'attributes': attributes
+ #})
+
+ #return slivers
+
+    def get_peer(self, xrn):
+        hrn, type = urn_to_hrn(xrn)
+        # Because of myplc federation, we first need to determine if this
+        # slice belongs to our local plc or a myplc peer. We will assume it
+        # is a local site, unless we find out otherwise
+        peer = None
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer slice_authority "
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+
+        # get this site's authority (sfa root authority or sub authority)
+        site_authority = get_authority(slice_authority).lower()
+
+        # check if we are already peered with this site_authority; if so, remember the peer record
+        peers = self.driver.GetPeers({'hrn':site_authority})
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer slice_authority %s site_authority %s" %(slice_authority,site_authority)
+        for peer_record in peers:
+            names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
+            if site_authority in names:
+                peer = peer_record # last matching peer record wins
+        print>>sys.stderr, " \r\n \r\n \t slices.py get_peer peer %s " %(peer)
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        hrn, type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn: # not our own root authority -> a federated peer
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def verify_slice_nodes(self, slice, requested_slivers, peer):
+        current_slivers = []
+        deleted_nodes = []
+        if slice['node_ids']:
+            nodes = self.driver.GetNodes(slice['node_ids'], ['hostname'])
+            current_slivers = [node['hostname'] for node in nodes] # hostnames currently in the slice
+
+        # remove nodes not in rspec
+        deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+
+        # add nodes from rspec
+        added_nodes = list(set(requested_slivers).difference(current_slivers))
+        print>>sys.stderr , "\r\n \r\n \t slices.py verify_slice_nodes added_nodes %s slice %s" %( added_nodes,slice)
+        try:
+            #if peer:
+                #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            #PI is a list, get the only username in this list
+            #so that the OAR/LDAP knows the user: remove the authority from the name
+            tmp= slice['PI'][0].split(".") # assumes slice['PI'] is non-empty -- TODO confirm with callers
+            username = tmp[(len(tmp)-1)] # last hrn component = bare username
+            self.driver.AddSliceToNodes(slice['name'], added_nodes, username)
+
+            if deleted_nodes:
+                self.driver.DeleteSliceFromNodes(slice['name'], deleted_nodes)
+
+        except:
+            logger.log_exc('Failed to add/remove slice from nodes') # best-effort: log and continue
+
+    def free_egre_key(self):
+        used = set()
+        for tag in self.driver.GetSliceTags({'tagname': 'egre_key'}):
+            used.add(int(tag['value'])) # keys already allocated to some slice
+
+        for i in range(1, 256): # EGRE key space is 1..255
+            if i not in used:
+                key = i
+                break
+        else: # for/else: only reached when the loop found no free key
+            raise KeyError("No more EGRE keys available")
+
+        return str(key) # callers expect the key as a string
+
+
+
+
+
+
+    def handle_peer(self, site, slice, persons, peer):
+        if peer:
+            # bind site
+            try:
+                if site:
+                    self.driver.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id']) # NOTE(review): remote id taken from slice, not site -- confirm intended
+            except Exception,e:
+                self.driver.DeleteSite(site['site_id']) # roll back the site we just created
+                raise e
+
+            # bind slice
+            try:
+                if slice:
+                    self.driver.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
+            except Exception,e:
+                self.driver.DeleteSlice(slice['slice_id']) # roll back the slice on failure
+                raise e
+
+            # bind persons
+            for person in persons:
+                try:
+                    self.driver.BindObjectToPeer('person',
+                                                 person['person_id'], peer['shortname'], person['peer_person_id'])
+
+                    for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
+                        try:
+                            self.driver.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
+                        except:
+                            self.driver.DeleteKey(key['key_id'])
+                            logger.error("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname'])) # was logger(...): logger is a Logger object, calling it raised TypeError
+                except Exception,e:
+                    self.driver.DeletePerson(person['person_id']) # roll back this person on failure
+                    raise e
+
+        return slice
+
+ #def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
+ #(slice_hrn, type) = urn_to_hrn(slice_xrn)
+ #site_hrn = get_authority(slice_hrn)
+ ## login base can't be longer than 20 characters
+ ##slicename = hrn_to_pl_slicename(slice_hrn)
+ #authority_name = slice_hrn.split('.')[0]
+ #login_base = authority_name[:20]
+ #print >>sys.stderr, " \r\n \r\n \t\t SLABSLICES.PY verify_site authority_name %s login_base %s slice_hrn %s" %(authority_name,login_base,slice_hrn)
+
+ #sites = self.driver.GetSites(login_base)
+ #if not sites:
+ ## create new site record
+ #site = {'name': 'geni.%s' % authority_name,
+ #'abbreviated_name': authority_name,
+ #'login_base': login_base,
+ #'max_slices': 100,
+ #'max_slivers': 1000,
+ #'enabled': True,
+ #'peer_site_id': None}
+ #if peer:
+ #site['peer_site_id'] = slice_record.get('site_id', None)
+ #site['site_id'] = self.driver.AddSite(site)
+ ## exempt federated sites from monitor policies
+ #self.driver.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
+
+ ### is this still necessary?
+ ### add record to the local registry
+ ##if sfa_peer and slice_record:
+ ##peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+ ##'peer_authority': sfa_peer, 'pointer': site['site_id']}
+ ##self.registry.register_peer_object(self.credential, peer_dict)
+ #else:
+ #site = sites[0]
+ #if peer:
+ ## unbind from peer so we can modify if necessary. Will bind back later
+ #self.driver.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
+
+ #return site
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options={} ):
+        #slicename = hrn_to_pl_slicename(slice_hrn)
+        #parts = hrn_to_pl_slicename(slice_hrn).split("_")
+        login_base = slice_hrn.split(".")[0] # NOTE(review): computed but unused below
+        slicename = slice_hrn # slice name is the plain hrn (no plc-style renaming)
+        slices = self.driver.GetSlices([slicename])
+        print>>sys.stderr, " \r\n \r\rn Slices.py verify_slice slicename %s slices %s slice_record %s"%(slicename ,slices, slice_record)
+        if not slices:
+            slice = {'name': slicename,
+                     'url': slice_record.get('url', slice_hrn),
+                     #'description': slice_record.get('description', slice_hrn)
+                     }
+            # add the slice
+            slice['slice_id'] = self.driver.AddSlice(slice)
+            slice['node_ids'] = []
+            slice['person_ids'] = []
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+            # mark this slice as an sfa peer record
+            #if sfa_peer:
+                #peer_dict = {'type': 'slice', 'hrn': slice_hrn,
+                             #'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+        else:
+            slice = slices[0]
+            slice.update(slice_record) # overlay the incoming record onto the stored one
+            #del slice['last_updated']
+            #del slice['date_created']
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                ## unbind from peer so we can modify if necessary. Will bind back later
+                #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            # Update the existing record (e.g. the expires field) with the latest info.
+            ##if slice_record and slice['expires'] != slice_record['expires']:
+                ##self.driver.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
+
+        return slice
+
+ #def get_existing_persons(self, users):
+    def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
+        users_by_id = {}
+        users_by_hrn = {}
+        users_dict = {}
+
+        for user in users:
+            if 'person_id' in user and 'hrn' in user:
+                users_by_id[user['person_id']] = user
+                users_dict[user['person_id']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+
+                users_by_hrn[user['hrn']] = user
+                users_dict[user['hrn']] = {'person_id':user['person_id'], 'hrn':user['hrn']}
+
+        #print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person users_dict %s \r\n user_by_hrn %s \r\n \tusers_by_id %s " %( users_dict,users_by_hrn, users_by_id)
+
+        existing_user_ids = []
+        existing_user_hrns = []
+        existing_users= []
+        #Check if user is in LDAP using its hrn.
+        #Assuming Senslab is centralised : one LDAP for all sites, user_id unknown from LDAP
+        # LDAP does not provide user ids, therefore we rely on hrns
+        if users_by_hrn:
+            existing_users = self.driver.GetPersons({'hrn': users_by_hrn.keys()},
+                                                    ['hrn','pkey'])
+            if existing_users:
+                for user in existing_users :
+                    #for k in users_dict[user['hrn']] :
+                    existing_user_hrns.append (users_dict[user['hrn']]['hrn'])
+                    existing_user_ids.append (users_dict[user['hrn']]['person_id'])
+            #print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person existing_user_ids.append (users_dict[user['hrn']][k]) %s \r\n existing_users %s " %( existing_user_ids,existing_users)
+
+
+        # requested slice users
+        requested_user_ids = users_by_id.keys()
+        requested_user_hrns = users_by_hrn.keys()
+        #print>>sys.stderr, " \r\n \r\n \t slabslices.py verify_person requested_user_ids %s user_by_hrn %s " %( requested_user_ids,users_by_hrn)
+        # existing slice users: the slice PIs -- TODO confirm PI list covers all slice members
+        existing_slice_users_filter = {'hrn': slice_record.get('PI', [])}
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_person requested_user_ids %s existing_slice_users_filter %s slice_record %s" %(requested_user_ids,existing_slice_users_filter,slice_record)
+
+        existing_slice_users = self.driver.GetPersons(existing_slice_users_filter,['hrn','pkey'])
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_person existing_slice_users %s " %(existing_slice_users)
+
+        existing_slice_user_hrns = [user['hrn'] for user in existing_slice_users]
+
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_person requested_user_ids %s existing_slice_user_hrns %s " %(requested_user_ids,existing_slice_user_hrns)
+        # users to be added, removed or updated
+
+        added_user_hrns = set(requested_user_hrns).difference(set(existing_user_hrns))
+
+        added_slice_user_hrns = set(requested_user_hrns).difference(existing_slice_user_hrns)
+
+        removed_user_hrns = set(existing_slice_user_hrns).difference(requested_user_hrns)
+
+
+        updated_user_hrns = set(existing_slice_user_hrns).intersection(requested_user_hrns)
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_persons added_user_ids %s added_slice_user_ids %s " %(added_user_ids,added_slice_user_ids)
+        #print>>sys.stderr, " \r\n \r\n slices.py verify_persons removed_user_hrns %s updated_user_hrns %s " %(removed_user_hrns,updated_user_hrns)
+        # Remove stale users (only if we are not appending)
+        append = options.get('append', True)
+        if append == False:
+            for removed_user_hrn in removed_user_hrns:
+                self.driver.DeletePersonFromSlice(removed_user_hrn, slice_record['name'])
+        # update_existing users
+        updated_users_list = [user for user in existing_slice_users if user['hrn'] in \
+          updated_user_hrns]
+        print>>sys.stderr, " \r\n \r\n slices.py verify_persons removed_user_hrns %s updated_users_list %s " %(removed_user_hrns,updated_users_list)
+        #self.verify_keys(existing_slice_users, updated_users_list, peer, append)
+
+        added_persons = []
+        # add new users
+        for added_user_hrn in added_user_hrns:
+            added_user = users_dict[added_user_hrn]
+            #hrn, type = urn_to_hrn(added_user['urn'])
+            person = {
+                #'first_name': added_user.get('first_name', hrn),
+                #'last_name': added_user.get('last_name', hrn),
+                'person_id': added_user['person_id'],
+                #'peer_person_id': None,
+                #'keys': [],
+                #'key_ids': added_user.get('key_ids', []),
+
+            }
+            #print>>sys.stderr, " \r\n \r\n slices.py verify_persons added_user_ids %s " %(added_user_ids)
+            person['person_id'] = self.driver.AddPerson(person) # driver assigns the definitive id
+            if peer:
+                person['peer_person_id'] = added_user['person_id']
+            added_persons.append(person)
+
+            # enable the account
+            self.driver.UpdatePerson(person['person_id'], {'enabled': True})
+
+            # add person to site
+            #self.driver.AddPersonToSite(added_user_id, login_base)
+
+            #for key_string in added_user.get('keys', []):
+                #key = {'key':key_string, 'key_type':'ssh'}
+                #key['key_id'] = self.driver.AddPersonKey(person['person_id'], key)
+                #person['keys'].append(key)
+
+            # add the registry record
+            #if sfa_peer:
+                #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
+                             #'pointer': person['person_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+        for added_slice_user_hrn in added_slice_user_hrns.union(added_user_hrns):
+            self.driver.AddPersonToSlice(added_slice_user_hrn, slice_record['name'])
+        #for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
+            # add person to the slice
+            #self.driver.AddPersonToSlice(added_slice_user_id, slice_record['name'])
+            # if this is a peer record then it should already be bound to a peer.
+            # no need to return worry about it getting bound later
+
+        return added_persons
+
+
+    def verify_keys(self, persons, users, peer, options={}):
+        # existing keys
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id'] # key string -> key id
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person # email -> person record, for peer (un)binding
+
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']] # assumes user['email'] is in persondict -- TODO confirm
+                            self.driver.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.driver.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+
+                    finally:
+                        if peer:
+                            self.driver.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+
+        # remove old keys (only if we are not appending)
+        if options.get('append', True) == False: # was `append`: undefined NameError; verify_persons reads it from options
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for existing_key_string in keydict: # keydict keys are key strings, so compare strings
+                if existing_key_string in removed_keys:
+                    try:
+                        if peer:
+                            self.driver.UnBindObjectFromPeer('key', keydict[existing_key_string], peer['shortname'])
+                        self.driver.DeleteKey(keydict[existing_key_string]) # pass the key id, not the key string
+                    except:
+                        pass
+
+ #def verify_slice_attributes(self, slice, requested_slice_attributes, append=False, admin=False):
+ ## get list of attributes users ar able to manage
+ #filter = {'category': '*slice*'}
+ #if not admin:
+ #filter['|roles'] = ['user']
+ #slice_attributes = self.driver.GetTagTypes(filter)
+ #valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
+
+ ## get sliver attributes
+ #added_slice_attributes = []
+ #removed_slice_attributes = []
+ #ignored_slice_attribute_names = []
+ #existing_slice_attributes = self.driver.GetSliceTags({'slice_id': slice['slice_id']})
+
+ ## get attributes that should be removed
+ #for slice_tag in existing_slice_attributes:
+ #if slice_tag['tagname'] in ignored_slice_attribute_names:
+ ## If a slice already has a admin only role it was probably given to them by an
+ ## admin, so we should ignore it.
+ #ignored_slice_attribute_names.append(slice_tag['tagname'])
+ #else:
+ ## If an existing slice attribute was not found in the request it should
+ ## be removed
+ #attribute_found=False
+ #for requested_attribute in requested_slice_attributes:
+ #if requested_attribute['name'] == slice_tag['tagname'] and \
+ #requested_attribute['value'] == slice_tag['value']:
+ #attribute_found=True
+ #break
+
+ #if not attribute_found and not append:
+ #removed_slice_attributes.append(slice_tag)
+
+ ## get attributes that should be added:
+ #for requested_attribute in requested_slice_attributes:
+ ## if the requested attribute wasn't found we should add it
+ #if requested_attribute['name'] in valid_slice_attribute_names:
+ #attribute_found = False
+ #for existing_attribute in existing_slice_attributes:
+ #if requested_attribute['name'] == existing_attribute['tagname'] and \
+ #requested_attribute['value'] == existing_attribute['value']:
+ #attribute_found=True
+ #break
+ #if not attribute_found:
+ #added_slice_attributes.append(requested_attribute)
+
+
+ ## remove stale attributes
+ #for attribute in removed_slice_attributes:
+ #try:
+ #self.driver.DeleteSliceTag(attribute['slice_tag_id'])
+ #except Exception, e:
+ #self.logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(e)))
+
+ ## add requested_attributes
+ #for attribute in added_slice_attributes:
+ #try:
+ #self.driver.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
+ #except Exception, e:
+ #self.logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(e)))
+
+ #def create_slice_aggregate(self, xrn, rspec):
+ #hrn, type = urn_to_hrn(xrn)
+ ## Determine if this is a peer slice
+ #peer = self.get_peer(hrn)
+ #sfa_peer = self.get_sfa_peer(hrn)
+
+ #spec = RSpec(rspec)
+ ## Get the slice record from sfa
+ #slicename = hrn_to_pl_slicename(hrn)
+ #slice = {}
+ #slice_record = None
+ #registry = self.api.registries[self.api.hrn]
+ #credential = self.api.getCredential()
+
+ #site_id, remote_site_id = self.verify_site(registry, credential, hrn, peer, sfa_peer)
+ #slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
+
+ ## find out where this slice is currently running
+ #nodelist = self.driver.GetNodes(slice['node_ids'], ['hostname'])
+ #hostnames = [node['hostname'] for node in nodelist]
+
+ ## get netspec details
+ #nodespecs = spec.getDictsByTagName('NodeSpec')
+
+ ## dict in which to store slice attributes to set for the nodes
+ #nodes = {}
+ #for nodespec in nodespecs:
+ #if isinstance(nodespec['name'], list):
+ #for nodename in nodespec['name']:
+ #nodes[nodename] = {}
+ #for k in nodespec.keys():
+ #rspec_attribute_value = nodespec[k]
+ #if (self.rspec_to_slice_tag.has_key(k)):
+ #slice_tag_name = self.rspec_to_slice_tag[k]
+ #nodes[nodename][slice_tag_name] = rspec_attribute_value
+ #elif isinstance(nodespec['name'], StringTypes):
+ #nodename = nodespec['name']
+ #nodes[nodename] = {}
+ #for k in nodespec.keys():
+ #rspec_attribute_value = nodespec[k]
+ #if (self.rspec_to_slice_tag.has_key(k)):
+ #slice_tag_name = self.rspec_to_slice_tag[k]
+ #nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+ #for k in nodespec.keys():
+ #rspec_attribute_value = nodespec[k]
+ #if (self.rspec_to_slice_tag.has_key(k)):
+ #slice_tag_name = self.rspec_to_slice_tag[k]
+ #nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+ #node_names = nodes.keys()
+ ## remove nodes not in rspec
+ #deleted_nodes = list(set(hostnames).difference(node_names))
+ ## add nodes from rspec
+ #added_nodes = list(set(node_names).difference(hostnames))
+
+ #try:
+ #if peer:
+ #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+
+ #self.driver.AddSliceToNodes(slicename, added_nodes)
+
+ ## Add recognized slice tags
+ #for node_name in node_names:
+ #node = nodes[node_name]
+ #for slice_tag in node.keys():
+ #value = node[slice_tag]
+ #if (isinstance(value, list)):
+ #value = value[0]
+
+ #self.driver.AddSliceTag(slicename, slice_tag, value, node_name)
+
+ #self.driver.DeleteSliceFromNodes(slicename, deleted_nodes)
+ #finally:
+ #if peer:
+ #self.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+ #return 1
+
# There should be a gid file in /etc/sfa/trusted_roots for every
# peer registry found in in the registries.xml config file. If there
# are any missing gids, request a new one from the peer registry.
+ print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs server_key_file %s server_cert_file %s"%(server_key_file,server_cert_file)
api = SfaApi(key_file = server_key_file, cert_file = server_cert_file)
registries = Registries()
aggregates = Aggregates()
peer_gids = []
if not new_hrns:
return
-
+ print>>sys.stderr," \r\n \r\n \t=============================================== install_peer_certs interfaces %s api.config.SFA_INTERFACE_HRN %s new_hrns %s" %( interfaces,api.config.SFA_INTERFACE_HRN,new_hrns)
trusted_certs_dir = api.config.get_trustedroots_dir()
- for new_hrn in new_hrns:
+ for new_hrn in new_hrns:
if not new_hrn: continue
# the gid for this interface should already be installed
if new_hrn == api.config.SFA_INTERFACE_HRN: continue
url = interfaces[new_hrn].get_url()
interface = interfaces[new_hrn].server_proxy(server_key_file, server_cert_file, timeout=30)
# skip non sfa aggregates
+ print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs IIIinterface %s url %s" %(interface,url)
server_version = api.get_cached_server_version(interface)
+ print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs server_version %s \r\n \r\rn \t\t =============================================== server_version['sfa'] %s, " %(server_version, server_version['sfa'])
if 'sfa' not in server_version:
logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
continue
-
trusted_gids = ReturnValue.get_value(interface.get_trusted_certs())
if trusted_gids:
# the gid we want should be the first one in the list,
# but lets make sure
- for trusted_gid in trusted_gids:
# default message
message = "interface: %s\t" % (api.interface)
message += "unable to install trusted gid for %s" % \
(new_hrn)
gid = GID(string=trusted_gid)
+ print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs gid %s " %(gid)
peer_gids.append(gid)
if gid.get_hrn() == new_hrn:
gid_filename = os.path.join(trusted_certs_dir, '%s.gid' % new_hrn)
hierarchy = Hierarchy()
auth_info = hierarchy.get_interface_auth_info()
server_key_file = auth_info.get_privkey_filename()
- server_cert_file = auth_info.get_gid_filename()
-
+ server_cert_file = auth_info.get_gid_filename()
+ print>>sys.stderr, " \r\n \t\t\t\t\t SFA-START MAIN auth_info %s server_key_file %s server_cert_file %s "%(auth_info, server_key_file,server_cert_file)
# ensure interface cert is present in trusted roots dir
trusted_roots = TrustedRoots(config.get_trustedroots_dir())
trusted_roots.add_gid(GID(filename=server_cert_file))
valid = []
if not isinstance(creds, list):
creds = [creds]
+ #print>>sys.stderr, "\r\n \r\n \t AUTH.PY checkCredentials hrn %s" %(hrn)
logger.debug("Auth.checkCredentials with %d creds"%len(creds))
for cred in creds:
try:
self.client_cred = Credential(string = cred)
self.client_gid = self.client_cred.get_gid_caller()
self.object_gid = self.client_cred.get_gid_object()
-
+ #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check client_gid %s hrn %s object_gid %s" %(self.client_gid.get_hrn(),hrn, self.object_gid.get_hrn())
# make sure the client_gid is not blank
if not self.client_gid:
raise MissingCallerGID(self.client_cred.get_subject())
self.verifyPeerCert(self.peer_cert, self.client_gid)
# make sure the client is allowed to perform the operation
- if operation:
+ if operation:
+ #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check operation %s trusted_cert_list %s " %(operation,self.trusted_cert_list)
if not self.client_cred.can_perform(operation):
+ #print>>sys.stderr, " \r\n \r\n \t AUTH.PY InsufficientRights(operation)"
raise InsufficientRights(operation)
if self.trusted_cert_list:
self.client_cred.verify(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+ #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check trusted_cert_file_list %s self.config.SFA_CREDENTIAL_SCHEMA %s" %(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+
else:
raise MissingTrustedRoots(self.config.get_trustedroots_dir())
# Make sure the credential's target matches the specified hrn.
# This check does not apply to trusted peers
trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
+ #print>>sys.stderr, " \r\n \r\n \t AUTH.PY check trusted_peers ", trusted_peers
if hrn and self.client_gid.get_hrn() not in trusted_peers:
+
target_hrn = self.object_gid.get_hrn()
if not hrn == target_hrn:
raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
@param name human readable name to test
"""
object_hrn = self.object_gid.get_hrn()
- if object_hrn == name:
- return
- if name.startswith(object_hrn + "."):
+ #strname = str(name).strip("['']")
+ if object_hrn == name:
+ #if object_hrn == strname:
+ return
+ if name.startswith(object_hrn + ".") :
+ #if strname.startswith((object_hrn + ".")) is True:
return
#if name.startswith(get_authority(name)):
#return
-
+
raise PermissionError(name)
def determine_user_rights(self, caller_hrn, record):
# Credentials are signed XML files that assign a subject gid privileges to an object gid
##
-import os
+import os,sys
from types import StringTypes
import datetime
from StringIO import StringIO
def get_refid(self):
+ #print>>sys.stderr," \r\n \r\n credential.py Signature get_refid\ self.refid %s " %(self.refid)
if not self.refid:
self.decode()
+ #print>>sys.stderr," \r\n \r\n credential.py Signature get_refid self.refid %s " %(self.refid)
return self.refid
def get_xml(self):
def updateRefID(self):
if not self.parent:
- self.set_refid('ref0')
+ self.set_refid('ref0')
+ #print>>sys.stderr, " \r\n \r\n updateRefID next_cred ref0 "
return []
refs = []
next_cred = self.parent
+
while next_cred:
+
refs.append(next_cred.get_refid())
if next_cred.parent:
next_cred = next_cred.parent
+ #print>>sys.stderr, " \r\n \r\n updateRefID next_cred "
else:
next_cred = None
+ #print>>sys.stderr, " \r\n \r\n updateRefID next_cred NONE"
# Find a unique refid for this credential
# Failures here include unreadable files
# or non PEM files
trusted_cert_objects.append(GID(filename=f))
+ #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs %s" %(GID(filename=f).get_hrn())
ok_trusted_certs.append(f)
except Exception, exc:
logger.error("Failed to load trusted cert from %s: %r", f, exc)
trusted_certs = ok_trusted_certs
+        #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs elements %s" %(len(trusted_certs))
# Use legacy verification if this is a legacy credential
if self.legacy:
# Verify the gids of this cred and of its parents
for cur_cred in self.get_credential_list():
cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
- cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+ cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+ #print>>sys.stderr, " \r\n \t\t\t credential.py verify cur_cred get_gid_object hrn %s get_gid_caller %s" %(cur_cred.get_gid_object().get_hrn(),cur_cred.get_gid_caller().get_hrn())
refs = []
refs.append("Sig_%s" % self.get_refid())
parentRefs = self.updateRefID()
for ref in parentRefs:
refs.append("Sig_%s" % ref)
-
+ #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs refs", ref
for ref in refs:
# If caller explicitly passed in None that means skip xmlsec1 validation.
# Strange and not typical
# (self.xmlsec_path, ref, cert_args, filename)
verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \
% (self.xmlsec_path, ref, cert_args, filename)).read()
+ #print>>sys.stderr, " \r\n \t\t\t credential.py verify filename %s verified %s " %(filename,verified)
if not verified.strip().startswith("OK"):
# xmlsec errors have a msg= which is the interesting bit.
mstart = verified.find("msg=")
msg = verified[mstart:mend]
raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
os.remove(filename)
-
+
+ #print>>sys.stderr, " \r\n \t\t\t credential.py HUMMM parents %s", self.parent
# Verify the parents (delegation)
if self.parent:
self.verify_parent(self.parent)
-
+ #print>>sys.stderr, " \r\n \t\t\t credential.py verify trusted_certs parents"
# Make sure the issuer is the target's authority, and is
# itself a valid GID
self.verify_issuer(trusted_cert_objects)
# . The expiry time on the child must be no later than the parent
# . The signer of the child must be the owner of the parent
def verify_parent(self, parent_cred):
+ #print>>sys.stderr, " \r\n\r\n \t verify_parent parent_cred.get_gid_caller().save_to_string(False) %s self.get_signature().get_issuer_gid().save_to_string(False) %s" %(parent_cred.get_gid_caller().get_hrn(),self.get_signature().get_issuer_gid().get_hrn())
# make sure the rights given to the child are a subset of the
# parents rights (and check delegate bits)
if not parent_cred.get_privileges().is_superset(self.get_privileges()):
# sfa should not depend on sfatables
# if the sfatables.runtime import fails, just define run_sfatables as identity
-
+import sys
try:
from sfatables.runtime import SFATablesRules
"""
if not context_callback:
context_callback = fetch_context
-
+
chain = chain.upper()
rules = SFATablesRules(chain)
+ print>>sys.stderr, " \r\n \r\n \t\t \t sfaTablesRuntime.py run_sfatables context_callback %s chain %s rules %s " %(context_callback,chain, rules )
if rules.sorted_rule_list:
contexts = rules.contexts
request_context = context_callback(hrn, origin_hrn, contexts)
#----------------------------------------------------------------------
import re
-
+import sys
from sfa.util.faults import SfaAPIError
# for convenience and smoother translation - we should get rid of these functions eventually
# provide either urn, or (hrn + type)
def __init__ (self, xrn, type=None):
if not xrn: xrn = ""
+
# user has specified xrn : guess if urn or hrn
if xrn.startswith(Xrn.URN_PREFIX):
self.hrn=None
self.urn=xrn
self.urn_to_hrn()
+ #print>>sys.stderr, " \r\n \r\n \t XRN.PY init xrn.startswith(Xrn.URN_PREFIX) hrn %s urn %s type %s" %( self.hrn, self.urn, self.type)
else:
self.urn=None
self.hrn=xrn
self.type=type
self.hrn_to_urn()
+ #print>>sys.stderr, " \r\n \r\n \t XRN.PY init ELSE hrn %s urn %s type %s" %( self.hrn, self.urn, self.type)
# happens all the time ..
# if not type:
# debug_logger.debug("type-less Xrn's are not safe")
def get_hrn_type(self): return (self.hrn, self.type)
def _normalize(self):
+ #print>>sys.stderr, " \r\n \r\n \t XRN.PY _normalize self.hrn %s ",self.hrn
if self.hrn is None: raise SfaAPIError, "Xrn._normalize"
if not hasattr(self,'leaf'):
self.leaf=Xrn.hrn_split(self.hrn)[-1]
# self.authority keeps a list
if not hasattr(self,'authority'):
self.authority=Xrn.hrn_auth_list(self.hrn)
-
+ #print>>sys.stderr, " \r\n \r\n \t XRN.PY _normalize self.hrn %s leaf %s authority %s"%(self.hrn, self.leaf, self.authority)
+
+
def get_leaf(self):
self._normalize()
return self.leaf
#!/usr/bin/python
+# just checking write access on repo
import sys
import unittest