'sfa/generic',
'sfa/managers',
'sfa/importer',
+
+
+
+ 'sfa/senslab',
+
+
+
+
+
'sfa/rspecs',
'sfa/rspecs/elements',
'sfa/rspecs/elements/versions',
rspec.filter({'component_manager_id': server_version['urn']})
rspec = RSpecConverter.to_pg_rspec(rspec.toxml(), content_type='request')
else:
+ print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
users = sfa_users_arg(user_records, slice_record)
# do not append users, keys, or slice tags. Anything
--- /dev/null
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+
+
+
+class slab (Generic):
+    """Generic-framework flavour for the Senslab ('slab') testbed.
+
+    Each method returns the class implementing one SFA service role;
+    slab reuses the stock SFA API and managers and plugs in its own
+    importer and driver.
+    """
+
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.slabimporter
+        return sfa.importer.slabimporter.SlabImporter
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.senslab.slabdriver
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
--- /dev/null
+import sys
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
+
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.senslab.slabpostgres import SliceSenslab, slab_dbsession
+
+from sfa.trust.certificate import Keypair,convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
+ RegUser, RegKey
+from sfa.util.sfalogging import logger
+
+from sqlalchemy.exc import SQLAlchemyError
+
+
+def _get_site_hrn(site):
+    """Return the HRN for *site*: senslab simply reuses the site name verbatim."""
+    hrn = site['name']
+    return hrn
+
+class SlabImporter:
+    """Import senslab testbed objects (sites, nodes, LDAP persons, slices)
+    into the SFA registry database, then delete registry records that are
+    no longer backed by a testbed object ("stale" records).
+
+    NOTE(review): several methods shadow Python builtins (type, tuple,
+    slice) and use bare ``except:`` / ``print >>sys.stderr`` debugging —
+    candidates for cleanup once out of the patch stage.
+    """
+
+    def __init__ (self, auth_hierarchy, loc_logger):
+        # auth_hierarchy: used to create authorities and GIDs for new records
+        self.auth_hierarchy = auth_hierarchy
+        self.logger = loc_logger
+        self.logger.setLevelDebug()
+
+    def hostname_to_hrn_escaped(self, root_auth, hostname):
+        """Build a node HRN: root authority dot the xrn-escaped hostname."""
+        return '.'.join( [root_auth,Xrn.escape(hostname)] )
+
+
+
+    def slicename_to_hrn(self, person_hrn):
+        """Derive the (single, implicit) slice HRN of a user: '<user_hrn>_slice'."""
+        return (person_hrn +'_slice')
+
+    def add_options (self, parser):
+        # we don't have any options for now
+        pass
+
+    def find_record_by_type_hrn(self,type,hrn):
+        """Look up an existing registry record by (type, hrn); None if unknown."""
+        # NOTE(review): parameter 'type' shadows the builtin
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+
+    def locate_by_type_pointer (self, type, pointer):
+        """Look up an existing registry record by (type, pointer); None if unknown."""
+        # NOTE(review): leftover debug prints to stderr — use self.logger.debug instead
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer .........................."
+        ret = self.records_by_type_pointer.get ( (type, pointer), None)
+        print>>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer "
+        return ret
+
+    def update_just_added_records_dict (self, record):
+        """Register a freshly created record in the (type, hrn) index so later
+        iterations of run() see it; warn on duplicates instead of overwriting."""
+        # NOTE(review): local name 'tuple' shadows the builtin
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("SlabImporter.update_just_added_records_dict: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+
+    def run (self, options):
+        """Main entry point: synchronize the SFA registry with the testbed.
+
+        Phases: (1) ensure the senslab-specific slice table exists,
+        (2) index existing registry records, (3) import sites and their
+        nodes, (4) import persons and their implicit slice, (5) delete
+        records still marked stale at the end.
+        """
+        config = Config()
+
+        slabdriver = SlabDriver(config)
+
+        #Create special slice table for senslab
+
+        if not slabdriver.db.exists('slice_senslab'):
+            slabdriver.db.createtable()
+            self.logger.info ("SlabImporter.run: slice_senslab table created ")
+
+        #retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+
+        #create hash by (type,hrn)
+        #used to know if a given record is already known to SFA
+
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
+
+        # create hash by (type,pointer)
+        self.records_by_type_pointer = \
+            dict ( [ ( (str(record.type),record.pointer) , record ) for record in all_records if record.pointer != -1] )
+
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records:
+            record.stale=True
+
+        nodes_listdict = slabdriver.GetNodes()
+        nodes_by_id = dict([(node['node_id'],node) for node in nodes_listdict])
+        sites_listdict = slabdriver.GetSites()
+
+        ldap_person_listdict = slabdriver.GetPersons()
+        slices_listdict = slabdriver.GetSlices()
+        # NOTE(review): if this raises TypeError, slices_by_userid stays
+        # unbound and its use further down (NameError) is only saved by the
+        # bare except around it — worth initialising to {} before the try.
+        try:
+            slices_by_userid = dict ( [ (one_slice['record_id_user'], one_slice ) for one_slice in slices_listdict ] )
+        except TypeError:
+            self.logger.log_exc("SlabImporter: failed to create list of slices by user id.")
+            pass
+
+        for site in sites_listdict:
+            site_hrn = _get_site_hrn(site)
+            site_record = self.find_record_by_type_hrn ('authority', site_hrn)
+            if not site_record:
+                try:
+                    urn = hrn_to_urn(site_hrn, 'authority')
+                    if not self.auth_hierarchy.auth_exists(urn):
+                        self.auth_hierarchy.create_auth(urn)
+                    auth_info = self.auth_hierarchy.get_auth_info(urn)
+                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                               pointer='-1',
+                                               authority=get_authority(site_hrn))
+                    site_record.just_created()
+                    dbsession.add(site_record)
+                    dbsession.commit()
+                    self.logger.info("SlabImporter: imported authority (site) : %s" % site_record)
+                    self.update_just_added_records_dict(site_record)
+                except SQLAlchemyError:
+                    # if the site import fails then there is no point in trying to import the
+                    # site's child records (node, slices, persons), so skip them.
+                    self.logger.log_exc("SlabImporter: failed to import site. Skipping child records")
+                    continue
+            else:
+                # xxx update the record ...
+                pass
+            site_record.stale=False
+
+            # import node records in site
+            for node_id in site['node_ids']:
+                try:
+                    node = nodes_by_id[node_id]
+                except:
+                    self.logger.warning ("SlabImporter: cannot find node_id %s - ignored"%node_id)
+                    continue
+                site_auth = get_authority(site_hrn)
+                site_name = site['name']
+                escaped_hrn = self.hostname_to_hrn_escaped(slabdriver.root_auth, node['hostname'])
+                # NOTE(review): debug print — replace with self.logger.debug
+                print>>sys.stderr, "\r\n \r\n SLABIMPORTER node %s " %(node)
+                hrn = node['hrn']
+
+
+                # xxx this sounds suspicious
+                if len(hrn) > 64: hrn = hrn[:64]
+                node_record = self.find_record_by_type_hrn( 'node', hrn )
+                if not node_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(escaped_hrn, 'node')
+                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        # local helper: the node's authority is the first HRN component
+                        def slab_get_authority(hrn):
+                            return hrn.split(".")[0]
+
+                        node_record = RegNode (hrn=hrn, gid=node_gid,
+                                               pointer = '-1',
+                                               authority=slab_get_authority(hrn))
+                        node_record.just_created()
+                        dbsession.add(node_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported node: %s" % node_record)
+                        self.update_just_added_records_dict(node_record)
+                    except:
+                        self.logger.log_exc("SlabImporter: failed to import node")
+                else:
+                    # xxx update the record ...
+                    pass
+                node_record.stale=False
+
+
+        # import persons
+        for person in ldap_person_listdict :
+            # only persons with an ssh-rsa public key are importable
+            if 'ssh-rsa' not in person['pkey']:
+                continue
+            person_hrn = person['hrn']
+            slice_hrn = self.slicename_to_hrn(person['hrn'])
+
+            # xxx suspicious again
+            if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+            person_urn = hrn_to_urn(person_hrn, 'user')
+
+            user_record = self.find_record_by_type_hrn('user', person_hrn)
+            slice_record = self.find_record_by_type_hrn ('slice', slice_hrn)
+
+            # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+            def init_person_key (person, slab_key):
+                pubkey = None
+                if person['pkey']:
+                    # randomly pick first key in set
+                    pubkey = slab_key
+
+                    try:
+                        pkey = convert_public_key(pubkey)
+                    except TypeError:
+                        #key not good. create another pkey
+                        self.logger.warn('SlabImporter: \
+                                        unable to convert public \
+                                        key for %s' % person_hrn)
+                        pkey = Keypair(create=True)
+
+                else:
+                    # the user has no keys. Creating a random keypair for the user's gid
+                    self.logger.warn("SlabImporter: person %s does not have a PL public key"%person_hrn)
+                    pkey = Keypair(create=True)
+                return (pubkey, pkey)
+
+
+            try:
+                slab_key = person['pkey']
+                # new person
+                if not user_record:
+                    (pubkey,pkey) = init_person_key (person, slab_key )
+                    if pubkey is not None and pkey is not None :
+                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                        if person['email']:
+                            print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON EMAIL OK email %s " %(person['email'])
+                            person_gid.set_email(person['email'])
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid,
+                                                   pointer='-1',
+                                                   authority=get_authority(person_hrn),
+                                                   email=person['email'])
+                        else:
+                            user_record = RegUser (hrn=person_hrn, gid=person_gid,
+                                                   pointer='-1',
+                                                   authority=get_authority(person_hrn))
+
+                        if pubkey:
+                            user_record.reg_keys = [RegKey (pubkey)]
+                        else:
+                            self.logger.warning("No key found for user %s"%user_record)
+                        user_record.just_created()
+                        dbsession.add (user_record)
+                        dbsession.commit()
+                        self.logger.info("SlabImporter: imported person: %s" % user_record)
+                        print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON IMPORT NOTuser_record %s " %(user_record)
+                        self.update_just_added_records_dict( user_record )
+                else:
+                    # update the record ?
+                    # if user's primary key has changed then we need to update the
+                    # users gid by forcing an update here
+                    sfa_keys = user_record.reg_keys
+
+                    new_key=False
+                    # NOTE(review): 'is not' compares object identity between a
+                    # key string and a list of RegKey objects — this is always
+                    # True, so the gid is regenerated on every import run.
+                    # Presumably should compare the key *values*; verify.
+                    if slab_key is not sfa_keys :
+                        new_key = True
+                    if new_key:
+                        (pubkey,pkey) = init_person_key (person, slab_key)
+                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                        if not pubkey:
+                            user_record.reg_keys=[]
+                        else:
+                            user_record.reg_keys=[ RegKey (pubkey)]
+                        self.logger.info("SlabImporter: updated person: %s" % user_record)
+                    if person['email']:
+                        user_record.email = person['email']
+                dbsession.commit()
+                user_record.stale=False
+            except:
+                self.logger.log_exc("SlabImporter: failed to import person %s"%(person) )
+
+            # NOTE(review): 'slice' shadows the builtin and the looked-up value
+            # is never used afterwards; slices_by_userid may also be unbound if
+            # GetSlices() failed above — the bare except hides both problems.
+            try:
+                slice = slices_by_userid[user_record.record_id]
+            except:
+                self.logger.warning ("SlabImporter: cannot locate slices_by_userid[user_record.record_id] %s - ignored"%user_record)
+
+            if not slice_record :
+                try:
+                    pkey = Keypair(create=True)
+                    urn = hrn_to_urn(slice_hrn, 'slice')
+                    slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                    slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+                                             pointer='-1',
+                                             authority=get_authority(slice_hrn))
+
+                    slice_record.just_created()
+                    dbsession.add(slice_record)
+                    dbsession.commit()
+
+                    #Serial id created after commit
+                    #Get it
+                    sl_rec = dbsession.query(RegSlice).filter(RegSlice.hrn.match(slice_hrn)).all()
+
+                    slab_slice = SliceSenslab( slice_hrn = slice_hrn, record_id_slice=sl_rec[0].record_id, record_id_user= user_record.record_id)
+                    print>>sys.stderr, "\r\n \r\n SLAB IMPORTER SLICE IMPORT NOTslice_record %s \r\n slab_slice %s" %(sl_rec,slab_slice)
+                    slab_dbsession.add(slab_slice)
+                    slab_dbsession.commit()
+                    self.logger.info("SlabImporter: imported slice: %s" % slice_record)
+                    self.update_just_added_records_dict ( slice_record )
+
+                except:
+                    self.logger.log_exc("SlabImporter: failed to import slice")
+
+            #No slice update upon import in senslab
+            else:
+                # xxx update the record ...
+                self.logger.warning ("Slice update not yet implemented")
+                pass
+            # record current users affiliated with the slice
+
+
+            slice_record.reg_researchers = [user_record]
+            dbsession.commit()
+            slice_record.stale=False
+
+
+
+        ### remove stale records
+        # special records must be preserved
+        system_hrns = [slabdriver.hrn, slabdriver.root_auth, slabdriver.hrn+ '.slicemanager']
+        for record in all_records:
+            if record.hrn in system_hrns:
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+
+
+        for record in all_records:
+            try:
+                stale=record.stale
+            except:
+                stale=True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("SlabImporter: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()
+
# an attempt to document what a driver class should provide,
# and implement reasonable defaults
#
-
+import sys
class Driver:
def __init__ (self, config):
# to perform such a core operation (i.e. getting rights right)
# this is no longer in use when performing other SFA operations
def augment_records_with_testbed_info (self, sfa_records):
+ print >>sys.stderr, " \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
return sfa_records
# incoming record, as provided by the client to the Register API call
local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
if type:
local_records = local_records.filter_by(type=type)
- local_records=local_records.all()
+ local_records=local_records.all()
for local_record in local_records:
augment_with_sfa_builtins (local_record)
record.url=neighbour_dict[hrn].get_url()
return
for record in local_records: solve_neighbour_url (record)
-
# convert local record objects to dicts for xmlrpc
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="RSpec"/>
+ </start>
+ <define name="RSpec">
+ <element name="RSpec">
+ <attribute name="type">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <choice>
+ <ref name="network"/>
+ <ref name="request"/>
+ </choice>
+ </element>
+ </define>
+ <define name="network">
+ <element name="network">
+ <attribute name="name">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <optional>
+ <attribute name="slice">
+ <data type="NMTOKEN"/>
+ </attribute>
+ </optional>
+ <optional>
+ <ref name="sliver_defaults"/>
+ </optional>
+ <oneOrMore>
+ <ref name="site"/>
+ </oneOrMore>
+ </element>
+ </define>
+ <define name="sliver_defaults">
+ <element name="sliver_defaults">
+ <ref name="sliver_elements"/>
+ </element>
+ </define>
+ <define name="site">
+ <element name="site">
+ <attribute name="id">
+ <data type="ID"/>
+ </attribute>
+ <element name="name">
+ <text/>
+ </element>
+ <zeroOrMore>
+ <ref name="node"/>
+ </zeroOrMore>
+ </element>
+ </define>
+ <define name="node">
+ <element name="node">
+ <attribute name="node_id">
+ <data type="ID"/>
+ </attribute>
+ <element name="hostname">
+ <text/>
+ </element>
+ <attribute name="reservable">
+ <data type="boolean"/>
+ </attribute>
+ <element name="ip_address">
+ <text/>
+ </element>
+ <optional>
+ <element name="urn">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <ref name="leases"/>
+ </optional>
+ <optional>
+ <ref name="sliver"/>
+ </optional>
+ </element>
+ </define>
+ <define name="request">
+ <element name="request">
+ <attribute name="name">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <optional>
+ <ref name="sliver_defaults"/>
+ </optional>
+ <oneOrMore>
+ <ref name="sliver"/>
+ </oneOrMore>
+ </element>
+ </define>
+ <define name="sliver">
+ <element name="sliver">
+ <optional>
+ <attribute name="nodeid">
+ <data type="ID"/>
+ </attribute>
+ </optional>
+ <ref name="sliver_elements"/>
+ </element>
+ </define>
+ <define name="sliver_elements">
+ <interleave>
+ <optional>
+ <element name="capabilities">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <element name="delegations">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <element name="program">
+ <text/>
+ </element>
+ </optional>
+ </interleave>
+ </define>
+ <define name="leases">
+ <element name="leases">
+ <zeroOrMore>
+ <group>
+        <attribute name="slot">
+          <data type="dateTime"/>
+        </attribute>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </group>
+    </zeroOrMore>
+  </element>
+ </define>
+</grammar>
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
+import sys
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
from sfa.rspecs.rspec import RSpec
hrn, type = urn_to_hrn(slice_xrn)
self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
-
# Find the valid credentials
valid_creds = self.api.auth.checkCredentials(creds, 'createsliver', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
import zlib
-
+import sys
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
(hrn, _) = urn_to_hrn(xrn)
-
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources.pyeuuuuuu call : hrn %s options %s" %( hrn,options )
# Find the valid credentials
valid_creds = self.api.auth.checkCredentials(creds, 'listnodes', hrn)
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources :origin_hrn %s sansvqalid credss %s " %(origin_hrn, Credential(string=creds[0]).get_gid_caller().get_hrn())
if not origin_hrn:
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
+ print >>sys.stderr, " \r\n \r\n \t Lsitresources.py000 call : hrn %s self.api.interface %s origin_hrn %s \r\n \r\n \r\n " %(hrn ,self.api.interface,origin_hrn)
rspec = self.api.manager.ListResources(self.api, creds, options)
# filter rspec through sfatables
chain_name = 'OUTGOING'
elif self.api.interface in ['slicemgr']:
chain_name = 'FORWARD-OUTGOING'
- self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+ self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
+ print >>sys.stderr, " \r\n \r\n \t Listresources.py001 call : chain_name %s hrn %s origine_hrn %s " %(chain_name, hrn, origin_hrn)
filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
from types import StringTypes
from collections import defaultdict
+import sys
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
# slice belongs to out local plc or a myplc peer. We will assume it
# is a local site, unless we find out otherwise
peer = None
-
# get this slice's authority (site)
slice_authority = get_authority(hrn)
# get this site's authority (sfa root authority or sub authority)
site_authority = get_authority(slice_authority).lower()
-
# check if we are already peered with this site_authority, if so
peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
for peer_record in peers:
@staticmethod
def add_leases(xml, leases):
-
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+
+
+#from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
+#from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+
+
+
+class Slabv1Lease:
+    """Serialize and parse <lease> elements of the senslab v1 RSpec."""
+
+    @staticmethod
+    def add_leases(xml, leases):
+        """Append one <lease> element per dict in *leases* under the <network>
+        element (created from the first lease's component_id if absent)."""
+
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+        lease_elems = []
+        for lease in leases:
+            lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+            lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+            lease_elems.append(lease_elem)
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # NOTE(review): mutable default 'filter={}' — harmless while unmodified,
+        # but fragile; prefer filter=None.
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return Slabv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        """Convert <lease> XML elements into Lease objects, copying the
+        standard lease attributes out of each element's attrib map."""
+        leases = []
+        for lease_elem in lease_elems:
+            lease = Lease(lease_elem.attrib, lease_elem)
+            # only leases carrying a lease_id get the field copy below;
+            # others keep whatever Lease() picked up from attrib
+            if lease.get('lease_id'):
+                lease['lease_id'] = lease_elem.attrib['lease_id']
+            lease['component_id'] = lease_elem.attrib['component_id']
+            lease['slice_id'] = lease_elem.attrib['slice_id']
+            lease['start_time'] = lease_elem.attrib['start_time']
+            lease['duration'] = lease_elem.attrib['duration']
+
+            leases.append(lease)
+        return leases
\ No newline at end of file
--- /dev/null
+
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+from sfa.util.sfalogging import logger
+
+class SlabNode(Node):
+    """RSpec node element extended with senslab-specific fields."""
+    #First get the fields already defined in the class Node
+    fields = list(Node.fields)
+    #Extend it with senslab's specific fields
+    fields.extend (['archi', 'radio', 'mobile','position'])
+
+
+class SlabPosition(Element):
+    """3-D position of a senslab node inside its site."""
+    fields = ['posx', 'posy','posz']
+
+
+
+class Slabv1Node:
+    """Serialize and parse <node> elements of the senslab v1 RSpec.
+
+    NOTE(review): the add_nodes dispatch below compares strings with 'is'
+    (e.g. ``attribute is 'component_id'``) — that is identity, not equality,
+    and only works by CPython string-interning accident; should be '=='.
+    """
+
+    @staticmethod
+    def add_connection_information(xml, ldap_username):
+        """ Adds login and ssh connection info in the network item in
+        the xml. Does not create the network element, therefore
+        should be used after add_nodes, which creates the network item.
+
+        """
+        logger.debug(" add_connection_information xml %s" %(xml))
+        #Get network item in the xml
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+
+            slab_network_dict = {}
+            slab_network_dict['login'] = ldap_username
+            slab_network_dict['vm'] = 'ssh ' + ldap_username + \
+                '@grenoble.senslab.info'
+            network_elem.set('vm', unicode(slab_network_dict['vm']))
+            network_elem.set('login', unicode( slab_network_dict['login']))
+
+
+    @staticmethod
+    def add_nodes(xml, nodes):
+        """Add one <node> element per dict in *nodes*, under the <network>
+        element (created from the first node's component_manager_id if absent).
+        Returns the list of created node elements."""
+        #Add network item in the xml
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_urn = nodes[0]['component_manager_id']
+            network_elem = xml.add_element('network', \
+                                        name = Xrn(network_urn).get_hrn())
+        else:
+            network_elem = xml
+
+        logger.debug("slabv1Node \t add_nodes nodes %s \r\n "%(nodes))
+        node_elems = []
+        #Then add nodes items to the network item in the xml
+        for node in nodes:
+            #Attach this node to the network element
+            node_fields = ['component_manager_id', 'component_id', 'exclusive',\
+                            'boot_state', 'mobile']
+            node_elem = network_elem.add_instance('node', node, node_fields)
+            node_elems.append(node_elem)
+
+            #Set the attibutes of this node element
+            for attribute in node:
+                # set component name
+                if attribute is 'component_id':
+                    component_name = node['component_name']
+                    node_elem.set('component_name', component_name)
+
+                # set hardware types, extend fields to add Senslab's architecture
+                #and radio type
+
+                if attribute is 'hardware_types':
+                    for hardware_type in node.get('hardware_types', []):
+                        # NOTE(review): this binds the shared class attribute
+                        # HardwareType.fields and extends it IN PLACE — the
+                        # list grows on every call; should copy it first.
+                        fields = HardwareType.fields
+                        fields.extend(['archi','radio'])
+                        node_elem.add_instance('hardware_types', node, fields)
+
+                # set location
+                if attribute is 'location':
+                    node_elem.add_instance('location', node['location'], \
+                                            Location.fields)
+                # add granularity of the reservation system
+                #TODO put the granularity in network instead SA 18/07/12
+                if attribute is 'granularity' :
+                    granularity = node['granularity']
+                    if granularity:
+                        node_elem.add_instance('granularity', \
+                                    granularity, granularity.fields)
+
+
+                # set available element
+                if attribute is 'boot_state':
+                    if node.get('boot_state').lower() == 'alive':
+                        available_elem = node_elem.add_element('available', \
+                                                                now='true')
+                    else:
+                        available_elem = node_elem.add_element('available', \
+                                                                now='false')
+
+                #set position
+                if attribute is 'position':
+                    node_elem.add_instance('position', node['position'], \
+                                            SlabPosition.fields)
+                ## add services
+                #PGv2Services.add_services(node_elem, node.get('services', []))
+                # add slivers
+                if attribute is 'slivers':
+                    slivers = node.get('slivers', [])
+                    if not slivers:
+                        # we must still advertise the available sliver types
+                        slivers = Sliver({'type': 'slab-node'})
+                        # we must also advertise the available initscripts
+                        #slivers['tags'] = []
+                        #if node.get('pl_initscripts'):
+                            #for initscript in node.get('pl_initscripts', []):
+                                #slivers['tags'].append({'name': 'initscript', \
+                                                #'value': initscript['name']})
+
+                    Slabv1Sliver.add_slivers(node_elem, slivers)
+        return node_elems
+
+
+
+    @staticmethod
+    def get_nodes(xml, filter={}):
+        """Return Node objects for every <node> matching *filter*."""
+        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), \
+                                                XpathFilter.xpath(filter))
+        node_elems = xml.xpath(xpath)
+        return Slabv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_nodes_with_slivers(xml, sliver_filter={}):
+        """Return Node objects only for <node> elements that contain at
+        least one <sliver> child. (sliver_filter is currently unused.)"""
+
+        xpath = '//node[count(sliver)>0] | \
+                //default:node[count(default:sliver) > 0]'
+        node_elems = xml.xpath(xpath)
+        logger.debug("SLABV1NODE \tget_nodes_with_slivers \
+                        node_elems %s"%(node_elems))
+        return Slabv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_node_objs(node_elems):
+        """Convert <node> XML elements into Node objects, pulling out
+        authority, hardware types, location, interfaces and slivers."""
+        nodes = []
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            nodes.append(node)
+            if 'component_id' in node_elem.attrib:
+                node['authority_id'] = \
+                    Xrn(node_elem.attrib['component_id']).get_authority_urn()
+
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | \
+                                                        ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) \
+                                    for hw_type in hardware_type_elems]
+
+            # get location
+            location_elems = node_elem.xpath('./default:location | ./location')
+            locations = [location_elem.get_instance(Location) \
+                        for location_elem in location_elems]
+            if len(locations) > 0:
+                node['location'] = locations[0]
+
+            # get interfaces
+            iface_elems = node_elem.xpath('./default:interface | ./interface')
+            node['interfaces'] = [iface_elem.get_instance(Interface) \
+                                for iface_elem in iface_elems]
+
+            # get services
+            #node['services'] = PGv2Services.get_services(node_elem)
+
+            # get slivers
+            node['slivers'] = Slabv1Sliver.get_slivers(node_elem)
+            available_elems = node_elem.xpath('./default:available | \
+                                                ./available')
+            # NOTE(review): guard tests for a 'name' attribute but the value
+            # read is 'now' — looks like the condition should check 'now';
+            # verify against the RSpec schema.
+            if len(available_elems) > 0 and 'name' in available_elems[0].attrib:
+                if available_elems[0].attrib.get('now', '').lower() == 'true':
+                    node['boot_state'] = 'boot'
+                else:
+                    node['boot_state'] = 'disabled'
+
+        logger.debug("SLABV1NODE \tget_nodes_objs \
+                        #nodes %s"%(nodes))
+        return nodes
+
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        """Attach each sliver (hostname string or dict with component_id)
+        to the first matching <node> already present in *xml*."""
+        logger.debug("SLABv1NODE \tadd_slivers ")
+        # NOTE(review): component_ids is never used
+        component_ids = []
+        for sliver in slivers:
+            filter_sliver = {}
+            if isinstance(sliver, str):
+                filter_sliver['component_id'] = '*%s*' % sliver
+                sliver = {}
+            elif 'component_id' in sliver and sliver['component_id']:
+                filter_sliver['component_id'] = '*%s*' % sliver['component_id']
+            if not filter_sliver:
+                continue
+            nodes = Slabv1Node.get_nodes(xml, filter_sliver)
+            if not nodes:
+                continue
+            node = nodes[0]
+            Slabv1Sliver.add_slivers(node, sliver)
+
+    @staticmethod
+    def remove_slivers(xml, hostnames):
+        """Delete every <sliver> child of the nodes matching *hostnames*."""
+        for hostname in hostnames:
+            nodes = Slabv1Node.get_nodes(xml, \
+                                {'component_id': '*%s*' % hostname})
+            for node in nodes:
+                slivers = Slabv1Sliver.get_slivers(node.element)
+                for sliver in slivers:
+                    node.element.remove(sliver.element)
+
+
+
+
+
+
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+
+#from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
+import sys
+class Slabv1Sliver:
+
+ @staticmethod
+ def add_slivers(xml, slivers):
+ if not slivers:
+ return
+ if not isinstance(slivers, list):
+ slivers = [slivers]
+ for sliver in slivers:
+ #sliver_elem = xml.add_element('sliver_type')
+ sliver_elem = xml.add_element('sliver')
+ if sliver.get('type'):
+ sliver_elem.set('name', sliver['type'])
+ if sliver.get('client_id'):
+ sliver_elem.set('client_id', sliver['client_id'])
+ #images = sliver.get('disk_images')
+ #if images and isinstance(images, list):
+ #Slabv1DiskImage.add_images(sliver_elem, images)
+ Slabv1Sliver.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
+
+ @staticmethod
+ def add_sliver_attributes(xml, attributes):
+ if attributes:
+ for attribute in attributes:
+ if attribute['name'] == 'initscript':
+ xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
+ elif tag['tagname'] == 'flack_info':
+ attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
+ attrib_dict = eval(tag['value'])
+ for (key, value) in attrib_dict.items():
+ attrib_elem.set(key, value)
+ @staticmethod
+ def get_slivers(xml, filter={}):
+ xpath = './default:sliver | ./sliver'
+
+ sliver_elems = xml.xpath(xpath)
+ slivers = []
+ for sliver_elem in sliver_elems:
+ sliver = Sliver(sliver_elem.attrib,sliver_elem)
+
+ if 'component_id' in xml.attrib:
+ sliver['component_id'] = xml.attrib['component_id']
+ if 'name' in sliver_elem.attrib:
+ sliver['type'] = sliver_elem.attrib['name']
+ #sliver['images'] = Slabv1DiskImage.get_images(sliver_elem)
+
+ print>>sys.stderr, "\r\n \r\n SLABV1SLIVER.PY \t\t\t get_slivers sliver %s " %( sliver)
+ slivers.append(sliver)
+ return slivers
+
+ @staticmethod
+ def get_sliver_attributes(xml, filter={}):
+ return []
\ No newline at end of file
--- /dev/null
+from sfa.rspecs.sfa_rspec import sfa_rspec_version
+from sfa.rspecs.pg_rspec import pg_rspec_ad_version, pg_rspec_request_version
+
+# RSpec versions this aggregate advertises (same list is accepted in requests).
+# NOTE(review): pg_rspec_request_version is imported but unused — presumably
+# request_rspec_versions was meant to include it; verify.
+ad_rspec_versions = [
+    pg_rspec_ad_version,
+    sfa_rspec_version
+    ]
+
+request_rspec_versions = ad_rspec_versions
+
+# version picked when the caller does not specify one
+default_rspec_version = { 'type': 'SFA', 'version': '1' }
+
+supported_rspecs = {'ad_rspec_versions': ad_rspec_versions,
+                    'request_rspec_versions': request_rspec_versions,
+                    'default_ad_rspec': default_rspec_version}
+
+
--- /dev/null
+from copy import deepcopy
+
+
+from sfa.rspecs.version import RSpecVersion
+import sys
+from sfa.rspecs.elements.versions.slabv1Lease import Slabv1Lease
+from sfa.rspecs.elements.versions.slabv1Node import Slabv1Node
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+
+
+from sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
+
+from sfa.util.sfalogging import logger
+
class Slabv1(RSpecVersion):
    """RSpec version handler for the Senslab testbed rspec dialect ('Slab' v1).

    Implements the RSpecVersion interface (networks, nodes, slivers, leases)
    on top of the generic self.xml document, delegating most node/sliver work
    to Slabv1Node / Slabv1Sliver and lease work to SFAv1Lease.
    """
    #enabled = True
    type = 'Slab'
    content_type = 'ad'
    version = '1'
    #template = '<RSpec type="%s"></RSpec>' % type

    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
    namespace = 'http://www.geni.net/resources/rspec/3'
    extensions = {
        'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
        'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
    }
    # 'default' maps to the document's default namespace for xpath queries.
    namespaces = dict(extensions.items() + [('default', namespace)])
    elements = []

    # Network

    def get_networks(self):
        """Return the <network> elements as dicts with 'name' and 'slice' fields."""
        #WARNING Added //default:network to the xpath
        #otherwise network element not detected 16/07/12 SA
        network_elems = self.xml.xpath('//network | //default:network')
        networks = [network_elem.get_instance(fields=['name', 'slice']) for \
                    network_elem in network_elems]
        return networks

    def add_network(self, network):
        """Return the <network name=...> element, creating it if it is absent."""
        network_tags = self.xml.xpath('//network[@name="%s"]' % network)
        if not network_tags:
            network_tag = self.xml.add_element('network', name=network)
        else:
            network_tag = network_tags[0]
        return network_tag

    # Nodes

    def get_nodes(self, filter=None):
        return Slabv1Node.get_nodes(self.xml, filter)

    def get_nodes_with_slivers(self):
        return Slabv1Node.get_nodes_with_slivers(self.xml)

    def get_slice_timeslot(self ):
        # NOTE(review): Slabv1Timeslot is neither imported nor defined in this
        # module -- calling this method raises NameError; confirm the intended
        # helper and add the import.
        return Slabv1Timeslot.get_slice_timeslot(self.xml)

    def add_connection_information(self, ldap_username):
        return Slabv1Node.add_connection_information(self.xml,ldap_username)

    def add_nodes(self, nodes, check_for_dupes=False):
        # check_for_dupes is accepted for interface compatibility but unused here.
        return Slabv1Node.add_nodes(self.xml,nodes )

    def merge_node(self, source_node_tag, network, no_dupes = False):
        """Append a copy of source_node_tag under the given network element."""
        logger.debug("SLABV1 merge_node")
        #if no_dupes and self.get_node_element(node['hostname']):
            ## node already exists
            #return
        network_tag = self.add_network(network)
        # NOTE(review): deepcopy does not appear in this module's imports --
        # verify `from copy import deepcopy` exists elsewhere in the file.
        network_tag.append(deepcopy(source_node_tag))

    # Slivers

    def get_sliver_attributes(self, hostname, node, network=None):
        """Return the sliver list of the first node matching hostname, or []."""
        print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes hostname %s " %(hostname)
        nodes = self.get_nodes({'component_id': '*%s*' %hostname})
        attribs = []
        print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes-----------------nodes %s " %(nodes)
        if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
            node = nodes[0]
        #if node :
            #sliver = node.xpath('./default:sliver | ./sliver')
            #sliver = node.xpath('./default:sliver', namespaces=self.namespaces)
            sliver = node['slivers']

            if sliver is not None and isinstance(sliver, list) and len(sliver) > 0:
                sliver = sliver[0]
                attribs = sliver
                #attribs = self.attributes_list(sliver)
                print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes----------NN------- sliver %s self.namespaces %s attribs %s " %(sliver, self.namespaces,attribs)
        return attribs

    def get_slice_attributes(self, network=None):
        """Collect per-node sliver attributes; currently only <initscript>."""
        slice_attributes = []

        nodes_with_slivers = self.get_nodes_with_slivers()

        # TODO: default sliver attributes in the PG rspec?
        default_ns_prefix = self.namespaces['default']
        for node in nodes_with_slivers:
            sliver_attributes = self.get_sliver_attributes(node['component_id'],node, network)
            for sliver_attribute in sliver_attributes:
                name = str(sliver_attribute[0])
                text = str(sliver_attribute[1])
                attribs = sliver_attribute[2]
                # we currently only support the <initscript> and <flack> attributes
                #if 'info' in name:
                    #attribute = {'name': 'flack_info', 'value': str(attribs), 'node_id': node}
                    #slice_attributes.append(attribute)
                #elif 'initscript' in name:
                if 'initscript' in name:
                    # prefer the attribute's own name= value over its text
                    if attribs is not None and 'name' in attribs:
                        value = attribs['name']
                    else:
                        value = text
                    attribute = {'name': 'initscript', 'value': value, 'node_id': node}
                    slice_attributes.append(attribute)

        return slice_attributes

    def attributes_list(self, elem):
        """Flatten an element's children to (tag, stripped-text, attrib) tuples."""
        opts = []
        if elem is not None:
            for e in elem:
                opts.append((e.tag, str(e.text).strip(), e.attrib))
        return opts

    def get_default_sliver_attributes(self, network=None):
        # No testbed-wide default sliver attributes for this rspec version.
        return []

    def add_default_sliver_attribute(self, name, value, network=None):
        pass

    def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
        """Mark the given hosts as slivers; unless append, drop sliver-less nodes.

        NOTE(review): `attributes=[]` is a mutable default argument -- shared
        across calls if ever mutated; confirm callers never mutate it.
        """
        # all nodes should already be present in the rspec. Remove all
        # nodes that don't have slivers
        print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers ----->get_node "
        for hostname in hostnames:
            node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
            if not node_elems:
                continue
            node_elem = node_elems[0]

            # determine sliver types for this node
            #TODO : add_slivers valid type of sliver needs to be changed 13/07/12 SA
            valid_sliver_types = ['slab-node', 'emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
            #valid_sliver_types = ['emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
            requested_sliver_type = None
            for sliver_type in node_elem.get('slivers', []):
                if sliver_type.get('type') in valid_sliver_types:
                    requested_sliver_type = sliver_type['type']

            if not requested_sliver_type:
                continue
            sliver = {'type': requested_sliver_type,
                      'pl_tags': attributes}
            print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers node_elem %s sliver_type %s \r\n \r\n " %(node_elem, sliver_type)
            # remove available element
            for available_elem in node_elem.xpath('./default:available | ./available'):
                node_elem.remove(available_elem)

            # remove interface elements
            for interface_elem in node_elem.xpath('./default:interface | ./interface'):
                node_elem.remove(interface_elem)

            # remove existing sliver_type elements
            for sliver_type in node_elem.get('slivers', []):
                node_elem.element.remove(sliver_type.element)

            # set the client id
            node_elem.element.set('client_id', hostname)
            if sliver_urn:
                pass
                # TODO
                # set the sliver id
                #slice_id = sliver_info.get('slice_id', -1)
                #node_id = sliver_info.get('node_id', -1)
                #sliver_id = urn_to_sliver_id(sliver_urn, slice_id, node_id)
                #node_elem.set('sliver_id', sliver_id)

            # add the sliver type element
            Slabv1Sliver.add_slivers(node_elem.element, sliver)
            #Slabv1SliverType.add_slivers(node_elem.element, sliver)

        # remove all nodes without slivers
        if not append:
            for node_elem in self.get_nodes():
                if not node_elem['client_id']:
                    parent = node_elem.element.getparent()
                    parent.remove(node_elem.element)

    def remove_slivers(self, slivers, network=None, no_dupes=False):
        Slabv1Node.remove_slivers(self.xml, slivers)


    # Utility

    def merge(self, in_rspec):
        """
        Merge contents for specified rspec with current rspec
        """

        if not in_rspec:
            return

        from sfa.rspecs.rspec import RSpec

        if isinstance(in_rspec, RSpec):
            rspec = in_rspec
        else:
            rspec = RSpec(in_rspec)
        if rspec.version.type.lower() == 'protogeni':
            # convert to SFA format first so network elements line up
            from sfa.rspecs.rspec_converter import RSpecConverter
            in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
            rspec = RSpec(in_rspec)
        # just copy over all networks
        #Attention special get_networks using //default:network xpath
        current_networks = self.get_networks()
        networks = rspec.version.get_networks()
        for network in networks:
            current_network = network.get('name')
            if current_network and current_network not in current_networks:
                self.xml.append(network.element)
                current_networks.append(current_network)

    # Leases

    def get_leases(self, lease_filter=None):
        # Leases currently use the SFAv1 lease elements, not Slabv1Lease.
        return SFAv1Lease.get_leases(self.xml, lease_filter)
        #return Slabv1Lease.get_leases(self.xml, lease_filter)

    def add_leases(self, leases, network = None, no_dupes=False):
        SFAv1Lease.add_leases(self.xml, leases)
        #Slabv1Lease.add_leases(self.xml, leases)

    def cleanup(self):
        # remove unnecessary elements, attributes
        if self.type in ['request', 'manifest']:
            # remove 'available' element from remaining node elements
            self.xml.remove_element('//default:available | //available')
+
class Slabv1Ad(Slabv1):
    # Advertisement flavour of the Slab v1 rspec: same behaviour as Slabv1,
    # with the ad schema and an empty advertisement document template.
    enabled = True
    content_type = 'ad'
    schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
    #http://www.geni.net/resources/rspec/3/ad.xsd'
    template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
class Slabv1Request(Slabv1):
    # Request flavour of the Slab v1 rspec: request schema + empty template.
    enabled = True
    content_type = 'request'
    schema = 'http://senslab.info/resources/rspec/1/request.xsd'
    #http://www.geni.net/resources/rspec/3/request.xsd
    template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
class Slabv1Manifest(Slabv1):
    # Manifest flavour of the Slab v1 rspec: manifest schema + empty template.
    enabled = True
    content_type = 'manifest'
    schema = 'http://senslab.info/resources/rspec/1/manifest.xsd'
    #http://www.geni.net/resources/rspec/3/manifest.xsd
    template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+
if __name__ == '__main__':
    # Ad-hoc manual smoke test: parse a sample rspec file from /tmp and
    # print its node elements. Requires /tmp/slab.rspec to exist.
    from sfa.rspecs.rspec import RSpec
    from sfa.rspecs.rspec_elements import *
    r = RSpec('/tmp/slab.rspec')
    r.load_rspec_elements(Slabv1.elements)
    r.namespaces = Slabv1.namespaces
    print r.get(RSpecElements.NODE)
--- /dev/null
+import random
+from passlib.hash import ldap_salted_sha1 as lssha
+from sfa.util.xrn import get_authority
+import ldap
+from sfa.util.config import Config
+from sfa.trust.hierarchy import Hierarchy
+#from sfa.trust.certificate import *
+import ldap.modlist as modlist
+from sfa.util.sfalogging import logger
+import os.path
+
+#API for OpenLDAP
+
+
class LdapConfig():
    """Loads LDAP connection settings from a python config file.

    The file is exec'd with this instance's __dict__ as globals, so every
    top-level name defined in it (LDAP_IP_ADDRESS, LDAP_WEB_DN, ... --
    see ldap_co.__init__ for the names actually read) becomes an
    attribute of the instance.
    """
    def __init__(self, config_file = '/etc/sfa/ldap_config.py'):
        try:
            # execfile populates self.__dict__ directly (Python 2 idiom).
            execfile(config_file, self.__dict__)

            self.config_file = config_file
            # path to configuration data
            self.config_path = os.path.dirname(config_file)
        except IOError:
            raise IOError, "Could not find or load the configuration file: %s" \
                            % config_file
+
+
class ldap_co:
    """ Set admin login and server configuration variables."""

    def __init__(self):
        #Senslab PROD LDAP parameters
        self.ldapserv = None
        ldap_config = LdapConfig()
        self.config = ldap_config
        self.ldapHost = ldap_config.LDAP_IP_ADDRESS
        self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
        self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
        self.ldapAdminDN = ldap_config.LDAP_WEB_DN
        self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD

        self.ldapPort = ldap.PORT
        self.ldapVersion = ldap.VERSION3
        self.ldapSearchScope = ldap.SCOPE_SUBTREE

    def connect(self, bind = True):
        """Enables connection to the LDAP server.
        Set the bind parameter to True if a bind is needed
        (for add/modify/delete operations).
        Set to False otherwise.

        Returns a dict: {'bool': True} on success,
        {'bool': False, 'message': <LDAPError>} on failure.
        """
        try:
            # ldap.open is deprecated in python-ldap; bind() below re-creates
            # the handle with ldap.initialize when binding.
            self.ldapserv = ldap.open(self.ldapHost)
        except ldap.LDAPError, error:
            return {'bool' : False, 'message' : error }

        # Bind with authentification
        if(bind):
            return self.bind()

        else:
            return {'bool': True}

    def bind(self):
        """ Binding method. """
        try:
            # Opens a connection after a call to ldap.open in connect:
            self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)

            # Bind/authenticate with a user with apropriate
            #rights to add objects
            self.ldapserv.simple_bind_s(self.ldapAdminDN, \
                                        self.ldapAdminPassword)

        except ldap.LDAPError, error:
            return {'bool' : False, 'message' : error }

        return {'bool': True}

    def close(self):
        """ Close the LDAP connection.

        NOTE(review): returns None on success but a status dict on failure;
        callers should not rely on the return value.
        """
        try:
            self.ldapserv.unbind_s()
        except ldap.LDAPError, error:
            return {'bool' : False, 'message' : error }
+
+
class LDAPapi :
    """High-level API over the Senslab LDAP directory (user accounts).

    Wraps python-ldap search/add/modify/delete operations and translates
    between SFA user records and LDAP entries.
    """
    def __init__(self):
        logger.setLevelDebug()
        #SFA related config
        self.senslabauth = Hierarchy()
        config = Config()

        self.authname = config.SFA_REGISTRY_ROOT_AUTH

        self.conn = ldap_co()
        self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
        self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
        self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
        self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH

        self.lengthPassword = 8
        self.baseDN = self.conn.ldapPeopleDN
        #authinfo=self.senslabauth.get_auth_info(self.authname)

        # Alphabet used by generate_password.
        self.charsPassword = [ '!','$','(',')','*','+',',','-','.', \
                                '0','1','2','3','4','5','6','7','8','9', \
                                'A','B','C','D','E','F','G','H','I','J', \
                                'K','L','M','N','O','P','Q','R','S','T', \
                                'U','V','W','X','Y','Z','_','a','b','c', \
                                'd','e','f','g','h','i','j','k','l','m', \
                                'n','o','p','q','r','s','t','u','v','w', \
                                'x','y','z','\'']

        self.ldapShell = '/bin/bash'


    def generate_login(self, record):
        """Generate login for adding a new user in LDAP Directory
        (four characters minimum length)
        Record contains first name and last name.

        Returns the login string, or None on LDAP error.

        NOTE(review): several defects in this method --
        * if last_name is shorter than 2 chars (or first_name too short in
          the 2-char branch), `login` is never assigned and the filter build
          below raises UnboundLocalError;
        * `is not 0` compares an int by identity instead of `!= 0`;
        * when index reaches 9 the loop logs an error but does not break,
          so a fully-collided namespace loops forever.
        """
        #Remove all special characters from first_name/last name
        lower_first_name = record['first_name'].replace('-','')\
                                        .replace('_','').replace('[','')\
                                        .replace(']','').replace(' ','')\
                                        .lower()
        lower_last_name = record['last_name'].replace('-','')\
                                        .replace('_','').replace('[','')\
                                        .replace(']','').replace(' ','')\
                                        .lower()
        length_last_name = len(lower_last_name)
        login_max_length = 8

        #Try generating a unique login based on first name and last name
        getAttrs = ['uid']
        if length_last_name >= login_max_length :
            login = lower_last_name[0:login_max_length]
            index = 0
            logger.debug("login : %s index : %s" %(login, index))
        elif length_last_name >= 4 :
            login = lower_last_name
            index = 0
            logger.debug("login : %s index : %s" %(login, index))
        elif length_last_name == 3 :
            login = lower_first_name[0:1] + lower_last_name
            index = 1
            logger.debug("login : %s index : %s" %(login, index))
        elif length_last_name == 2:
            if len ( lower_first_name) >=2:
                login = lower_first_name[0:2] + lower_last_name
                index = 2
                logger.debug("login : %s index : %s" %(login, index))
            else:
                logger.error("LoginException : \
                                Generation login error with \
                                minimum four characters")


        else :
            logger.error("LDAP generate_login failed : \
                            impossible to generate unique login for %s %s" \
                            %(lower_first_name,lower_last_name))

        login_filter = '(uid=' + login + ')'

        try :
            #Check if login already in use
            while (len(self.LdapSearch(login_filter, getAttrs)) is not 0 ):

                index += 1
                if index >= 9:
                    logger.error("LoginException : Generation login error \
                                    with minimum four characters")
                else:
                    try:
                        # On collision, take one more char of the first name
                        # and trim the last name to keep the 8-char budget.
                        login = lower_first_name[0:index] + \
                                    lower_last_name[0:login_max_length-index]
                        login_filter = '(uid='+ login+ ')'
                    except KeyError:
                        print "lower_first_name - lower_last_name too short"

            logger.debug("LDAP.API \t generate_login login %s" %(login))
            return login

        except ldap.LDAPError, error :
            logger.log_exc("LDAP generate_login Error %s" %error)
            return None



    def generate_password(self):

        """Generate password for adding a new user in LDAP Directory
        (8 characters length) return password

        NOTE(review): uses the non-cryptographic `random` module; consider a
        CSPRNG for account passwords.
        """
        password = str()
        length = len(self.charsPassword)
        for index in range(self.lengthPassword):
            char_index = random.randint(0, length-1)
            password += self.charsPassword[char_index]

        return password

    def encrypt_password(self, password):
        """ Use passlib library to make a RFC2307 LDAP encrypted password
        salt size = 8, use sha-1 algorithm. Returns encrypted password.

        """
        #Keep consistency with Java Senslab's LDAP API
        #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytres
        return lssha.encrypt(password,salt_size = 8)



    def find_max_uidNumber(self):

        """Find the LDAP max uidNumber (POSIX uid attribute) .
        Used when adding a new user in LDAP Directory
        returns string max uidNumber + 1

        """
        #First, get all the users in the LDAP
        getAttrs = "(uidNumber=*)"
        login_filter = ['uidNumber']

        result_data = self.LdapSearch(getAttrs, login_filter)
        #If there is no user in LDAP yet, First LDAP user
        if result_data == []:
            max_uidnumber = self.ldapUserUidNumberMin
        #Otherwise, get the highest uidNumber
        else:

            uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data ]
            logger.debug("LDAPapi.py \tfind_max_uidNumber \
                            uidNumberList %s " %(uidNumberList))
            max_uidnumber = max(uidNumberList) + 1

        return str(max_uidnumber)


    def get_ssh_pkey(self, record):
        """TODO ; Get ssh public key from sfa record
        To be filled by N. Turro ? or using GID pl way?

        """
        # Placeholder value ("A REMPLIR" = "to be filled in").
        return 'A REMPLIR '

    def make_ldap_filters_from_record(self, record=None):
        """TODO Handle OR filtering in the ldap query when
        dealing with a list of records instead of doing a for loop in GetPersons
        Helper function to make LDAP filter requests out of SFA records.

        Returns '(cn=*)' when record is None/empty, otherwise an LDAP
        filter string, AND-combined when several fields are present.
        """
        req_ldap = ''
        req_ldapdict = {}
        if record :
            if 'first_name' in record and 'last_name' in record:
                req_ldapdict['cn'] = str(record['first_name'])+" "\
                                        + str(record['last_name'])
            if 'email' in record :
                req_ldapdict['mail'] = record['email']
            if 'mail' in record:
                # 'mail' takes precedence over 'email' when both are present
                req_ldapdict['mail'] = record['mail']
            if 'enabled' in record:
                # shadowExpire -1 = enabled, 0 = disabled account
                if record['enabled'] == True :
                    req_ldapdict['shadowExpire'] = '-1'
                else:
                    req_ldapdict['shadowExpire'] = '0'

            #Hrn should not be part of the filter because the hrn
            #presented by a certificate of a SFA user not imported in
            #Senslab does not include the senslab login in it
            #Plus, the SFA user may already have an account with senslab
            #using another login.

            #if 'hrn' in record :
                #splited_hrn = record['hrn'].split(".")
                #if splited_hrn[0] != self.authname :
                    #logger.warning(" \r\n LDAP.PY \
                        #make_ldap_filters_from_record I know nothing \
                        #about %s my authname is %s not %s" \
                        #%(record['hrn'], self.authname, splited_hrn[0]) )

                #login=splited_hrn[1]
                #req_ldapdict['uid'] = login


            logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
                                record %s req_ldapdict %s" \
                                %(record, req_ldapdict))

            for k in req_ldapdict:
                req_ldap += '('+ str(k)+ '=' + str(req_ldapdict[k]) + ')'
            if len(req_ldapdict.keys()) >1 :
                # wrap multiple conditions in an AND: (&(a=..)(b=..))
                req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
                size = len(req_ldap)
                req_ldap = req_ldap[:(size-1)] +')'+ req_ldap[(size-1):]
        else:
            req_ldap = "(cn=*)"

        return req_ldap

    def make_ldap_attributes_from_record(self, record):
        """When adding a new user to Senslab's LDAP, creates an attributes
        dictionnary from the SFA record.

        """

        attrs = {}
        attrs['objectClass'] = ["top", "person", "inetOrgPerson", \
                                    "organizationalPerson", "posixAccount", \
                                    "shadowAccount", "systemQuotas", \
                                    "ldapPublicKey"]

        attrs['givenName'] = str(record['first_name']).lower().capitalize()
        attrs['sn'] = str(record['last_name']).lower().capitalize()
        attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
        attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
        attrs['uid'] = self.generate_login(record)

        attrs['quota'] = self.ldapUserQuotaNFS
        attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
        attrs['loginShell'] = self.ldapShell
        attrs['gidNumber'] = self.ldapUserGidNumber
        attrs['uidNumber'] = self.find_max_uidNumber()
        attrs['mail'] = record['mail'].lower()
        try:
            attrs['sshPublicKey'] = record['pkey']
        except KeyError:
            attrs['sshPublicKey'] = self.get_ssh_pkey(record)


        #Password is automatically generated because SFA user don't go
        #through the Senslab website  used to register new users,
        #There is no place in SFA where users can enter such information
        #yet.
        #If the user wants to set his own password , he must go to the Senslab
        #website.
        password = self.generate_password()
        attrs['userPassword']= self.encrypt_password(password)

        #Account automatically validated (no mail request to admins)
        #Set to 0 to disable the account, -1 to enable it,
        attrs['shadowExpire'] = '-1'

        #Motivation field in Senslab
        attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'

        attrs['ou'] = 'SFA'         #Optional: organizational unit
        #No info about those here:
        attrs['l'] = 'To be defined'#Optional: Locality.
        attrs['st'] = 'To be defined' #Optional: state or province (country).

        return attrs



    def LdapAddUser(self, record) :
        """Add SFA user to LDAP if it is not in LDAP yet.

        Returns {'bool': True} on success, {'bool': False, ...} otherwise.
        """
        user_ldap_attrs = self.make_ldap_attributes_from_record(record)


        #Check if user already in LDAP wih email, first name and last name
        filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
        user_exist = self.LdapSearch(filter_by)
        if user_exist:
            logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
                        already exists" %(user_ldap_attrs['sn'], \
                        user_ldap_attrs['mail']))
            return {'bool': False}

        #Bind to the server
        result = self.conn.connect()

        if(result['bool']):

            # A dict to help build the "body" of the object

            logger.debug(" \r\n \t LDAP LdapAddUser attrs %s " %user_ldap_attrs)

            # The dn of our new entry/object
            dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN

            try:
                ldif = modlist.addModlist(user_ldap_attrs)
                logger.debug("LDAPapi.py add attrs %s \r\n  ldif %s"\
                                %(user_ldap_attrs,ldif) )
                self.conn.ldapserv.add_s(dn,ldif)

                logger.info("Adding user %s login %s in LDAP" \
                        %(user_ldap_attrs['cn'] ,user_ldap_attrs['uid']))


            except ldap.LDAPError, error:
                logger.log_exc("LDAP Add Error %s" %error)
                return {'bool' : False, 'message' : error }

            self.conn.close()
            return {'bool': True}
        else:
            return result


    def LdapDelete(self, person_dn):
        """
        Deletes a person in LDAP. Uses the dn of the user.
        """
        #Connect and bind
        result =  self.conn.connect()
        if(result['bool']):
            try:
                self.conn.ldapserv.delete_s(person_dn)
                self.conn.close()
                return {'bool': True}

            except ldap.LDAPError, error:
                logger.log_exc("LDAP Delete Error %s" %error)
                return {'bool': False}


    def LdapDeleteUser(self, record_filter):
        """
        Deletes a SFA person in LDAP, based on the user's hrn.
        """
        #Find uid of the  person
        # NOTE(review): the second positional argument here is
        # is_user_enabled=[], which is falsy -- presumably expected_fields=[]
        # was intended; verify against LdapFindUser's signature.
        person = self.LdapFindUser(record_filter,[])
        logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s" \
        %(record_filter, person))

        if person:
            dn = 'uid=' + person['uid'] + "," +self.baseDN
        else:
            return {'bool': False}

        result = self.LdapDelete(dn)
        return result


    def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
        """ Modifies a LDAP entry """

        ldif = modlist.modifyModlist(old_attributes_dict,new_attributes_dict)
        # Connect and bind/authenticate
        result = self.conn.connect()
        if (result['bool']):
            try:
                self.conn.ldapserv.modify_s(dn,ldif)
                self.conn.close()
                return {'bool' : True }
            except ldap.LDAPError, error:
                logger.log_exc("LDAP LdapModify Error %s" %error)
                return {'bool' : False }


    def LdapModifyUser(self, user_record, new_attributes_dict):
        """
        Gets the record from one user_uid_login based on record_filter
        and changes the attributes according to the specified new_attributes.
        Does not use this if we need to modify the uid. Use a ModRDN
        #operation instead ( modify relative DN )
        """
        if user_record is None:
            logger.error("LDAP \t LdapModifyUser Need user record  ")
            return {'bool': False}

        #Get all the attributes of the user_uid_login
        #person = self.LdapFindUser(record_filter,[])
        req_ldap = self.make_ldap_filters_from_record(user_record)
        person_list = self.LdapSearch(req_ldap,[])
        logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s" \
                                                        %(person_list))
        if person_list and len(person_list) > 1 :
            logger.error("LDAP \t LdapModifyUser Too many users returned")
            return {'bool': False}
        if person_list is None :
            logger.error("LDAP \t LdapModifyUser  User %s doesn't exist "\
                        %(user_record))
            return {'bool': False}

        # The dn of our existing entry/object
        #One result only from ldapSearch
        person = person_list[0][1]
        dn  = 'uid=' + person['uid'][0] + "," +self.baseDN

        if new_attributes_dict:
            old = {}
            for k in new_attributes_dict:
                if k not in person:
                    old[k] =  ''
                else :
                    old[k] = person[k]
            logger.debug(" LDAPapi.py \t LdapModifyUser  new_attributes %s"\
                                %( new_attributes_dict))
            result = self.LdapModify(dn, old,new_attributes_dict)
            return result
        else:
            logger.error("LDAP \t LdapModifyUser  No new attributes given. ")
            return {'bool': False}




    def LdapMarkUserAsDeleted(self, record):
        """Soft-delete: disable the account by setting shadowExpire to 0."""

        new_attrs = {}
        #Disable account
        new_attrs['shadowExpire'] = '0'
        logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
        ret = self.LdapModifyUser(record, new_attrs)
        return ret


    def LdapResetPassword(self,record):
        """
        Resets password for the user whose record is the parameter and changes
        the corresponding entry in the LDAP.

        """
        password = self.generate_password()
        attrs = {}
        attrs['userPassword'] = self.encrypt_password(password)
        logger.debug("LDAP LdapResetPassword encrypt_password %s"\
                %(attrs['userPassword']))
        result = self.LdapModifyUser(record, attrs)
        return result


    def LdapSearch (self, req_ldap = None, expected_fields = None ):
        """
        Used to search directly in LDAP, by using ldap filters and
        return fields.
        When req_ldap is None, returns all the entries in the LDAP.

        Returns a list of (dn, attrs) tuples, [] on LDAP error,
        None if the connection failed.
        """
        result = self.conn.connect(bind = False)
        if (result['bool']) :

            return_fields_list = []
            if expected_fields == None :
                return_fields_list = ['mail','givenName', 'sn', 'uid', \
                                        'sshPublicKey', 'shadowExpire']
            else :
                return_fields_list = expected_fields
            #No specifc request specified, get the whole LDAP
            if req_ldap == None:
                req_ldap = '(cn=*)'

            logger.debug("LDAP.PY \t LdapSearch  req_ldap %s \
                                    return_fields_list %s" \
                                    %(req_ldap, return_fields_list))

            try:
                msg_id = self.conn.ldapserv.search(
                                            self.baseDN,ldap.SCOPE_SUBTREE,\
                                            req_ldap, return_fields_list)
                #Get all the results matching the search from ldap in one
                #shot (1 value)
                result_type, result_data = \
                                        self.conn.ldapserv.result(msg_id,1)

                self.conn.close()

                logger.debug("LDAP.PY \t LdapSearch  result_data %s"\
                            %(result_data))

                return result_data

            except  ldap.LDAPError,error :
                logger.log_exc("LDAP LdapSearch Error %s" %error)
                return []

            else:
                logger.error("LDAP.PY \t Connection Failed" )
                return

    def LdapFindUser(self, record = None, is_user_enabled=None, \
            expected_fields = None):
        """
        Search a SFA user with a hrn. User should be already registered
        in Senslab LDAP.
        Returns one matching entry (dict) when record is given,
        a list of entry dicts otherwise; None when nothing matches or a
        required LDAP attribute is missing.
        """
        custom_record = {}
        if is_user_enabled:

            custom_record['enabled'] = is_user_enabled
        if record:
            custom_record.update(record)


        req_ldap = self.make_ldap_filters_from_record(custom_record)
        return_fields_list = []
        if expected_fields == None :
            return_fields_list = ['mail','givenName', 'sn', 'uid', \
                                    'sshPublicKey']
        else :
            return_fields_list = expected_fields

        result_data = self.LdapSearch(req_ldap,  return_fields_list )
        logger.debug("LDAP.PY \t LdapFindUser  result_data %s" %(result_data))

        # NOTE(review): `is 0` identity-compares an int; should be `== 0`.
        if len(result_data) is 0:
            return None
        #Asked for a specific user
        if record :
            #try:
            ldapentry = result_data[0][1]
            logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" %(ldapentry))
            tmpname = ldapentry['uid'][0]

            tmpemail = ldapentry['mail'][0]
            if ldapentry['mail'][0] == "unknown":
                tmpemail = None

            #except IndexError:
                #logger.error("LDAP ldapFindHRn : no entry for record %s found"\
                            #%(record))
                #return None

            try:
                hrn = record['hrn']
                parent_hrn = get_authority(hrn)
                peer_authority = None
                # NOTE(review): `is not` identity-compares strings; should
                # likely be `!=`.
                if parent_hrn is not self.authname:
                    peer_authority = parent_hrn

                results =  {
                            'type': 'user',
                            'pkey': ldapentry['sshPublicKey'][0],
                            #'uid': ldapentry[1]['uid'][0],
                            'uid': tmpname ,
                            'email':tmpemail,
                            #'email': ldapentry[1]['mail'][0],
                            'first_name': ldapentry['givenName'][0],
                            'last_name': ldapentry['sn'][0],
                            #'phone': 'none',
                            'serial': 'none',
                            'authority': parent_hrn,
                            'peer_authority': peer_authority,
                            'pointer' : -1,
                            'hrn': hrn,
                            }
            except KeyError,error:
                logger.log_exc("LDAPapi \t LdaFindUser KEyError %s" \
                                %error )
                return
        else:
            #Asked for all users in ldap
            results = []
            for ldapentry in result_data:
                logger.debug(" LDAP.py LdapFindUser ldapentry name : %s " \
                                %(ldapentry[1]['uid'][0]))
                tmpname = ldapentry[1]['uid'][0]
                hrn=self.authname+"."+ tmpname

                tmpemail = ldapentry[1]['mail'][0]
                if ldapentry[1]['mail'][0] == "unknown":
                    tmpemail = None


                parent_hrn = get_authority(hrn)
                parent_auth_info = self.senslabauth.get_auth_info(parent_hrn)
                try:
                    results.append(  {
                            'type': 'user',
                            'pkey': ldapentry[1]['sshPublicKey'][0],
                            #'uid': ldapentry[1]['uid'][0],
                            'uid': tmpname ,
                            'email':tmpemail,
                            #'email': ldapentry[1]['mail'][0],
                            'first_name': ldapentry[1]['givenName'][0],
                            'last_name': ldapentry[1]['sn'][0],
                            #'phone': 'none',
                            'serial': 'none',
                            'authority': self.authname,
                            'peer_authority': '',
                            'pointer' : -1,
                            'hrn': hrn,
                            } )
                except KeyError,error:
                    logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s" \
                                %(error))
                    return
        return results
+
--- /dev/null
+#import sys
+from httplib import HTTPConnection, HTTPException
+import json
+#import datetime
+#from time import gmtime, strftime
+
+#import urllib
+#import urllib2
+from sfa.util.config import Config
+#from sfa.util.xrn import hrn_to_urn, get_authority, Xrn, get_leaf
+
+from sfa.util.sfalogging import logger
+
+
# IP address of the OAR REST API server (Senslab production).
OARIP = '194.199.16.166'

# POST endpoints; a literal 'id' inside a uri is substituted with the
# actual job id before the request is sent (see POSTRequestToOARRestAPI).
OAR_REQUEST_POST_URI_DICT = {'POST_job':{'uri': '/oarapi/jobs.json'},
                            'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json'},
                            }

# Supported POST payload encodings (content-type header + serializer module).
POST_FORMAT = {'json' : {'content':"application/json", 'object':json},}

#OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
                        #'workdir':"/home/", 'walltime':""}
+
+
+
+
class OARrestapi:
    """Thin HTTP client for the OAR batch scheduler's REST API (JSON)."""
    def __init__(self):
        # Target server coordinates; 'uri' is filled in per-request.
        self.oarserver = {}
        self.oarserver['ip'] = OARIP
        self.oarserver['port'] = 8800
        self.oarserver['uri'] = None
        self.oarserver['postformat'] = 'json'
        #logger.setLevelDebug()

        # Known OAR job states.
        self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch', \
                            'toError', 'toAckReservation', 'Launching', \
                            'Finishing', 'Running', 'Suspended', 'Resuming',\
                            'Error']

        self.parser = OARGETParser(self)


    def GETRequestToOARRestAPI(self, request, strval=None , username = None ):
        """Issue a GET to the OAR API and return the decoded JSON dict.

        request -- key into OARGETParser.OARrequests_uri_dict
        strval  -- optional id substituted for the literal 'id' in the uri
        username -- optional OAR identity, passed via X-REMOTE_IDENT

        NOTE(review): if the HTTP call raises, `resp` (and then `js_dict`)
        is unbound and the json.loads below raises NameError instead of the
        intended error path; returns None on parse failure.
        """
        self.oarserver['uri'] = \
                            OARGETParser.OARrequests_uri_dict[request]['uri']
        #Get job details with username
        if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
            self.oarserver['uri'] +=  OARGETParser.OARrequests_uri_dict[request]['owner'] + username
        headers = {}
        data = json.dumps({})
        logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" %(request))
        if strval:
            self.oarserver['uri'] = self.oarserver['uri'].\
                                            replace("id",str(strval))
            logger.debug("OARrestapi: \t  GETRequestToOARRestAPI  \
                            self.oarserver['uri'] %s strval %s" \
                            %(self.oarserver['uri'], strval))
        if username:
            headers['X-REMOTE_IDENT'] = username
        try :
            #seems that it does not work if we don't add this
            headers['content-length'] = '0'

            conn = HTTPConnection(self.oarserver['ip'], \
                                                self.oarserver['port'])
            conn.request("GET", self.oarserver['uri'], data, headers)
            resp = ( conn.getresponse()).read()
            conn.close()
        except HTTPException, error :
            logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s " \
                                                                    %(error))
            #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
        try:
            js_dict = json.loads(resp)
            return js_dict

        except ValueError, error:
            logger.log_exc("Failed to parse Server Response: %s ERROR %s"\
                                                            %(js_dict, error))
            #raise ServerError("Failed to parse Server Response:" + js)


    def POSTRequestToOARRestAPI(self, request, datadict, username=None):
        """ Used to post a job on OAR , along with data associated
        with the job.

        NOTE(review): `NotConnected` below is not imported (it lives in
        httplib) -- if that exception fires, the except clause itself raises
        NameError. Also `resp`/`answer` are unbound when the request fails.
        """

        #first check that all params for are OK
        try:
            self.oarserver['uri'] = OAR_REQUEST_POST_URI_DICT[request]['uri']

        except KeyError:
            logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
                             valid")
            return
        # substitute the job id into the uri if the caller supplied one
        if datadict and 'strval' in datadict:
            self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
                                                str(datadict['strval']))
            del datadict['strval']

        data = json.dumps(datadict)
        headers = {'X-REMOTE_IDENT':username, \
                'content-type': POST_FORMAT['json']['content'], \
                'content-length':str(len(data))}
        try :

            conn = HTTPConnection(self.oarserver['ip'], \
                                        self.oarserver['port'])
            conn.request("POST", self.oarserver['uri'], data, headers)
            resp = (conn.getresponse()).read()
            conn.close()
        except NotConnected:
            logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
                            data %s \r\n \t\n \t\t headers %s uri %s" \
                            %(data,headers,self.oarserver['uri']))

            #raise ServerError("POST_OAR_SRVR : error")

        try:
            answer = json.loads(resp)
            logger.debug("POSTRequestToOARRestAPI : answer %s" %(answer))
            return answer

        except ValueError, error:
            logger.log_exc("Failed to parse Server Response: error %s answer \
                            %s" %(error, answer))
            #raise ServerError("Failed to parse Server Response:" + answer)
+
+
+
def AddOarNodeId(tuplelist, value):
    """Record OAR's internal node id in the node's attribute tuple list."""
    oar_id_entry = ('oar_id', int(value))
    tuplelist.append(oar_id_entry)
+
+
def AddNodeNetworkAddr(dictnode, value):
    """Insert a new node entry keyed by its network address.

    The entry is a tuple list seeded with the node id and hostname,
    both set to the network address. Returns the key used.
    """
    entry = [('node_id', value), ('hostname', value)]
    dictnode[value] = entry
    return value
+
def AddNodeSite(tuplelist, value):
    """Append the node's site name (stringified) to its attribute list."""
    site_name = str(value)
    tuplelist.append(('site', site_name))
+
def AddNodeRadio(tuplelist, value):
    """Append the node's radio type (stringified) to its attribute list."""
    radio_type = str(value)
    tuplelist.append(('radio', radio_type))
+
+
def AddMobility(tuplelist, value):
    """Append the node's mobility flag as a string.

    A value equal to 0 means the node is static ('False'); any other
    value means mobile ('True').

    Bug fix: the original tested ``value is 0`` — an identity check
    that only happens to work for CPython's cached small ints and is
    never correct for equality semantics. Use ``==`` instead.
    """
    if value == 0:
        tuplelist.append(('mobile', 'False'))
    else:
        tuplelist.append(('mobile', 'True'))
+
def AddPosX(tuplelist, value):
    """Append the node's x coordinate to its attribute list."""
    tuplelist.append(('posx', value))
+
def AddPosY(tuplelist, value):
    """Append the node's y coordinate to its attribute list."""
    tuplelist.append(('posy', value))
+
def AddPosZ(tuplelist, value):
    """Append the node's z coordinate to its attribute list."""
    tuplelist.append(('posz', value))
+
def AddBootState(tuplelist, value):
    """Append the node's boot state (stringified) to its attribute list."""
    state = str(value)
    tuplelist.append(('boot_state', state))
+
def AddNodeId(dictnode, value):
    """Insert a new node entry keyed by its integer node id.

    The entry is a tuple list seeded with the node id itself.
    Returns the key used.
    """
    key = int(value)
    dictnode[key] = [('node_id', key)]
    return key
+
def AddHardwareType(tuplelist, value):
    """Split an 'archi:radio' string and append both attributes."""
    parts = value.split(':')
    tuplelist.extend([('archi', parts[0]), ('radio', parts[1])])
+
+
class OARGETParser:
    """Parses the json answers of the OAR REST API GET requests.

    Each supported request name is mapped, through OARrequests_uri_dict,
    to its uri and to the method that knows how to parse its answer.
    Parsed node and site information is accumulated in node_dictlist
    and site_dict.
    """

    # Maps a node attribute name, as found in the json returned by a
    # 'resources full' request, to the helper that stores it in the
    # node's property tuple list.
    resources_fulljson_dict = {
        'network_address': AddNodeNetworkAddr,
        'site': AddNodeSite,
        'radio': AddNodeRadio,
        'mobile': AddMobility,
        'x': AddPosX,
        'y': AddPosY,
        'z': AddPosZ,
        'archi': AddHardwareType,
        'state': AddBootState,
        'id': AddOarNodeId,
        }

    def __init__(self, srv):
        """Set up empty parsing structures and query the OAR API version.

        :param srv: OAR server proxy providing GETRequestToOARRestAPI.
        """
        self.version_json_dict = {
            'api_version': None, 'apilib_version': None,
            'api_timezone': None, 'api_timestamp': None,
            'oar_version': None, }
        self.config = Config()
        self.interface_hrn = self.config.SFA_INTERFACE_HRN
        self.timezone_json_dict = {
            'timezone': None, 'api_timestamp': None, }
        self.jobs_json_dict = {
            'total': None, 'links': [],
            'offset': None, 'items': [], }
        self.jobs_table_json_dict = self.jobs_json_dict
        self.jobs_details_json_dict = self.jobs_json_dict
        self.server = srv
        self.node_dictlist = {}
        self.raw_json = None
        self.site_dict = {}
        # the apilib version drives how some answers are laid out
        # (see ParseResourcesFull), so fetch it right away
        self.SendRequest("GET_version")

    def ParseVersion(self):
        """Fill version_json_dict from the 'GET_version' answer.

        Newer OAR APIs use the 'oar_version'/'api_version' keys, older
        ones use 'oar'/'api'; both layouts are handled.
        """
        if 'oar_version' in self.raw_json:
            self.version_json_dict.update(
                api_version=self.raw_json['api_version'],
                apilib_version=self.raw_json['apilib_version'],
                api_timezone=self.raw_json['api_timezone'],
                api_timestamp=self.raw_json['api_timestamp'],
                oar_version=self.raw_json['oar_version'])
        else:
            self.version_json_dict.update(
                api_version=self.raw_json['api'],
                apilib_version=self.raw_json['apilib'],
                api_timezone=self.raw_json['api_timezone'],
                api_timestamp=self.raw_json['api_timestamp'],
                oar_version=self.raw_json['oar'])
        # was a bare debug 'print'; route it through the logger instead
        logger.debug("OARGETParser ParseVersion apilib_version %s"
                     % (self.version_json_dict['apilib_version']))

    def ParseTimezone(self):
        """Return the (api_timestamp, timezone) pair from the answer."""
        api_timestamp = self.raw_json['api_timestamp']
        api_tz = self.raw_json['timezone']
        return api_timestamp, api_tz

    def ParseJobs(self):
        """Return the raw json of a 'GET_jobs' answer."""
        self.jobs_list = []
        logger.debug("OARGETParser ParseJobs")
        return self.raw_json

    def ParseJobsTable(self):
        """Not implemented: answers of 'GET_jobs_table' are ignored."""
        logger.debug("OARGETParser ParseJobsTable")

    def ParseJobsDetails(self):
        """Return the raw json of a 'GET_jobs_details' answer.

        Currently this request is seldom used, so no specific parsing
        is done; the caller gets the full json.
        """
        return self.raw_json

    def ParseJobsIds(self):
        """Extract the interesting attributes of a single job.

        :returns: a dict restricted to the attributes in job_resources,
            or None (with a logged exception) when one is missing.
        """
        job_resources = ['wanted_resources', 'name', 'id', 'start_time',
                         'state', 'owner', 'walltime', 'message']

        # full list of the attributes an OAR job can carry, kept here
        # for reference only
        job_resources_full = ['launching_directory', 'links',
            'resubmit_job_id', 'owner', 'events', 'message',
            'scheduled_start', 'id', 'array_id', 'exit_code',
            'properties', 'state', 'array_index', 'walltime',
            'type', 'initial_request', 'stop_time', 'project',
            'start_time', 'dependencies', 'api_timestamp',
            'submission_time', 'reservation', 'stdout_file', 'types',
            'cpuset_name', 'name', 'wanted_resources', 'queue',
            'stderr_file', 'command']

        job_info = self.raw_json
        values = []
        try:
            for k in job_resources:
                values.append(job_info[k])
            return dict(zip(job_resources, values))
        except KeyError:
            logger.log_exc("ParseJobsIds KeyError ")

    def ParseJobsIdResources(self):
        """ Parses the json produced by the request
        /oarapi/jobs/id/resources.json.
        Returns a list of oar node ids that are scheduled for the
        given job id.

        """
        job_resources = []
        for resource in self.raw_json['items']:
            job_resources.append(resource['id'])
        return job_resources

    def ParseResources(self):
        """ Parses the json produced by a get_resources request on oar."""
        # resources are listed inside the 'items' list from the json
        self.raw_json = self.raw_json['items']
        self.ParseNodes()

    def ParseReservedNodes(self):
        """ Returns an array containing the list of the reserved nodes """
        # resources are listed inside the 'items' list from the json
        reservation_list = []
        logger.debug("OARGETParser ParseReservedNodes %s"
                     % (self.raw_json['items']))
        job = {}
        # Parse resources info
        for json_element in self.raw_json['items']:
            # In case it is a real reservation (not asap case)
            if json_element['scheduled_start']:
                job['t_from'] = json_element['scheduled_start']
                job['t_until'] = int(json_element['scheduled_start']) + \
                    int(json_element['walltime'])
                # Get resources id list for the job
                job['resource_ids'] = \
                    [node_dict['id']
                     for node_dict in json_element['resources']]
            else:
                job['t_from'] = "As soon as possible"
                job['t_until'] = "As soon as possible"
                job['resource_ids'] = ["Undefined"]

            job['state'] = json_element['state']
            job['lease_id'] = json_element['id']
            job['user'] = json_element['owner']
            reservation_list.append(job)
            # reset dict for the next job
            job = {}
        return reservation_list

    def ParseRunningJobs(self):
        """ Gets the list of nodes currently in use from the attributes of the
        running jobs.

        """
        logger.debug("OARESTAPI \tParseRunningJobs__________________________ ")
        # resources are listed inside the 'items' list from the json
        nodes = []
        for job in self.raw_json['items']:
            for node in job['nodes']:
                nodes.append(node['network_address'])
        return nodes

    def ParseDeleteJobs(self):
        """ No need to parse anything in this function. A POST
        is done to delete the job.

        """
        return

    def ParseResourcesFull(self):
        """ This method is responsible for parsing all the attributes
        of all the nodes returned by OAR when issuing a get resources full.
        The information from the nodes and the sites are separated.
        Updates the node_dictlist so that the dictionnary of the platform's
        nodes is available afterwards.

        """
        logger.debug("OARRESTAPI ParseResourcesFull________________________ ")
        # resources are listed inside the 'items' list from the json,
        # except with apilib 0.2.10 where the answer is the bare list
        if self.version_json_dict['apilib_version'] != "0.2.10":
            self.raw_json = self.raw_json['items']
        self.ParseNodes()
        self.ParseSites()
        return self.node_dictlist

    def ParseResourcesFullSites(self):
        """ UNUSED. Originally used to get information from the sites.
        ParseResourcesFull is used instead.

        """
        if self.version_json_dict['apilib_version'] != "0.2.10":
            self.raw_json = self.raw_json['items']
        self.ParseNodes()
        self.ParseSites()
        return self.site_dict

    def ParseNodes(self):
        """ Parse nodes properties from OAR.
        Put them into a dictionary with key = node id and value is a
        dictionary of the node properties and properties' values.

        """
        # sorted() instead of list.sort(): dict.keys() is not a list
        # under python 3, and the result is identical
        keys = sorted(self.resources_fulljson_dict.keys())

        for dictline in self.raw_json:
            # a new node is inserted, keyed by its network address
            node_id = self.resources_fulljson_dict['network_address'](
                self.node_dictlist, dictline['network_address'])
            for k in keys:
                if k in dictline:
                    if k == 'network_address':
                        continue
                    self.resources_fulljson_dict[k](
                        self.node_dictlist[node_id], dictline[k])

            # All properties have been appended to the property tuple
            # list; turn it into a dictionary
            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])

    def slab_hostname_to_hrn(self, root_auth, hostname):
        """Build a node hrn from the root authority and its hostname."""
        return root_auth + '.' + hostname

    def ParseSites(self):
        """ Returns a list of dictionnaries containing the sites' attributes."""
        nodes_per_site = {}
        config = Config()
        # Create a list of nodes per site_id
        for node_id in self.node_dictlist:
            node = self.node_dictlist[node_id]
            if node['site'] not in nodes_per_site:
                nodes_per_site[node['site']] = []
                nodes_per_site[node['site']].append(node['node_id'])
            else:
                if node['node_id'] not in nodes_per_site[node['site']]:
                    nodes_per_site[node['site']].append(node['node_id'])

        # Create a site dictionary whose key is the site name and whose
        # value is a dictionary of properties, including the node_ids
        for node_id in self.node_dictlist:
            node = self.node_dictlist[node_id]
            node.update({'hrn': self.slab_hostname_to_hrn(
                self.interface_hrn, node['hostname'])})
            self.node_dictlist.update({node_id: node})

            if node['site'] not in self.site_dict:
                self.site_dict[node['site']] = {
                    'site': node['site'],
                    'node_ids': nodes_per_site[node['site']],
                    'latitude': "48.83726",
                    'longitude': "- 2.10336",
                    'name': config.SFA_REGISTRY_ROOT_AUTH,
                    'pcu_ids': [], 'max_slices': None,
                    'ext_consortium_id': None,
                    'max_slivers': None, 'is_public': True,
                    'peer_site_id': None,
                    'abbreviated_name': "senslab", 'address_ids': [],
                    # was "http,//www.senslab.info" - fixed the comma
                    # that made the url malformed
                    'url': "http://www.senslab.info", 'person_ids': [],
                    'site_tag_ids': [], 'enabled': True, 'slice_ids': [],
                    'date_created': None, 'peer_id': None}

    # Maps each supported request name to its uri on the OAR server and
    # to the method that parses its answer.
    OARrequests_uri_dict = {
        'GET_version':
            {'uri': '/oarapi/version.json', 'parse_func': ParseVersion},
        'GET_timezone':
            {'uri': '/oarapi/timezone.json', 'parse_func': ParseTimezone},
        'GET_jobs':
            {'uri': '/oarapi/jobs.json', 'parse_func': ParseJobs},
        'GET_jobs_id':
            {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseJobsIds},
        'GET_jobs_id_resources':
            {'uri': '/oarapi/jobs/id/resources.json',
             'parse_func': ParseJobsIdResources},
        'GET_jobs_table':
            {'uri': '/oarapi/jobs/table.json', 'parse_func': ParseJobsTable},
        'GET_jobs_details':
            {'uri': '/oarapi/jobs/details.json',
             'parse_func': ParseJobsDetails},
        'GET_reserved_nodes':
            {'uri':
             '/oarapi/jobs/details.json?state=Running,Waiting,Launching',
             'owner': '&user=',
             'parse_func': ParseReservedNodes},
        'GET_running_jobs':
            {'uri': '/oarapi/jobs/details.json?state=Running',
             'parse_func': ParseRunningJobs},
        'GET_resources_full':
            {'uri': '/oarapi/resources/full.json',
             'parse_func': ParseResourcesFull},
        'GET_sites':
            {'uri': '/oarapi/resources/full.json',
             'parse_func': ParseResourcesFullSites},
        'GET_resources':
            {'uri': '/oarapi/resources.json', 'parse_func': ParseResources},
        'DELETE_jobs_id':
            {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseDeleteJobs},
        }

    def SendRequest(self, request, strval=None, username=None):
        """ Connects to OAR, sends the valid GET requests and uses
        the appropriate json parsing functions.

        """
        if request in self.OARrequests_uri_dict:
            self.raw_json = self.server.GETRequestToOARRestAPI(request,
                                                               strval,
                                                               username)
            return self.OARrequests_uri_dict[request]['parse_func'](self)
        else:
            # bug fix: the format string was missing its %s placeholder,
            # so this very call used to raise a TypeError
            logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST %s"
                         % (request))
+
--- /dev/null
#!/bin/bash

# Reset a senslab SFA installation from scratch:
# nuke the registry database, wipe the server state under /etc/sfa and
# /var/lib/sfa, rebuild and reinstall the code from the given git
# repository, restore the vm-specific configuration, then re-import the
# registry from LDAP.
#
# Usage: bash_nukem <repository_directory> <vm>   (vm: senslab | senslab2)

# Test number of arguments
if (( $# != 2 ))
then
    echo " Usage : bash_nukem repository_directory vm (should be senslab or senslab2)"
    echo $#
    exit 1
fi

# Check that the repository directory exists.
# Bug fix: the original kept going with git_local_repo unset, which
# later made the script run "sudo make clean" and the install steps
# from whatever directory it happened to be in.
if [ -d "$1" ]
then
    git_local_repo=$1
    echo "REPOSITORY: $git_local_repo"
else
    echo "Repository directory $1 does not exist"
    exit 1
fi

# Set which vm we are working on (sfa-vm or sfa-vm2)
if [[ $2 = "senslab" || $2 = "senslab2" ]]
then
    vm=$2
    echo "$vm"
else
    echo "Vm options should be senslab or senslab2, not $2"
    exit 1
fi

# First restart sfa (prevents stalling when connecting
# to the DB and dropping tables)
sudo service sfa restart
# Nuke the database
sudo sfaadmin.py registry nuke

# Drop table in slab_sfa
# to avoid duplicates.
psql -d slab_sfa -U sfa -W -q -c "drop table slice_senslab;"

# ATTENTION: Save the config file /etc/sfa/sfa_config
# before continuing

# Remove all the remaining gid, creds files
# of the server ("|| exit" guards keep the rm from ever running in the
# wrong directory if the cd fails)
sudo rm -rf /var/lib/sfa
cd /etc/sfa || exit 1
sudo rm -rf ./*
sudo service sfa restart

# Rebuild and reinstall the code
cd "$git_local_repo" || exit 1
sudo make clean
make
sudo python setup.py install

# Put back the vm-specific configuration files
sudo cp "$git_local_repo/sfa/senslab/config/$vm/sfa_config.xml" /etc/sfa/sfa_config.xml
sudo cp "$git_local_repo/sfa/senslab/config/$vm/default_config.xml" /etc/sfa/default_config.xml
sudo cp "$git_local_repo/sfa/senslab/config/ldap_config.py" /etc/sfa/ldap_config.py
sudo service sfa restart

# User stuff : clean your folder
cd ~/.sfi || exit 1
rm -f *.sscert *.cred *.gid sfi_cache.dat
cd ~ || exit 1

# Import the database from ldap
sudo sfaadmin.py registry import_registry
sudo service sfa restart

sudo rm -rf /var/lib/sfa/authorities/plc
--- /dev/null
+Source code management:
+Git
+
+git repository url :
+git://138.96.116.40/sfa.git
+git://git.f-lab.fr/sfa.git
+
+
+Branches to build:
+senslab2
+
+Add a step to build:
+Execute a shell script
+_________________________________________________________________________________________
+make clean
+make
+sudo python setup.py install
+git_local_repo="/root/.jenkins/jobs/SFA/workspace"
+#jenkins clones the git repo here when started by root
+vm="senslab2"
+sudo cp $git_local_repo/sfa/senslab/config/$vm/sfa_config.xml /etc/sfa/sfa_config.xml
+sudo cp $git_local_repo/sfa/senslab/config/$vm/default_config.xml /etc/sfa/default_config.xml
+
+sudo cp $git_local_repo/sfa/senslab/config/ldap_config.py /etc/sfa/ldap_config.py
+sudo service sfa restart
+#nosetests --with-xcoverage --with-xunit --cover-package=senslab --cover-erase --verbose
+#FAIL nosetests --with-xcoverage --with-xunit --all-modules --traverse-namespace --cover-package=senslab --cover-erase --cover-inclusive --verbose
+nosetests --with-xcoverage --with-xunit --traverse-namespace --cover-package=senslab --cover-erase --verbose
+
+pylint -f parseable /root/.jenkins/jobs/SFA/workspace/sfa/senslab/ | tee pylint.out
+____________________________________________________________________________________________
+
+
+Add a step to build :
+Publish tests report results JUnit
+nosetests.xml
+
+Add a step to build :
+Publish cobertura coverage report
+**/coverage.xml
+Consider only stable builds
+
+Add a step to build :
+Report violations
+pylint :
+XML filename pattern : **/pylint.out
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Default SFA configuration file
+
+Thierry Parmentelat
+
+-->
+
+<!DOCTYPE configuration PUBLIC "-//PlanetLab Central//DTD PLC configuration//EN" "plc_config.dtd">
+
+<configuration>
+ <variables>
+
+ <!-- ======================================== -->
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+
+ <variablelist>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+
+ <variable id="credential_schema" type="string">
+ <name>Credential Schema</name>
+ <value>/etc/sfa/credential.xsd</value>
+ <description>The path to the default credential schema</description>
+ </variable>
+
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>2</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+
+ <variable id="max_slice_renew" type="int">
+ <name>Max Slice Renew</name>
+ <value>60</value>
+          <description>Maximum amount of days a user can extend/renew their slices to</description>
+ </variable>
+
+ <variable id="session_key_path" type="string">
+ <name>User Session Keys Path </name>
+ <value>/var/lib/sfa/session_keys</value>
+          <description>Some services will perform operations on behalf of a user, but make
+ it look like the user is the one performing the operation. Doing this requires a
+ valid key pair and credential for the user. This option defines the path where
+ key pairs and credentials are generated and stored.
+ This functionality is used by the SFA web GUI.
+ </description>
+ </variable>
+
+ <variable id="data_dir" type="string">
+ <name>Data Directory </name>
+ <value>/var/lib/sfa/</value>
+          <description>Directory where cached certificates and other data is stored.
+ </description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Registry</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ registry.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the registry is expected to
+ be found; using localhost when the local registry is enabled
+ seems reasonable.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Slice Manager</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ slice manager.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the slice manager is expected to
+ be found.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>false</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Aggregate</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as an
+ aggregate manager.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the aggregate is expected to
+ be found.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>true</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+
+ </variablelist>
+
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_db">
+ <name></name>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enabled</name>
+ <value>true</value>
+ <description>Enable the database server on this machine.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Database host</name>
+ <value>localhost</value>
+ <description>The host where the SFA database can be reached.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Database port</name>
+ <value>5432</value>
+ <description>The port where the SFA database can be reached.</description>
+ </variable>
+
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+
+ <variable id="name" type="string">
+ <name>Database name</name>
+ <value>sfa</value>
+ <description>SFA database name.</description>
+ </variable>
+
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_flashpolicy">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect the flash policy server that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Flash Policy Server</name>
+ <value>false</value>
+ <description>Allows this local SFA instance to run a
+ flash policy server.</description>
+ </variable>
+ <variable id="config_file" type="string">
+ <name>Flash policy config file</name>
+ <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+ <description>The path to where the flash policy config file can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Flash policy port</name>
+ <value>843</value>
+ <description>The flash policy server port.</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_plc">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+
+ <variablelist>
+ <variable id="user" type="string">
+ <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+ <value>root@localhost.localdomain</value>
+ <description></description>
+ </variable>
+
+ <variable id="password" type="string">
+ <name>Password</name>
+ <value>root</value>
+ <description>The PLC password for SFA_PLC_USER.</description>
+ </variable>
+
+ <variable id="url" type="string">
+ <name>URL</name>
+ <value>https://localhost:443/PLCAPI/</value>
+ <description>Full URL of PLC interface.</description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_federica">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+ <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_nova">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect how SFA connects to
+ the Nova/EC2 API</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Sfa nova user</name>
+ <value>novaadmin</value>
+ <description>Account/context to use when performing
+ administrative nova operations</description>
+ </variable>
+ <variable id="api_url" type="string">
+ <name>Nova API url</name>
+ <value>127.0.0.1</value>
+ <description>The Nova/EC2 API url </description>
+ </variable>
+ <variable id="api_port" type="int">
+ <name>Nova API Port</name>
+ <value>8773</value>
+ <description>The Nova/EC2 API port.</description>
+ </variable>
+ <variable id="novarc" type="string">
+ <name>novarc</name>
+ <value>/root/novarc</value>
+ <description>Path to novarc client config file</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ </variables>
+
+ <comps>
+ <!-- deprecated - not used anymore - use .lst files instead -->
+ </comps>
+
+</configuration>
--- /dev/null
+# DO NOT EDIT. This file was automatically generated at
+# Mon Jun 25 15:01:21 2012 from:
+#
+# /etc/sfa/sfa_config.xml
+
+# XMLRPC URL
+# URL for the federica xmlrpc API; login and password need to be set
+# like in http://login:password@hostname:port/the/path
+SFA_FEDERICA_URL='https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/'
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_AGGREGATE_CACHING=1
+
+# Hostname
+# The hostname where the aggregate is expected to be found.
+SFA_AGGREGATE_HOST='localhost'
+
+# Enable Aggregate
+# Allows this local SFA instance to run as an aggregate manager.
+SFA_AGGREGATE_ENABLED=1
+
+# Port number
+# The port where the aggregate is to be found.
+SFA_AGGREGATE_PORT=12346
+
+# Database name
+# SFA database name.
+SFA_DB_NAME='sfa'
+
+# Enabled
+# Enable the database server on this machine.
+SFA_DB_ENABLED=1
+
+# Database host
+# The host where the SFA database can be reached.
+SFA_DB_HOST='localhost'
+
+# Database user
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_USER='sfa'
+
+# Database password
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_PASSWORD='sfa'
+
+# Database port
+# The port where the SFA database can be reached.
+SFA_DB_PORT=5432
+
+# Flash policy config file
+# The path to where the flash policy config file can be reached.
+SFA_FLASHPOLICY_CONFIG_FILE='/etc/sfa/sfa_flashpolicy_config.xml'
+
+# Enable Flash Policy Server
+# Allows this local SFA instance to run a flash policy server.
+SFA_FLASHPOLICY_ENABLED=0
+
+# Flash policy port
+# The flash policy server port.
+SFA_FLASHPOLICY_PORT=843
+
+# Nova API Port
+# The Nova/EC2 API port.
+SFA_NOVA_API_PORT=8773
+
+# Sfa nova user
+# Account/context to use when performing administrative nova operations
+SFA_NOVA_USER='novaadmin'
+
+# Nova API url
+# The Nova/EC2 API url
+SFA_NOVA_API_URL='127.0.0.1'
+
+# URL
+# Full URL of PLC interface.
+SFA_PLC_URL='https://localhost:443/PLCAPI/'
+
+# Password
+# The PLC password for SFA_PLC_USER.
+SFA_PLC_PASSWORD='root'
+
+# PLC login name for an admin user; SFA will carry on operations under this account.
+SFA_PLC_USER='root@localhost.localdomain'
+
+# Root Authority
+# The hrn of the registry's root auth.
+SFA_REGISTRY_ROOT_AUTH='senslab'
+
+# Hostname
+# The hostname where the registry is expected to be found; using
+# localhost when the local registry is enabled seems reasonable.
+SFA_REGISTRY_HOST='localhost'
+
+# Enable Registry
+# Allows this local SFA instance to run as a registry.
+SFA_REGISTRY_ENABLED=1
+
+# Port number
+# The port where the registry is to be found.
+SFA_REGISTRY_PORT=12345
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_SM_CACHING=0
+
+# Hostname
+# The hostname where the slice manager is expected to be found.
+SFA_SM_HOST='localhost'
+
+# Enable Slice Manager
+# Allows this local SFA instance to run as a slice manager.
+SFA_SM_ENABLED=1
+
+# Port number
+# The port where the slice manager is to be found.
+SFA_SM_PORT=12347
+
+# Human readable name
+# The human readable name for this interface.
+SFA_INTERFACE_HRN='senslab'
+
+# Generic Flavour
+# This string refers to a class located in sfa.generic that describes
+# which specific implementation needs to be used for api, manager and
+# driver objects. PlanetLab users do not need to change this setting.
+SFA_GENERIC_FLAVOUR='slab'
+
+# Credential Schema
+# The path to the default credential schema
+SFA_CREDENTIAL_SCHEMA='/etc/sfa/credential.xsd'
+
+# Debug
+# Logging level; 0=minimum, 1=info, 2=debug
+SFA_API_LOGLEVEL=0
+
+# User Session Keys Path
+# Some services will perform operations on behalf of a user, but make it
+# look like the user is the one performing the operation. Doing this
+# requires a valid key pair and credential for the user. This option
+# defines the path where key pairs and credentials are generated and
+# stored. This functionality is used by the SFA web GUI.
+SFA_SESSION_KEY_PATH='/var/lib/sfa/session_keys'
+
+# Max Slice Renew
+# Maximum amount of days a user can extend/renew their slices to
+SFA_MAX_SLICE_RENEW=60
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+ <variables>
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+ <variablelist>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+ <variable id="credential_schema" type="string">
+ <name>Credential Schema</name>
+ <value>/etc/sfa/credential.xsd</value>
+ <description>The path to the default credential schema</description>
+ </variable>
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>0</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+ <variable id="max_slice_renew" type="int">
+ <name>Max Slice Renew</name>
+ <value>60</value>
+ <description>Maximum amount of days a user can extend/renew their slices to</description>
+ </variable>
+ <variable id="session_key_path" type="string">
+ <name>User Session Keys Path </name>
+ <value>/var/lib/sfa/session_keys</value>
+ <description>Some services will perform operations on behalf of a user, but make
+ it look like the user is the one performing the operation. Doing this requires a
+ valid key pair and credential for the user. This option defines the path where
+ key pairs and credentials are generated and stored.
+ This functionality is used by the SFA web GUI.
+ </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Registry</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ registry.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the registry is expected to
+ be found; using localhost when the local registry is enabled
+ seems reasonable.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Slice Manager</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ slice manager.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the slice manager is expected to
+ be found.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>false</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Aggregate</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as an
+ aggregate manager.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the aggregate is expected to
+ be found.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>true</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_db">
+ <name/>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enabled</name>
+ <value>true</value>
+ <description>Enable the database server on this machine.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Database host</name>
+ <value>localhost</value>
+ <description>The host where the SFA database can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Database port</name>
+ <value>5432</value>
+ <description>The port where the SFA database can be reached.</description>
+ </variable>
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="name" type="string">
+ <name>Database name</name>
+ <value>sfa</value>
+ <description>SFA database name.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_flashpolicy">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect the flash policy server that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Flash Policy Server</name>
+ <value>false</value>
+ <description>Allows this local SFA instance to run a
+ flash policy server.</description>
+ </variable>
+ <variable id="config_file" type="string">
+ <name>Flash policy config file</name>
+ <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+ <description>The path to where the flash policy config file can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Flash policy port</name>
+ <value>843</value>
+ <description>The flash policy server port.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_plc">
+ <name/>
+ <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+ <value>root@localhost.localdomain</value>
+ <description/>
+ </variable>
+ <variable id="password" type="string">
+ <name>Password</name>
+ <value>root</value>
+ <description>The PLC password for SFA_PLC_USER.</description>
+ </variable>
+ <variable id="url" type="string">
+ <name>URL</name>
+ <value>https://localhost:443/PLCAPI/</value>
+ <description>Full URL of PLC interface.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_federica">
+ <name/>
+ <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+ <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_nova">
+ <name>SFA Nova</name>
+ <description>The settings that affect how SFA connects to
+ the Nova/EC2 API</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Sfa nova user</name>
+ <value>novaadmin</value>
+ <description>Account/context to use when performing
+ administrative nova operations</description>
+ </variable>
+ <variable id="api_url" type="string">
+ <name>Nova API url</name>
+ <value>127.0.0.1</value>
+ <description>The Nova/EC2 API url </description>
+ </variable>
+ <variable id="api_port" type="int">
+ <name>Nova API Port</name>
+ <value>8773</value>
+ <description>The Nova/EC2 API port.</description>
+ </variable>
+ </variablelist>
+ </category>
+ </variables>
+</configuration>
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+ <variables>
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_db">
+ <name/>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>12347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+ <variablelist>
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>0</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+ </variablelist>
+ </category>
+ </variables>
+</configuration>
--- /dev/null
+[sfa_federica]
+url = https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/
+
+[sfa_aggregate]
+caching = true
+host = localhost
+enabled = true
+port = 12346
+
+[sfa_db]
+name = sfa
+enabled = true
+host = localhost
+user = sfa
+password = sfa
+port = 5432
+
+[sfa_flashpolicy]
+config_file = /etc/sfa/sfa_flashpolicy_config.xml
+enabled = false
+port = 843
+
+[sfa_nova]
+api_url = 127.0.0.1
+api_port = 8773
+user = novaadmin
+novarc = /root/novarc
+
+[sfa_plc]
+url = https://localhost:443/PLCAPI/
+password = root
+user = root@localhost.localdomain
+
+[sfa_registry]
+host = localhost
+enabled = true
+port = 12345
+root_auth = senslab
+
+[sfa_sm]
+caching = false
+host = localhost
+enabled = true
+port = 12347
+
+[sfa]
+interface_hrn = senslab
+data_dir = /var/lib/sfa/
+generic_flavour = slab
+credential_schema = /etc/sfa/credential.xsd
+api_loglevel = 2
+session_key_path = /var/lib/sfa/session_keys
+max_slice_renew = 60
+
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+
+<!--
+Default SFA configuration file
+
+Thierry Parmentelat
+
+-->
+
+<!DOCTYPE configuration PUBLIC "-//PlanetLab Central//DTD PLC configuration//EN" "plc_config.dtd">
+
+<configuration>
+ <variables>
+
+ <!-- ======================================== -->
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+
+ <variablelist>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab2</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+
+ <variable id="credential_schema" type="string">
+ <name>Credential Schema</name>
+ <value>/etc/sfa/credential.xsd</value>
+ <description>The path to the default credential schema</description>
+ </variable>
+
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>2</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+
+ <variable id="max_slice_renew" type="int">
+ <name>Max Slice Renew</name>
+ <value>60</value>
+ <description>Maximum amount of days a user can extend/renew their slices to</description>
+ </variable>
+
+ <variable id="session_key_path" type="string">
+ <name>User Session Keys Path </name>
+ <value>/var/lib/sfa/session_keys</value>
+ <description>Some services will perform operations on behalf of a user, but make
+ it look like the user is the one performing the operation. Doing this requires a
+ valid key pair and credential for the user. This option defines the path where
+ key pairs and credentials are generated and stored.
+ This functionality is used by the SFA web GUI.
+ </description>
+ </variable>
+
+ <variable id="data_dir" type="string">
+ <name>Data Directory </name>
+ <value>/var/lib/sfa/</value>
+ <description>Directory where cached certificates and other data is stored.
+ </description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Registry</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ registry.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the registry is expected to
+ be found; using localhost when the local registry is enabled
+ seems reasonable.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab2</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Slice Manager</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ slice manager.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the slice manager is expected to
+ be found.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>false</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Aggregate</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as an
+ aggregate manager.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the aggregate is expected to
+ be found.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>true</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+
+ </variablelist>
+
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_db">
+ <name></name>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enabled</name>
+ <value>true</value>
+ <description>Enable the database server on this machine.</description>
+ </variable>
+
+ <variable id="host" type="hostname">
+ <name>Database host</name>
+ <value>localhost</value>
+ <description>The host where the SFA database can be reached.</description>
+ </variable>
+
+ <variable id="port" type="int">
+ <name>Database port</name>
+ <value>5432</value>
+ <description>The port where the SFA database can be reached.</description>
+ </variable>
+
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+
+ <variable id="name" type="string">
+ <name>Database name</name>
+ <value>sfa</value>
+ <description>SFA database name.</description>
+ </variable>
+
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_flashpolicy">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect the flash policy server that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Flash Policy Server</name>
+ <value>false</value>
+ <description>Allows this local SFA instance to run a
+ flash policy server.</description>
+ </variable>
+ <variable id="config_file" type="string">
+ <name>Flash policy config file</name>
+ <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+ <description>The path to where the flash policy config file can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Flash policy port</name>
+ <value>843</value>
+ <description>The flash policy server port.</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_plc">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+
+ <variablelist>
+ <variable id="user" type="string">
+ <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+ <value>root@localhost.localdomain</value>
+ <description></description>
+ </variable>
+
+ <variable id="password" type="string">
+ <name>Password</name>
+ <value>root</value>
+ <description>The PLC password for SFA_PLC_USER.</description>
+ </variable>
+
+ <variable id="url" type="string">
+ <name>URL</name>
+ <value>https://localhost:443/PLCAPI/</value>
+ <description>Full URL of PLC interface.</description>
+ </variable>
+
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_federica">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+ <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_nova">
+ <name>SFA Nova</name>
+ <description>The settings that affect how SFA connects to
+ the Nova/EC2 API</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Sfa nova user</name>
+ <value>novaadmin</value>
+ <description>Account/context to use when performing
+ administrative nova operations</description>
+ </variable>
+ <variable id="api_url" type="string">
+ <name>Nova API url</name>
+ <value>127.0.0.1</value>
+ <description>The Nova/EC2 API url </description>
+ </variable>
+ <variable id="api_port" type="int">
+ <name>Nova API Port</name>
+ <value>8773</value>
+ <description>The Nova/EC2 API port.</description>
+ </variable>
+ <variable id="novarc" type="string">
+ <name>novarc</name>
+ <value>/root/novarc</value>
+ <description>Path to novarc client config file</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ </variables>
+
+ <comps>
+ <!-- deprecated - not used anymore - use .lst files instead -->
+ </comps>
+
+</configuration>
--- /dev/null
+# DO NOT EDIT. This file was automatically generated at
+# Mon Jun 25 15:01:21 2012 from:
+#
+# /etc/sfa/sfa_config.xml
+
+# XMLRPC URL
+# URL for the federica xmlrpc API; login and password need to be set
+# like in http://login:password@hostname:port/the/path
+SFA_FEDERICA_URL='https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/'
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_AGGREGATE_CACHING=1
+
+# Hostname
+# The hostname where the aggregate is expected to be found.
+SFA_AGGREGATE_HOST='localhost'
+
+# Enable Aggregate
+# Allows this local SFA instance to run as an aggregate manager.
+SFA_AGGREGATE_ENABLED=1
+
+# Port number
+# The port where the aggregate is to be found.
+SFA_AGGREGATE_PORT=52346
+
+# Database name
+# SFA database name.
+SFA_DB_NAME='sfa'
+
+# Enabled
+# Enable the database server on this machine.
+SFA_DB_ENABLED=1
+
+# Database host
+# The host where the SFA database can be reached.
+SFA_DB_HOST='localhost'
+
+# Database user
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_USER='sfa'
+
+# Database password
+# When SFA gets co-hosted with a myplc, this should match the PLC
+# config.
+SFA_DB_PASSWORD='sfa'
+
+# Database port
+# The port where the SFA database can be reached.
+SFA_DB_PORT=5432
+
+# Flash policy config file
+# The path to where the flash policy config file can be reached.
+SFA_FLASHPOLICY_CONFIG_FILE='/etc/sfa/sfa_flashpolicy_config.xml'
+
+# Enable Flash Policy Server
+# Allows this local SFA instance to run a flash policy server.
+SFA_FLASHPOLICY_ENABLED=0
+
+# Flash policy port
+# The flash policy server port.
+SFA_FLASHPOLICY_PORT=843
+
+# Nova API Port
+# The Nova/EC2 API port.
+SFA_NOVA_API_PORT=8773
+
+# Sfa nova user
+# Account/context to use when performing administrative nova operations
+SFA_NOVA_USER='novaadmin'
+
+# Nova API url
+# The Nova/EC2 API url
+SFA_NOVA_API_URL='127.0.0.1'
+
+# URL
+# Full URL of PLC interface.
+SFA_PLC_URL='https://localhost:443/PLCAPI/'
+
+# Password
+# The PLC password for SFA_PLC_USER.
+SFA_PLC_PASSWORD='root'
+
+# PLC login name for an admin user; SFA will carry on operations under this account.
+SFA_PLC_USER='root@localhost.localdomain'
+
+# Root Authority
+# The hrn of the registry's root auth.
+SFA_REGISTRY_ROOT_AUTH='senslab2'
+
+# Hostname
+# The hostname where the registry is expected to be found; using
+# localhost when the local registry is enabled seems reasonable.
+SFA_REGISTRY_HOST='localhost'
+
+# Enable Registry
+# Allows this local SFA instance to run as a registry.
+SFA_REGISTRY_ENABLED=1
+
+# Port number
+# The port where the registry is to be found.
+SFA_REGISTRY_PORT=52345
+
+# Cache advertisement rspec
+# Enable caching of the global advertisement, as returned by
+# ListResources without a slice argument.
+SFA_SM_CACHING=0
+
+# Hostname
+# The hostname where the slice manager is expected to be found.
+SFA_SM_HOST='localhost'
+
+# Enable Slice Manager
+# Allows this local SFA instance to run as a slice manager.
+SFA_SM_ENABLED=1
+
+# Port number
+# The port where the slice manager is to be found.
+SFA_SM_PORT=52347
+
+# Human readable name
+# The human readable name for this interface.
+SFA_INTERFACE_HRN='senslab2'
+
+# Generic Flavour
+# This string refers to a class located in sfa.generic that describes
+# which specific implementation needs to be used for api, manager and
+# driver objects. PlanetLab users do not need to change this setting.
+SFA_GENERIC_FLAVOUR='slab'
+
+# Credential Schema
+# The path to the default credential schema
+SFA_CREDENTIAL_SCHEMA='/etc/sfa/credential.xsd'
+
+# Debug
+# Logging level; 0=minimum, 1=info, 2=debug
+SFA_API_LOGLEVEL=2
+
+# User Session Keys Path
+# Some services will perform operations on behalf of a user, but make it
+# look like the user is the one performing the operation. Doing this
+# requires a valid key pair and credential for the user. This option
+# defines the path where key pairs and credentials are generated and
+# stored. This functionality is used by the SFA web GUI.
+SFA_SESSION_KEY_PATH='/var/lib/sfa/session_keys'
+
+# Max Slice Renew
+# Maximum amount of days a user can extend/renew their slices to
+SFA_MAX_SLICE_RENEW=60
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+ <variables>
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+ <variablelist>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab2</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+ <variable id="credential_schema" type="string">
+ <name>Credential Schema</name>
+ <value>/etc/sfa/credential.xsd</value>
+ <description>The path to the default credential schema</description>
+ </variable>
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>2</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+ <variable id="max_slice_renew" type="int">
+ <name>Max Slice Renew</name>
+ <value>60</value>
+ <description>Maximum amount of days a user can extend/renew their slices to</description>
+ </variable>
+ <variable id="session_key_path" type="string">
+ <name>User Session Keys Path </name>
+ <value>/var/lib/sfa/session_keys</value>
+ <description>Some services will perform operations on behalf of a user, but make
+ it look like the user is the one performing the operation. Doing this requires a
+ valid key pair and credential for the user. This option defines the path where
+ key pairs and credentials are generated and stored.
+ This functionality is used by the SFA web GUI.
+ </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Registry</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ registry.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the registry is expected to
+ be found; using localhost when the local registry is enabled
+ seems reasonable.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab2</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Slice Manager</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as a
+ slice manager.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the slice manager is expected to
+ be found.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>false</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Aggregate</name>
+ <value>true</value>
+ <description>Allows this local SFA instance to run as an
+ aggregate manager.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Hostname</name>
+ <value>localhost</value>
+ <description>The hostname where the aggregate is expected to
+ be found.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+ <variable id="caching" type="boolean">
+ <name>Cache advertisement rspec</name>
+ <value>true</value>
+ <description>Enable caching of the global advertisement, as
+ returned by ListResources without a slice argument. </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_db">
+ <name/>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enabled</name>
+ <value>true</value>
+ <description>Enable the database server on this machine.</description>
+ </variable>
+ <variable id="host" type="hostname">
+ <name>Database host</name>
+ <value>localhost</value>
+ <description>The host where the SFA database can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Database port</name>
+ <value>5432</value>
+ <description>The port where the SFA database can be reached.</description>
+ </variable>
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="name" type="string">
+ <name>Database name</name>
+ <value>sfa</value>
+ <description>SFA database name.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_flashpolicy">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect the flash policy server that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Flash Policy Server</name>
+ <value>false</value>
+ <description>Allows this local SFA instance to run a
+ flash policy server.</description>
+ </variable>
+ <variable id="config_file" type="string">
+ <name>Flash policy config file</name>
+ <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+ <description>The path to where the flash policy config file can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Flash policy port</name>
+ <value>843</value>
+ <description>The flash policy server port.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_plc">
+ <name/>
+ <description>The settings that tell this SFA instance how to interact with the underlying PLC. Refer to plc-config-tty on this installation for more information.</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>PLC login name for an admin user; SFA will carry on operations under this account.</name>
+ <value>root@localhost.localdomain</value>
+ <description/>
+ </variable>
+ <variable id="password" type="string">
+ <name>Password</name>
+ <value>root</value>
+ <description>The PLC password for SFA_PLC_USER.</description>
+ </variable>
+ <variable id="url" type="string">
+ <name>URL</name>
+ <value>https://localhost:443/PLCAPI/</value>
+ <description>Full URL of PLC interface.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_federica">
+ <name/>
+ <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+ <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_nova">
+      <name>SFA Nova</name>
+ <description>The settings that affect how SFA connects to
+ the Nova/EC2 API</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Sfa nova user</name>
+ <value>novaadmin</value>
+ <description>Account/context to use when performing
+ administrative nova operations</description>
+ </variable>
+ <variable id="api_url" type="string">
+ <name>Nova API url</name>
+ <value>127.0.0.1</value>
+ <description>The Nova/EC2 API url </description>
+ </variable>
+ <variable id="api_port" type="int">
+ <name>Nova API Port</name>
+ <value>8773</value>
+ <description>The Nova/EC2 API port.</description>
+ </variable>
+ </variablelist>
+ </category>
+ </variables>
+</configuration>
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<configuration>
+ <variables>
+ <category id="sfa_aggregate">
+ <name>Aggregate</name>
+ <description>The settings that affect the aggregate manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52346</value>
+ <description>The port where the aggregate is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_db">
+ <name/>
+ <description>The settings that tell this SFA instance where to find its database. You can essentially leave this as-is unless you plan on hosting your data on some other box.</description>
+ <variablelist>
+ <variable id="user" type="string">
+ <name>Database user</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ <variable id="password" type="string">
+ <name>Database password</name>
+ <value>sfa</value>
+ <description>When SFA gets co-hosted with a myplc, this should match the PLC config.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_registry">
+ <name>Registry</name>
+ <description>The settings that affect the registry that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="root_auth" type="string">
+ <name>Root Authority</name>
+ <value>senslab2</value>
+ <description>The hrn of the registry's root auth.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52345</value>
+ <description>The port where the registry is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa_sm">
+ <name>Slice Manager</name>
+ <description>The settings that affect the slice manager that will run
+ as part of this SFA instance.</description>
+ <variablelist>
+ <variable id="port" type="int">
+ <name>Port number</name>
+ <value>52347</value>
+ <description>The port where the slice manager is to be found.</description>
+ </variable>
+ </variablelist>
+ </category>
+ <category id="sfa">
+ <name>General</name>
+ <description>Basic system variables.</description>
+ <variablelist>
+ <variable id="interface_hrn" type="string">
+ <name>Human readable name</name>
+ <value>senslab2</value>
+ <description>The human readable name for this interface.</description>
+ </variable>
+ <variable id="generic_flavour" type="string">
+ <name>Generic Flavour</name>
+ <value>slab</value>
+ <description>This string refers to a class located in sfa.generic that describes
+ which specific implementation needs to be used for api, manager and driver objects.
+ PlanetLab users do not need to change this setting.
+ </description>
+ </variable>
+ <variable id="api_loglevel" type="int">
+ <name>Debug</name>
+ <value>2</value>
+ <description>Logging level; 0=minimum, 1=info, 2=debug</description>
+ </variable>
+ </variablelist>
+ </category>
+ </variables>
+</configuration>
--- /dev/null
+[sfa_federica]
+url = https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/
+
+[sfa_aggregate]
+caching = true
+host = localhost
+enabled = true
+port = 52346
+
+[sfa_db]
+name = sfa
+enabled = true
+host = localhost
+user = sfa
+password = sfa
+port = 5432
+
+[sfa_flashpolicy]
+config_file = /etc/sfa/sfa_flashpolicy_config.xml
+enabled = false
+port = 843
+
+[sfa_nova]
+api_url = 127.0.0.1
+api_port = 8773
+user = novaadmin
+novarc = /root/novarc
+
+[sfa_plc]
+url = https://localhost:443/PLCAPI/
+password = root
+user = root@localhost.localdomain
+
+[sfa_registry]
+host = localhost
+enabled = true
+port = 52345
+root_auth = senslab2
+
+[sfa_sm]
+caching = false
+host = localhost
+enabled = true
+port = 52347
+
+[sfa]
+interface_hrn = senslab2
+data_dir = /var/lib/sfa/
+generic_flavour = slab
+credential_schema = /etc/sfa/credential.xsd
+api_loglevel = 2
+session_key_path = /var/lib/sfa/session_keys
+max_slice_renew = 60
+
--- /dev/null
+#!/bin/bash
+#
+# sfa starts sfa service
+#
+# chkconfig: 2345 61 39
+#
+# description: starts sfa service
+#
+
+# Source config
+[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
+
+# source function library
+. /etc/init.d/functions
+
+start() {
+
+    # Each enabled sub-service (registry / aggregate / slice manager /
+    # flash policy) is started as a separate daemonized server instance.
+    if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
+        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
+    fi
+
+    if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
+        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
+    fi
+
+    if [ "$SFA_SM_ENABLED" -eq 1 ]; then
+        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
+    fi
+
+    if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
+        action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
+    fi
+
+    # NOTE(review): $? here is the exit status of the last 'fi' test, not
+    # of the daemons launched by 'action' above -- TODO confirm intended.
+    RETVAL=$?
+    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py
+
+}
+
+stop() {
+    # Stop every sfa-server.py instance; clear the lock file on success.
+    action $"Shutting down SFA" killproc sfa-server.py
+    RETVAL=$?
+
+    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
+}
+
+
# Dispatch on the init action requested by the service manager.
case "$1" in
    start) start ;;
    stop) stop ;;
    # Bug fix: this used to invoke an undefined 'reload' function
    # ("reload force"); sfa-server.py has no reload support, so a
    # reload is performed as a full restart.
    reload) stop; start ;;
    restart) stop; start ;;
    condrestart)
        # restart only if the service is currently running (lock present)
        if [ -f /var/lock/subsys/sfa-server.py ]; then
            stop
            start
        fi
        ;;
    status)
        status sfa-server.py
        RETVAL=$?
        ;;
    *)
        echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
        exit 1
        ;;
esac

exit $RETVAL
+
--- /dev/null
+import time
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.versions.slabv1Node import SlabPosition
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+
+from sfa.rspecs.elements.versions.slabv1Node import SlabNode
+from sfa.util.sfalogging import logger
+
+from sfa.util.xrn import Xrn
+
def slab_xrn_to_hostname(xrn):
    """Return the unescaped node hostname encoded as the leaf of *xrn*."""
    node_xrn = Xrn(xrn=xrn, type='node')
    return Xrn.unescape(node_xrn.get_leaf())
+
def slab_xrn_object(root_auth, hostname):
    """Build the node Xrn (carrying urn and hrn attributes) for *hostname*
    under the authority *root_auth*.

    slab_xrn_to_hostname() is the inverse operation on the resulting urn.
    """
    escaped_hostname = Xrn.escape(hostname)
    return Xrn('.'.join([root_auth, escaped_hostname]), type='node')
+
+class SlabAggregate:
+    """Builds SFA rspecs (advertisement and manifest) from the senslab
+    testbed state, queried through the driver (OAR / LDAP).
+    """
+
+    # NOTE(review): the attributes below are class-level, hence shared by
+    # every SlabAggregate instance; none of the methods in this file read
+    # or write them -- presumably leftovers from the PL aggregate. TODO
+    # confirm they can be removed.
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+
+    prepared = False
+
+    user_options = {}
+
+    def __init__(self, driver):
+        # all testbed accesses below go through this driver object
+        self.driver = driver
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """Fetch the slice(s) registered under *slice_xrn* and build slivers.
+
+        Returns a (slices, slivers) tuple: slivers is keyed on node id,
+        plus a 'default_sliver' entry carrying ssh connection information.
+        Returns (None, {}) when slice_xrn is empty or matches no slice.
+        """
+        slivers = {}
+        sfa_slice = None
+        if not slice_xrn:
+            return (sfa_slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+
+        slices = self.driver.GetSlices(slice_filter= str(slice_name), \
+                                            slice_filter_type = 'slice_hrn')
+        logger.debug("Slabaggregate api \tget_slice_and_slivers \
+                        slices %s self.driver.hrn %s" \
+                        %(slices, self.driver.hrn))
+        if not slices:
+            return (sfa_slice, slivers)
+        #if isinstance(sfa_slice, list):
+            #sfa_slice = slices[0]
+        #else:
+            #sfa_slice = slices
+
+        # sort slivers by node id , if there is a job
+        #and therfore, node allocated to this slice
+        for sfa_slice in slices:
+            try:
+                node_ids_list = sfa_slice['node_ids']
+            except KeyError:
+                # slice without a running job: no nodes, hence no slivers
+                logger.log_exc("SLABAGGREGATE \t \
+                                        get_slice_and_slivers KeyError ")
+                continue
+
+            for node in node_ids_list:
+                sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
+                sliver_xrn.set_authority(self.driver.hrn)
+                #node_id = self.driver.root_auth + '.' + node_id
+                sliver = Sliver({'sliver_id':sliver_xrn.urn,
+                                'name': sfa_slice['slice_hrn'],
+                                'type': 'slab-node',
+                                'tags': []})
+
+                slivers[node] = sliver
+
+
+        #Add default sliver attribute :
+        #connection information for senslab
+        # NOTE(review): relies on 'sfa_slice' keeping the value from the
+        # last loop iteration; safe only because slices is non-empty here.
+        tmp = sfa_slice['slice_hrn'].split('.')
+        ldap_username = tmp[1].split('_')[0]
+        vmaddr = 'ssh ' + ldap_username + '@grenoble.senslab.info'
+        slivers['default_sliver'] = {'vm': vmaddr , 'login': ldap_username}
+
+        logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
+            %(slivers))
+        return (slices, slivers)
+
+
+
+ def get_nodes(self, slices=None, slivers=[], options={}):
+ # NT: the semantic of this function is not clear to me :
+ # if slice is not defined, then all the nodes should be returned
+ # if slice is defined, we should return only the nodes that
+ # are part of this slice
+ # but what is the role of the slivers parameter ?
+ # So i assume that slice['node_ids'] will be the same as slivers for us
+ #filter_dict = {}
+ #if slice_xrn:
+ #if not slices or not slices['node_ids']:
+ #return ([],[])
+ #tags_filter = {}
+
+ # get the granularity in second for the reservation system
+ grain = self.driver.GetLeaseGranularity()
+
+ # Commenting this part since all nodes should be returned,
+ # even if a slice is provided
+ #if slice :
+ # if 'node_ids' in slice and slice['node_ids']:
+ # #first case, a non empty slice was provided
+ # filter['hostname'] = slice['node_ids']
+ # tags_filter=filter.copy()
+ # nodes = self.driver.GetNodes(filter['hostname'])
+ # else :
+ # #second case, a slice was provided, but is empty
+ # nodes={}
+ #else :
+ # #third case, no slice was provided
+ # nodes = self.driver.GetNodes()
+ nodes = self.driver.GetNodes()
+ #geni_available = options.get('geni_available')
+ #if geni_available:
+ #filter['boot_state'] = 'boot'
+
+ #filter.update({'peer_id': None})
+ #nodes = self.driver.GetNodes(filter['hostname'])
+
+ #site_ids = []
+ #interface_ids = []
+ #tag_ids = []
+ nodes_dict = {}
+ for node in nodes:
+
+ nodes_dict[node['node_id']] = node
+ #logger.debug("SLABAGGREGATE api get_nodes nodes %s "\
+ #%(nodes ))
+ # get sites
+ #sites_dict = self.get_sites({'site_id': site_ids})
+ # get interfaces
+ #interfaces = self.get_interfaces({'interface_id':interface_ids})
+ # get tags
+ #node_tags = self.get_node_tags(tags_filter)
+
+ #if slices, this means we got to list all the nodes given to this slice
+ # Make a list of all the nodes in the slice before getting their attributes
+ rspec_nodes = []
+ slice_nodes_list = []
+ logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
+ %(slices ))
+ if slices:
+ for one_slice in slices:
+ try:
+ slice_nodes_list = one_slice['node_ids']
+ except KeyError:
+ pass
+ #for node in one_slice['node_ids']:
+ #slice_nodes_list.append(node)
+
+ reserved_nodes = self.driver.GetNodesCurrentlyInUse()
+ logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
+ %(slice_nodes_list))
+ for node in nodes:
+ # skip whitelisted nodes
+ #if node['slice_ids_whitelist']:
+ #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+ #continue
+ #rspec_node = Node()
+ #logger.debug("SLABAGGREGATE api get_nodes node %s "\
+ #%(node))
+ if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
+
+ rspec_node = SlabNode()
+ # xxx how to retrieve site['login_base']
+ #site_id=node['site_id']
+ #site=sites_dict[site_id]
+ rspec_node['mobile'] = node['mobile']
+ rspec_node['archi'] = node['archi']
+ rspec_node['radio'] = node['radio']
+
+ slab_xrn = slab_xrn_object(self.driver.root_auth, node['hostname'])
+ rspec_node['component_id'] = slab_xrn.urn
+ rspec_node['component_name'] = node['hostname']
+ rspec_node['component_manager_id'] = \
+ hrn_to_urn(self.driver.root_auth, 'authority+sa')
+
+ # Senslab's nodes are federated : there is only one authority
+ # for all Senslab sites, registered in SFA.
+ # Removing the part including the site
+ # in authority_id SA 27/07/12
+ rspec_node['authority_id'] = rspec_node['component_manager_id']
+
+ # do not include boot state (<available> element)
+ #in the manifest rspec
+
+
+ rspec_node['boot_state'] = node['boot_state']
+ if node['hostname'] in reserved_nodes:
+ rspec_node['boot_state'] = "Reserved"
+ rspec_node['exclusive'] = 'true'
+ rspec_node['hardware_types'] = [HardwareType({'name': 'slab-node'})]
+
+ # only doing this because protogeni rspec needs
+ # to advertise available initscripts
+ # add site/interface info to nodes.
+ # assumes that sites, interfaces and tags have already been prepared.
+ #site = sites_dict[node['site_id']]
+ location = Location({'country':'France'})
+ rspec_node['location'] = location
+
+
+ position = SlabPosition()
+ for field in position :
+ try:
+ position[field] = node[field]
+ except KeyError, error :
+ logger.log_exc("SLABAGGREGATE\t get_nodes \
+ position %s "%(error))
+
+ rspec_node['position'] = position
+ #rspec_node['interfaces'] = []
+
+ # Granularity
+ granularity = Granularity({'grain': grain})
+ rspec_node['granularity'] = granularity
+ rspec_node['tags'] = []
+ if node['hostname'] in slivers:
+ # add sliver info
+ sliver = slivers[node['hostname']]
+ rspec_node['sliver_id'] = sliver['sliver_id']
+ rspec_node['client_id'] = node['hostname']
+ rspec_node['slivers'] = [sliver]
+
+ # slivers always provide the ssh service
+ login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+ service = Services({'login': login})
+ rspec_node['services'] = [service]
+ rspec_nodes.append(rspec_node)
+
+ return (rspec_nodes)
+
+ def get_leases(self, slice_record = None, options = {}):
+
+ now = int(time.time())
+ lease_filter = {'clip': now }
+
+ #if slice_record:
+ #lease_filter.update({'name': slice_record['name']})
+ return_fields = ['lease_id', 'hostname', 'site_id', \
+ 'name', 'start_time', 'duration']
+ #leases = self.driver.GetLeases(lease_filter)
+ leases = self.driver.GetLeases()
+ grain = self.driver.GetLeaseGranularity()
+ site_ids = []
+ rspec_leases = []
+ for lease in leases:
+ #as many leases as there are nodes in the job
+ for node in lease['reserved_nodes']:
+ rspec_lease = Lease()
+ rspec_lease['lease_id'] = lease['lease_id']
+ #site = node['site_id']
+ slab_xrn = slab_xrn_object(self.driver.root_auth, node)
+ rspec_lease['component_id'] = slab_xrn.urn
+ #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+ #site, node['hostname'])
+ rspec_lease['slice_id'] = lease['slice_id']
+ rspec_lease['start_time'] = lease['t_from']
+ rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
+ / grain
+ rspec_leases.append(rspec_lease)
+ return rspec_leases
+
+
+ #rspec_leases = []
+ #for lease in leases:
+
+ #rspec_lease = Lease()
+
+ ## xxx how to retrieve site['login_base']
+
+ #rspec_lease['lease_id'] = lease['lease_id']
+ #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+ #site['login_base'], lease['hostname'])
+ #slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ #slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ #rspec_lease['slice_id'] = slice_urn
+ #rspec_lease['t_from'] = lease['t_from']
+ #rspec_lease['t_until'] = lease['t_until']
+ #rspec_leases.append(rspec_lease)
+ #return rspec_leases
+#from plc/aggregate.py
+ def get_rspec(self, slice_xrn=None, version = None, options={}):
+
+ rspec = None
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ logger.debug("SlabAggregate \t get_rspec ***version %s \
+ version.type %s version.version %s options %s \r\n" \
+ %(version,version.type,version.version,options))
+
+ if not slice_xrn:
+ rspec_version = version_manager._get_version(version.type, \
+ version.version, 'ad')
+
+ else:
+ rspec_version = version_manager._get_version(version.type, \
+ version.version, 'manifest')
+
+ slices, slivers = self.get_slice_and_slivers(slice_xrn)
+ #at this point sliver may be empty if no senslab job
+ #is running for this user/slice.
+ rspec = RSpec(version=rspec_version, user_options=options)
+
+
+ #if slice and 'expires' in slice:
+ #rspec.xml.set('expires', datetime_to_epoch(slice['expires']))
+ # add sliver defaults
+ #nodes, links = self.get_nodes(slice, slivers)
+ logger.debug("\r\n \r\n SlabAggregate \tget_rspec ******* slice_xrn %s \r\n \r\n"\
+ %(slice_xrn))
+
+ try:
+ lease_option = options['list_leases']
+ except KeyError:
+ #If no options are specified, at least print the resources
+ if slice_xrn :
+ lease_option = 'all'
+ pass
+
+ if lease_option in ['all', 'resources']:
+ #if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+ nodes = self.get_nodes(slices, slivers)
+ #In case creating a job, slice_xrn is not set to None
+ rspec.version.add_nodes(nodes)
+ if slice_xrn :
+ #Get user associated with this slice
+ #user = dbsession.query(RegRecord).filter_by(record_id = \
+ #slices['record_id_user']).first()
+
+ #ldap_username = (user.hrn).split('.')[1]
+
+
+ #for one_slice in slices :
+ ldap_username = slices[0]['slice_hrn']
+ tmp = ldap_username.split('.')
+ ldap_username = tmp[1].split('_')[0]
+
+ if version.type == "Slab":
+ rspec.version.add_connection_information(ldap_username)
+
+ default_sliver = slivers.get('default_sliver', [])
+ if default_sliver:
+ #default_sliver_attribs = default_sliver.get('tags', [])
+ logger.debug("SlabAggregate \tget_rspec **** \
+ default_sliver%s \r\n" %(default_sliver))
+ for attrib in default_sliver:
+ rspec.version.add_default_sliver_attribute(attrib, \
+ default_sliver[attrib])
+ if lease_option in ['all','leases']:
+ #if options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+ leases = self.get_leases(slices)
+ rspec.version.add_leases(leases)
+
+ #logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
+ #%(rspec.toxml()))
+ return rspec.toxml()
--- /dev/null
+import subprocess
+
+from datetime import datetime
+
+from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
+from sfa.util.sfalogging import logger
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegUser
+
+from sfa.trust.credential import Credential
+
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import hrn_to_urn
+
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of
+# SlabDriver should be really only about talking to the senslab testbed
+
+
+from sfa.senslab.OARrestapi import OARrestapi
+from sfa.senslab.LDAPapi import LDAPapi
+
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
+
+from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
+ slab_xrn_object
+from sfa.senslab.slabslices import SlabSlices
+
+
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
class SlabDriver(Driver):
    """Senslab driver, inheriting the generic SFA Driver class.

    Implements the SFA-facing operations (sliver/slice management,
    resource listing) on top of the senslab testbed APIs: OAR (REST)
    and LDAP.
    """
    def __init__(self, config):
        """Keep the SFA config around and open the testbed access points."""
        Driver.__init__(self, config)
        self.config = config
        # identity of this interface within the SFA hierarchy
        self.hrn = config.SFA_INTERFACE_HRN
        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
        # testbed-side entry points
        self.oar = OARrestapi()
        self.ldap = LDAPapi()
        self.time_format = "%Y-%m-%d %H:%M:%S"
        self.db = SlabDB(config, debug=True)
        self.cache = None
+
+
+ def sliver_status(self, slice_urn, slice_hrn):
+ """Receive a status request for slice named urn/hrn
+ urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+ shall return a structure as described in
+ http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ NT : not sure if we should implement this or not, but used by sface.
+
+ """
+
+ #First get the slice with the slice hrn
+ slice_list = self.GetSlices(slice_filter = slice_hrn, \
+ slice_filter_type = 'slice_hrn')
+
+ if len(slice_list) is 0:
+ raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
+
+ #Slice has the same slice hrn for each slice in the slice/lease list
+ #So fetch the info on the user once
+ one_slice = slice_list[0]
+ recuser = dbsession.query(RegRecord).filter_by(record_id = \
+ one_slice['record_id_user']).first()
+
+ #Make a list of all the nodes hostnames in use for this slice
+ slice_nodes_list = []
+ for sl in slice_list:
+ for node in sl['node_ids']:
+ slice_nodes_list.append(node['hostname'])
+
+ #Get all the corresponding nodes details
+ nodes_all = self.GetNodes({'hostname':slice_nodes_list},
+ ['node_id', 'hostname','site','boot_state'])
+ nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+
+
+
+ for sl in slice_list:
+
+ #For compatibility
+ top_level_status = 'empty'
+ result = {}
+ result.fromkeys(\
+ ['geni_urn','pl_login','geni_status','geni_resources'], None)
+ result['pl_login'] = recuser.hrn
+ logger.debug("Slabdriver - sliver_status Sliver status \
+ urn %s hrn %s sl %s \r\n " \
+ %(slice_urn, slice_hrn, sl))
+ try:
+ nodes_in_slice = sl['node_ids']
+ except KeyError:
+ #No job in the slice
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = []
+ return result
+
+ top_level_status = 'ready'
+
+ #A job is running on Senslab for this slice
+ # report about the local nodes that are in the slice only
+
+ result['geni_urn'] = slice_urn
+
+
+
+ #timestamp = float(sl['startTime']) + float(sl['walltime'])
+ #result['pl_expires'] = strftime(self.time_format, \
+ #gmtime(float(timestamp)))
+ #result['slab_expires'] = strftime(self.time_format,\
+ #gmtime(float(timestamp)))
+
+ resources = []
+ for node in sl['node_ids']:
+ res = {}
+ #res['slab_hostname'] = node['hostname']
+ #res['slab_boot_state'] = node['boot_state']
+
+ res['pl_hostname'] = node['hostname']
+ res['pl_boot_state'] = \
+ nodeall_byhostname[node['hostname']]['boot_state']
+ #res['pl_last_contact'] = strftime(self.time_format, \
+ #gmtime(float(timestamp)))
+ sliver_id = Xrn(slice_urn, type='slice', \
+ id=nodeall_byhostname[node['hostname']]['node_id'], \
+ authority=self.hrn).urn
+
+ res['geni_urn'] = sliver_id
+ if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
+
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
+ %(resources,res))
+ return result
+
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
+                                                             users, options):
+        """Create the jobs implementing *rspec_string* for this slice.
+
+        Ensures the slice and user records exist on the testbed, then
+        groups the requested leases by start_time (one OAR job per group
+        of nodes reserved at the same time for the same duration) and
+        hands them to SlabSlices.verify_slice_leases.  Returns the
+        resulting manifest rspec for the slice.
+        """
+        aggregate = SlabAggregate(self)
+
+        slices = SlabSlices(self)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record = None
+
+        if not isinstance(creds, list):
+            creds = [creds]
+
+        if users:
+            slice_record = users[0].get('slice_record', {})
+
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version \
+                                        %s slice_record %s " \
+                                        %(rspec.version,slice_record))
+
+        # ensure site record exists?
+        # ensure slice record exists
+        #Removed options to verify_slice SA 14/08/12
+        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
+                                                    sfa_peer)
+
+        #requested_attributes returned by rspec.version.get_slice_attributes()
+        #unused, removed SA 13/08/12
+        rspec.version.get_slice_attributes()
+
+        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
+
+        # ensure person records exists
+        #verify_persons returns added persons but since the return value
+        #is not used
+        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
+                                                    sfa_peer, options=options)
+
+
+
+        # add/remove slice from nodes
+
+        requested_slivers = [node.get('component_name') \
+                            for node in rspec.version.get_nodes_with_slivers()]
+        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
+        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
+                                requested_slivers %s listnodes %s" \
+                                %(requested_slivers,l))
+        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
+        #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
+
+        # add/remove leases
+        # Build one dict per requested lease that has no lease_id yet
+        # (i.e. not already booked): hostname, start_time, duration.
+        requested_lease_list = []
+
+        logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
+        rspec_requested_leases = rspec.version.get_leases()
+        for lease in rspec.version.get_leases():
+            single_requested_lease = {}
+            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
+            if not lease.get('lease_id'):
+                single_requested_lease['hostname'] = \
+                        slab_xrn_to_hostname(\
+                                        lease.get('component_id').strip())
+                single_requested_lease['start_time'] = lease.get('start_time')
+                single_requested_lease['duration'] = lease.get('duration')
+
+            if single_requested_lease.get('hostname'):
+                requested_lease_list.append(single_requested_lease)
+
+        logger.debug("SLABDRIVER.PY \tcreate_sliver APRESLEASE" )
+        #dCreate dict of leases by start_time, regrouping nodes reserved
+        #at the same
+        #time, for the same amount of time = one job on OAR
+        requested_job_dict = {}
+        for lease in requested_lease_list:
+
+            #In case it is an asap experiment start_time is empty
+            if lease['start_time'] == '':
+                lease['start_time'] = '0'
+
+            if lease['start_time'] not in requested_job_dict:
+                # first lease at this start_time: its 'hostname' becomes a
+                # list so later leases can be merged into the same job
+                if isinstance(lease['hostname'], str):
+                    lease['hostname'] = [lease['hostname']]
+
+                requested_job_dict[lease['start_time']] = lease
+
+            else :
+                # same start_time and same duration: same OAR job
+                job_lease = requested_job_dict[lease['start_time']]
+                if lease['duration'] == job_lease['duration'] :
+                    job_lease['hostname'].append(lease['hostname'])
+
+
+
+
+        logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
+            %(requested_job_dict))
+        #verify_slice_leases returns the leases , but the return value is unused
+        #here. Removed SA 13/08/12
+        slices.verify_slice_leases(sfa_slice, \
+                                    requested_job_dict, peer)
+
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        """Delete every job attached to the slice named by urn/hrn.
+
+        Always returns 1, including when no slice matches *slice_hrn*.
+        """
+        sfa_slice_list = self.GetSlices(slice_filter = slice_hrn, \
+                                            slice_filter_type = 'slice_hrn')
+
+        if not sfa_slice_list:
+            return 1
+
+        #Delete all in the slice
+        for sfa_slice in sfa_slice_list:
+
+
+            logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
+            slices = SlabSlices(self)
+            # determine if this is a peer slice
+
+            peer = slices.get_peer(slice_hrn)
+            #TODO delete_sliver SA : UnBindObjectFromPeer should be
+            #used when there is another
+            #senslab testbed, which is not the case 14/08/12 .
+
+            logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
+            # unbind from the peer while deleting, then rebind no matter
+            # what happened (finally) so the peer association is restored
+            try:
+                if peer:
+                    self.UnBindObjectFromPeer('slice', \
+                                    sfa_slice['record_id_slice'], \
+                                    peer, None)
+                self.DeleteSliceFromNodes(sfa_slice)
+            finally:
+                if peer:
+                    self.BindObjectToPeer('slice', \
+                                            sfa_slice['record_id_slice'], \
+                                            peer, sfa_slice['peer_slice_id'])
+        return 1
+
+
+ def AddSlice(self, slice_record):
+ slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
+ record_id_slice= slice_record['record_id_slice'] , \
+ record_id_user= slice_record['record_id_user'], \
+ peer_authority = slice_record['peer_authority'])
+ logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
+ %(slice_record,slab_slice))
+ slab_dbsession.add(slab_slice)
+ slab_dbsession.commit()
+ return
+
    # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        """Return the rspec advertising (or describing) testbed resources.

        slice_urn/slice_hrn are None for a plain resource discovery.
        The rspec version is taken from options['geni_rspec_version'];
        result caching is currently disabled (code kept commented out).
        """
        #cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        rspec_version = \
            version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + \
                                        options.get('info', 'default')

        # look in cache first
        #if cached_requested and self.cache and not slice_hrn:
            #rspec = self.cache.get(version_string)
            #if rspec:
                #logger.debug("SlabDriver.ListResources: \
                                #returning cached advertisement")
                #return rspec

        #panos: passing user-defined options
        aggregate = SlabAggregate(self)
        # hrn of the caller, taken from the first credential
        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        options.update({'origin_hrn':origin_hrn})
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
                        version=rspec_version, options=options)

        # cache the result
        #if self.cache and not slice_hrn:
            #logger.debug("Slab.ListResources: stores advertisement in cache")
            #self.cache.add(version_string, rspec)

        return rspec
+
+
+ def list_slices (self, creds, options):
+ # look in cache first
+ #if self.cache:
+ #slices = self.cache.get('slices')
+ #if slices:
+ #logger.debug("PlDriver.list_slices returns from cache")
+ #return slices
+
+ # get data from db
+
+ slices = self.GetSlices()
+ logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
+ slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
+ #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
+ #for slab_slice in slices]
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
+ for slice_hrn in slice_hrns]
+
+ # cache the result
+ #if self.cache:
+ #logger.debug ("SlabDriver.list_slices stores value in cache")
+ #self.cache.add('slices', slice_urns)
+
+ return slice_urns
+
+
+ def register (self, sfa_record, hrn, pub_key):
+ """
+ Adding new user, slice, node or site should not be handled
+ by SFA.
+
+ Adding nodes = OAR
+ Adding users = LDAP Senslab
+ Adding slice = Import from LDAP users
+ Adding site = OAR
+ """
+ return -1
+
+ #No site or node record update allowed
+ def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+ pointer = old_sfa_record['pointer']
+ old_sfa_record_type = old_sfa_record['type']
+
+ # new_key implemented for users only
+ if new_key and old_sfa_record_type not in [ 'user' ]:
+ raise UnknownSfaType(old_sfa_record_type)
+
+ #if (type == "authority"):
+ #self.shell.UpdateSite(pointer, new_sfa_record)
+
+ if old_sfa_record_type == "slice":
+ slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
+ hrn, new_sfa_record)
+ if 'name' in slab_record:
+ slab_record.pop('name')
+ #Prototype should be UpdateSlice(self,
+ #auth, slice_id_or_name, slice_fields)
+ #Senslab cannot update slice since slice = job
+ #so we must delete and create another job
+ self.UpdateSlice(pointer, slab_record)
+
+ elif old_sfa_record_type == "user":
+ update_fields = {}
+ all_fields = new_sfa_record
+ for key in all_fields.keys():
+ if key in ['first_name', 'last_name', 'title', 'email',
+ 'password', 'phone', 'url', 'bio', 'accepted_aup',
+ 'enabled']:
+ update_fields[key] = all_fields[key]
+ self.UpdatePerson(pointer, update_fields)
+
+ if new_key:
+ # must check this key against the previous one if it exists
+ persons = self.GetPersons([pointer], ['key_ids'])
+ person = persons[0]
+ keys = person['key_ids']
+ keys = self.GetKeys(person['key_ids'])
+
+ # Delete all stale keys
+ key_exists = False
+ for key in keys:
+ if new_key != key['key']:
+ self.DeleteKey(key['key_id'])
+ else:
+ key_exists = True
+ if not key_exists:
+ self.AddPersonKey(pointer, {'key_type': 'ssh', \
+ 'key': new_key})
+
+
+ return True
+
+
    def remove (self, sfa_record):
        """Remove a user or slice record from the testbed.

        Users are marked disabled in the senslab LDAP; slices have their
        jobs deleted.  Authorities/sites are managed by OAR and are left
        untouched.

        :returns: True
        """
        sfa_record_type = sfa_record['type']
        hrn = sfa_record['hrn']
        if sfa_record_type == 'user':

            #get user from senslab ldap
            person = self.GetPersons(sfa_record)
            #No registering at a given site in Senslab.
            #Once registered to the LDAP, all senslab sites are
            #accessible.
            if person :
                #Mark account as disabled in ldap
                self.DeletePerson(sfa_record)
        elif sfa_record_type == 'slice':
            # only delete jobs when the slice actually exists
            if self.GetSlices(slice_filter = hrn, \
                                slice_filter_type = 'slice_hrn'):
                self.DeleteSlice(sfa_record)

        #elif type == 'authority':
            #if self.GetSites(pointer):
                #self.DeleteSite(pointer)

        return True
+
+
+
    #TODO clean GetPeers. 05/07/12SA
    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
        """Return the authority records found in the registry database.

        :param auth: unused.
        :param peer_filter: hrn of a single authority to return.
        :param return_fields_list: accepted for API compatibility only;
            NOTE(review): projecting the records on these fields is not
            implemented — confirm no caller relies on it.
        :returns: list of RegRecord authority records (possibly empty).
        """
        existing_records = {}
        existing_hrns_by_types = {}
        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                    return_field %s " %(auth , peer_filter, return_fields_list))
        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
        # index records by (hrn, type) and group the hrns by type
        for record in all_records:
            existing_records[(record.hrn, record.type)] = record
            if record.type not in existing_hrns_by_types:
                existing_hrns_by_types[record.type] = [record.hrn]
                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                    existing_hrns_by_types %s " %( existing_hrns_by_types))
            else:

                logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                                        %(record.type,record.hrn))
                existing_hrns_by_types[record.type].append(record.hrn)


        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                                %( existing_hrns_by_types))
        records_list = []

        try:
            # a KeyError here simply means no matching authority record
            if peer_filter:
                records_list.append(existing_records[(peer_filter,'authority')])
            else :
                for hrn in existing_hrns_by_types['authority']:
                    records_list.append(existing_records[(hrn,'authority')])

            logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \
                                %(records_list))

        except KeyError:
            pass

        return_records = records_list
        if not peer_filter and not return_fields_list:
            return records_list


        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
                            %(return_records))
        return return_records
+
+
    #TODO : Handling OR request in make_ldap_filters_from_records
    #instead of the for loop
    #over the records' list
    def GetPersons(self, person_filter=None):
        """
        Return the enabled user accounts found in the senslab LDAP.

        :param person_filter: list of record dictionaries (usually a
            single one) used to look up specific users, or None to fetch
            every enabled account.
        :returns: list of user dicts as returned by LdapFindUser.
        """
        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
                        %(person_filter))
        person_list = []
        if person_filter and isinstance(person_filter, list):
        #If we are looking for a list of users (list of dict records)
        #Usually the list contains only one user record
            for searched_attributes in person_filter:

                #Get only enabled user accounts in senslab LDAP :
                #add a filter for make_ldap_filters_from_record
                person = self.ldap.LdapFindUser(searched_attributes, \
                                is_user_enabled=True)
                person_list.append(person)

        else:
            #Get only enabled user accounts in senslab LDAP :
            #add a filter for make_ldap_filters_from_record
            person_list = self.ldap.LdapFindUser(is_user_enabled=True)

        return person_list
+
+ def GetTimezone(self):
+ server_timestamp, server_tz = self.oar.parser.\
+ SendRequest("GET_timezone")
+ return server_timestamp, server_tz
+
+
+ def DeleteJobs(self, job_id, slice_hrn):
+ if not job_id or job_id is -1:
+ return
+ username = slice_hrn.split(".")[-1].rstrip("_slice")
+ reqdict = {}
+ reqdict['method'] = "delete"
+ reqdict['strval'] = str(job_id)
+
+
+ answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
+ reqdict,username)
+ logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
+ username %s" %(job_id,answer, username))
+ return answer
+
+
+
+ ##TODO : Unused GetJobsId ? SA 05/07/12
+ #def GetJobsId(self, job_id, username = None ):
+ #"""
+ #Details about a specific job.
    #Includes details about submission time, job type, state, events,
    #owner, assigned resources, walltime etc...
+
+ #"""
+ #req = "GET_jobs_id"
+ #node_list_k = 'assigned_network_address'
+ ##Get job info from OAR
+ #job_info = self.oar.parser.SendRequest(req, job_id, username)
+
+ #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
+ #try:
+ #if job_info['state'] == 'Terminated':
+ #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
+ #%(job_id))
+ #return None
+ #if job_info['state'] == 'Error':
+ #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
+ #%(job_info))
+ #return None
+
+ #except KeyError:
+ #logger.error("SLABDRIVER \tGetJobsId KeyError")
+ #return None
+
+ #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
+ #node_list_k)
+ ##Replaces the previous entry
+ ##"assigned_network_address" / "reserved_resources"
+ ##with "node_ids"
+ #job_info.update({'node_ids':parsed_job_info[node_list_k]})
+ #del job_info[node_list_k]
+ #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
+ #return job_info
+
+
    def GetJobsResources(self, job_id, username = None):
        """Return the hostnames of the nodes assigned to an OAR job.

        :param job_id: OAR job identifier.
        :param username: OAR user the request is issued as.
        :returns: dict of the form {'node_ids': [hostname, ...]}.
        """
        #job_resources=['reserved_resources', 'assigned_resources',\
                        #'job_id', 'job_uri', 'assigned_nodes',\
                        #'api_timestamp']
        #assigned_res = ['resource_id', 'resource_uri']
        #assigned_n = ['node', 'node_uri']

        req = "GET_jobs_id_resources"


        #Get job resources list from OAR
        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
        logger.debug("SLABDRIVER \t GetJobsResources  %s " %(node_id_list))

        # resolve OAR resource ids into testbed hostnames
        hostname_list = \
            self.__get_hostnames_from_oar_node_ids(node_id_list)


        #Replaces the previous entry "assigned_network_address" /
        #"reserved_resources"
        #with "node_ids"
        job_info = {'node_ids': hostname_list}

        return job_info
+
+
+ def get_info_on_reserved_nodes(self, job_info, node_list_name):
+ #Get the list of the testbed nodes records and make a
+ #dictionnary keyed on the hostname out of it
+ node_list_dict = self.GetNodes()
+ #node_hostname_list = []
+ node_hostname_list = [node['hostname'] for node in node_list_dict]
+ #for node in node_list_dict:
+ #node_hostname_list.append(node['hostname'])
+ node_dict = dict(zip(node_hostname_list, node_list_dict))
+ try :
+ reserved_node_hostname_list = []
+ for index in range(len(job_info[node_list_name])):
+ #job_info[node_list_name][k] =
+ reserved_node_hostname_list[index] = \
+ node_dict[job_info[node_list_name][index]]['hostname']
+
+ logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
+ reserved_node_hostname_list %s" \
+ %(reserved_node_hostname_list))
+ except KeyError:
+ logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
+
+ return reserved_node_hostname_list
+
+ def GetNodesCurrentlyInUse(self):
+ """Returns a list of all the nodes already involved in an oar job"""
+ return self.oar.parser.SendRequest("GET_running_jobs")
+
+ def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
+ full_nodes_dict_list = self.GetNodes()
+ #Put the full node list into a dictionary keyed by oar node id
+ oar_id_node_dict = {}
+ for node in full_nodes_dict_list:
+ oar_id_node_dict[node['oar_id']] = node
+
+ #logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
+ #oar_id_node_dict %s" %(oar_id_node_dict))
+
+ hostname_dict_list = []
+ for resource_id in resource_id_list:
+ #Because jobs requested "asap" do not have defined resources
+ if resource_id is not "Undefined":
+ hostname_dict_list.append(\
+ oar_id_node_dict[resource_id]['hostname'])
+
+ #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
+ return hostname_dict_list
+
    def GetReservedNodes(self,username = None):
        """Return the current and scheduled reservations known to OAR.

        Each reservation dict is augmented with a 'reserved_nodes' list
        of hostnames resolved from its OAR resource ids.

        :param username: restrict the reservations to this OAR user.
        """
        #Get the nodes in use and the reserved nodes
        reservation_dict_list = \
                        self.oar.parser.SendRequest("GET_reserved_nodes", username = username)


        for resa in reservation_dict_list:
            logger.debug ("GetReservedNodes resa %s"%(resa))
            #dict list of hostnames and their site
            resa['reserved_nodes'] = \
                self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])

        #del resa['resource_ids']
        return reservation_dict_list
+
    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
        """Return node records from OAR, optionally filtered/projected.

        :param node_filter_dict: dictionary of lists keyed by node field;
            a node is kept when node[field] equals one of the listed
            values.
        :param return_fields_list: optional list of fields to project the
            returned records onto.

        NOTE(review): on a bad filter or field key this logs and returns
        None instead of a list — confirm callers handle that.
        """
        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
        node_dict_list = node_dict_by_id.values()
        logger.debug (" SLABDRIVER GetNodes node_filter_dict %s return_fields_list %s "%(node_filter_dict,return_fields_list))
        #No filtering needed return the list directly
        if not (node_filter_dict or return_fields_list):
            return node_dict_list

        return_node_list = []
        if node_filter_dict:
            for filter_key in node_filter_dict:
                try:
                    #Filter the node_dict_list by each value contained in the
                    #list node_filter_dict[filter_key]
                    for value in node_filter_dict[filter_key]:
                        for node in node_dict_list:
                            if node[filter_key] == value:
                                if return_fields_list :
                                    tmp = {}
                                    for k in return_fields_list:
                                        tmp[k] = node[k]
                                    return_node_list.append(tmp)
                                else:
                                    return_node_list.append(node)
                except KeyError:
                    logger.log_exc("GetNodes KeyError")
                    return


        return return_node_list
+
+
+ def GetSites(self, site_filter_name_list = None, return_fields_list = None):
+ site_dict = self.oar.parser.SendRequest("GET_sites")
+ #site_dict : dict where the key is the sit ename
+ return_site_list = []
+ if not ( site_filter_name_list or return_fields_list):
+ return_site_list = site_dict.values()
+ return return_site_list
+
+ for site_filter_name in site_filter_name_list:
+ if site_filter_name in site_dict:
+ if return_fields_list:
+ for field in return_fields_list:
+ tmp = {}
+ try:
+ tmp[field] = site_dict[site_filter_name][field]
+ except KeyError:
+ logger.error("GetSites KeyError %s "%(field))
+ return None
+ return_site_list.append(tmp)
+ else:
+ return_site_list.append( site_dict[site_filter_name])
+
+
+ return return_site_list
+
+
+
    def GetSlices(self, slice_filter = None, slice_filter_type = None):
        #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
        #return_fields_list = None):
        """ Get the slice records from the slab db.
        Returns a list with a single slice dict if slice_filter and
        slice_filter_type are specified.
        Returns a list of slice dictionaries if there are no filters
        specified.

        Each returned slice dict is augmented with the OAR job id and the
        reserved nodes of every lease attached to the slice.
        """
        login = None
        return_slice_list = []
        slicerec = {}
        slicerec_dict = {}
        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
        slicerec_dictlist = []


        if slice_filter_type in authorized_filter_types_list:


            def __get_slice_records(slice_filter = None, slice_filter_type = None):
                # returns (login, slice dict) for the single slice matching
                # the filter, or (login, []) when there is no such slice
                login = None
                #Get list of slices based on the slice hrn
                if slice_filter_type == 'slice_hrn':

                    # login is the second hrn component, minus any
                    # trailing '_...' part
                    login = slice_filter.split(".")[1].split("_")[0]

                    #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                    #otherwise will mess up the RegRecord in Resolve, don't know
                    #why - SA 08/08/2012

                    #Only one entry for one user = one slice in slice_senslab table
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()

                #Get slice based on user id
                if slice_filter_type == 'record_id_user':
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()

                if slicerec is None:
                    return login, []
                else:
                    fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()

                    if login is None :
                        login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                    return login, fixed_slicerec_dict




            login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
            logger.debug(" SLABDRIVER \tGetSlices login %s \
                            slice record %s" \
                            %(login, fixed_slicerec_dict))



            #One slice can have multiple jobs

            leases_list = self.GetReservedNodes(username = login)
            #If no job is running or no job scheduled
            if leases_list == [] :
                return [fixed_slicerec_dict]

            #Several jobs for one slice
            for lease in leases_list :
                slicerec_dict = {}


                #Check with OAR the status of the job if a job id is in
                #the slice record



                slicerec_dict['oar_job_id'] = lease['lease_id']
                #reserved_list = []
                #for reserved_node in lease['reserved_nodes']:
                    #reserved_list.append(reserved_node['hostname'])
                reserved_list = lease['reserved_nodes']
                #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})


                slicerec_dictlist.append(slicerec_dict)
                logger.debug("SLABDRIVER.PY \tGetSlices  slicerec_dict %s slicerec_dictlist %s lease['reserved_nodes'] %s" %(slicerec_dict, slicerec_dictlist,lease['reserved_nodes'] ))

            logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dictlist  %s"\
                            %(slicerec_dictlist))

            return slicerec_dictlist


        else:
            # no (valid) filter: return every slice, each augmented with
            # the leases of its owner
            slice_list = slab_dbsession.query(SliceSenslab).all()
            leases_list = self.GetReservedNodes()


            slicerec_dictlist = []
            return_slice_list = []
            for record in slice_list:
                return_slice_list.append(record.dump_sqlalchemyobj_to_dict())

            for fixed_slicerec_dict in return_slice_list:
                slicerec_dict = {}
                # login of the slice owner, derived from the slice hrn
                owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                for lease in leases_list:
                    if owner == lease['user']:
                        slicerec_dict['oar_job_id'] = lease['lease_id']
                        reserved_list = []
                        for reserved_node in lease['reserved_nodes']:
                            reserved_list.append(reserved_node['hostname'])
                        #slicerec_dict.update({'node_ids':{'hostname':reserved_list}})
                        #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                        slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                        slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                        slicerec_dict.update(fixed_slicerec_dict)
                        slicerec_dict.update({'hrn':\
                                            str(fixed_slicerec_dict['slice_hrn'])})
                        slicerec_dictlist.append(slicerec_dict)

            logger.debug("SLABDRIVER.PY \tGetSlices RETURN slices %s \
                        slice_filter %s " %(return_slice_list, slice_filter))

            #if return_fields_list:
                #return_slice_list = parse_filter(sliceslist, \
                                #slice_filter,'slice', return_fields_list)

            return slicerec_dictlist
+
+
+ def testbed_name (self): return self.hrn
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version (self):
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed':self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions,
+ }
+
+
+
+
+
+
    ##
    # Convert SFA fields to PLC-like senslab fields for use when registering
    # or updating a registry record in the senslab database
    #
    # @param type type of record (user, slice, ...)
    # @param hrn human readable name
    # @param sfa_fields dictionary of SFA fields
    # @return dictionary of senslab fields
+ def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
+
+
+ slab_record = {}
+ #for field in record:
+ # slab_record[field] = record[field]
+
+ if sfa_type == "slice":
+ #instantion used in get_slivers ?
+ if not "instantiation" in slab_record:
+ slab_record["instantiation"] = "senslab-instantiated"
+ #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
+ #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
+ slab_record["hrn"] = hrn
+ logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
+ slab_record %s " %(slab_record['hrn']))
+ if "url" in record:
+ slab_record["url"] = record["url"]
+ if "description" in record:
+ slab_record["description"] = record["description"]
+ if "expires" in record:
+ slab_record["expires"] = int(record["expires"])
+
+ #nodes added by OAR only and then imported to SFA
+ #elif type == "node":
+ #if not "hostname" in slab_record:
+ #if not "hostname" in record:
+ #raise MissingSfaInfo("hostname")
+ #slab_record["hostname"] = record["hostname"]
+ #if not "model" in slab_record:
+ #slab_record["model"] = "geni"
+
+ #One authority only
+ #elif type == "authority":
+ #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+
+ #if not "name" in slab_record:
+ #slab_record["name"] = hrn
+
+ #if not "abbreviated_name" in slab_record:
+ #slab_record["abbreviated_name"] = hrn
+
+ #if not "enabled" in slab_record:
+ #slab_record["enabled"] = True
+
+ #if not "is_public" in slab_record:
+ #slab_record["is_public"] = True
+
+ return slab_record
+
+
+
+
+ def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
+ """ Transforms unix timestamp into valid OAR date format """
+
+ #Used in case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if xp_utc_timestamp:
+ #transform the xp_utc_timestamp into server readable time
+ xp_server_readable_date = datetime.fromtimestamp(int(\
+ xp_utc_timestamp)).strftime(self.time_format)
+
+ return xp_server_readable_date
+
+ else:
+ return None
+
+
+
+
+ def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
+ lease_start_time, lease_duration, slice_user=None):
+ lease_dict = {}
+ lease_dict['lease_start_time'] = lease_start_time
+ lease_dict['lease_duration'] = lease_duration
+ lease_dict['added_nodes'] = added_nodes
+ lease_dict['slice_name'] = slice_name
+ lease_dict['slice_user'] = slice_user
+ lease_dict['grain'] = self.GetLeaseGranularity()
+ lease_dict['time_format'] = self.time_format
+
+ def __create_job_structure_request_for_OAR(lease_dict):
+ """ Creates the structure needed for a correct POST on OAR.
+ Makes the timestamp transformation into the appropriate format.
+ Sends the POST request to create the job with the resources in
+ added_nodes.
+
+ """
+
+ nodeid_list = []
+ reqdict = {}
+
+
+ reqdict['workdir'] = '/tmp'
+ reqdict['resource'] = "{network_address in ("
+
+ for node in lease_dict['added_nodes']:
+ logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
+ node %s" %(node))
+
+ # Get the ID of the node
+ nodeid = node
+ reqdict['resource'] += "'" + nodeid + "', "
+ nodeid_list.append(nodeid)
+
+ custom_length = len(reqdict['resource'])- 2
+ reqdict['resource'] = reqdict['resource'][0:custom_length] + \
+ ")}/nodes=" + str(len(nodeid_list))
+
+ def __process_walltime(duration):
+ """ Calculates the walltime in seconds from the duration in H:M:S
+ specified in the RSpec.
+
+ """
+ if duration:
+ # Fixing the walltime by adding a few delays.
+ # First put the walltime in seconds oarAdditionalDelay = 20;
+ # additional delay for /bin/sleep command to
+ # take in account prologue and epilogue scripts execution
+ # int walltimeAdditionalDelay = 240; additional delay
+ desired_walltime = duration
+ total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
+ sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
+ walltime = []
+ #Put the walltime back in str form
+ #First get the hours
+ walltime.append(str(total_walltime / 3600))
+ total_walltime = total_walltime - 3600 * int(walltime[0])
+ #Get the remaining minutes
+ walltime.append(str(total_walltime / 60))
+ total_walltime = total_walltime - 60 * int(walltime[1])
+ #Get the seconds
+ walltime.append(str(total_walltime))
+
+ else:
+ logger.log_exc(" __process_walltime duration null")
+
+ return walltime, sleep_walltime
+
+
+ walltime, sleep_walltime = \
+ __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
+
+
+ reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
+ ":" + str(walltime[1]) + ":" + str(walltime[2])
+ reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+
+ #In case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if lease_dict['lease_start_time'] is not '0':
+ #Readable time accepted by OAR
+ start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
+ strftime(lease_dict['time_format'])
+ reqdict['reservation'] = start_time
+ #If there is not start time, Immediate XP. No need to add special
+ # OAR parameters
+
+
+ reqdict['type'] = "deploy"
+ reqdict['directory'] = ""
+ reqdict['name'] = "SFA_" + lease_dict['slice_user']
+
+ return reqdict
+
+
+ #Create the request for OAR
+ reqdict = __create_job_structure_request_for_OAR(lease_dict)
+ # first step : start the OAR job and update the job
+ logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
+ \r\n " %(reqdict))
+
+ answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
+ reqdict, slice_user)
+ logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
+ try:
+ jobid = answer['id']
+ except KeyError:
+ logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
+ Impossible to create job %s " %(answer))
+ return
+
+
+ def __configure_experiment(jobid, added_nodes):
+ # second step : configure the experiment
+ # we need to store the nodes in a yaml (well...) file like this :
+ # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
+ job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
+ job_file.write('[')
+ job_file.write(str(added_nodes[0].strip('node')))
+ for node in added_nodes[1:len(added_nodes)] :
+ job_file.write(', '+ node.strip('node'))
+ job_file.write(']')
+ job_file.close()
+ return
+
+ def __launch_senslab_experiment(jobid):
+ # third step : call the senslab-experiment wrapper
+ #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
+ # "+str(jobid)+" "+slice_user
+ javacmdline = "/usr/bin/java"
+ jarname = \
+ "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
+ #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
+ #str(jobid), slice_user])
+ output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
+ slice_user],stdout=subprocess.PIPE).communicate()[0]
+
+ logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \
+ %(output))
+ return
+
+
+
+ if jobid :
+ logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
+ added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
+
+
+ __configure_experiment(jobid, added_nodes)
+ __launch_senslab_experiment(jobid)
+
+ return
+
    def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
        """Create a new lease: launch an OAR job on the given nodes.

        The OAR username is the last component of the first researcher
        hrn of the slice record.

        :param hostname_list: nodes to reserve.
        :param slice_record: sfa slice record; must hold
            'reg-researchers' and 'slice_hrn'.
        :param lease_start_time: unix timestamp.
        :param lease_duration: duration in lease-grain units.
        """
        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
                slice_record %s lease_start_time %s lease_duration %s "\
                %( hostname_list, slice_record , lease_start_time, \
                lease_duration))

        # OAR login = last component of the researcher's hrn
        tmp = slice_record['reg-researchers'][0].split(".")
        username = tmp[(len(tmp)-1)]
        self.LaunchExperimentOnOAR(hostname_list, slice_record['slice_hrn'], lease_start_time, lease_duration, username)
        start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
        logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))

        return
+
+
+ #Delete the jobs from job_senslab table
+ def DeleteSliceFromNodes(self, slice_record):
+
+ self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
+ return
+
+
+ def GetLeaseGranularity(self):
+ """ Returns the granularity of Senslab testbed.
+ OAR returns seconds for experiments duration.
+ Defined in seconds. """
+
+ grain = 60
+ return grain
+
    def GetLeases(self, lease_filter_dict=None):
        """Return the leases (OAR reservations) of the testbed.

        Each lease is augmented with the hrn/urn of its slice and the
        urns of its reserved nodes.

        :param lease_filter_dict: when given, only leases whose slice hrn
            equals lease_filter_dict['name'] are returned.
        """
        unfiltered_reservation_list = self.GetReservedNodes()

        ##Synchronize slice_table of sfa senslab db
        #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)

        reservation_list = []
        #Find the slice associated with this user senslab ldap uid
        logger.debug(" SLABDRIVER.PY \tGetLeases ")
        #Create user dict first to avoid looking several times for
        #the same user in LDAP SA 27/07/12
        resa_user_dict = {}
        for resa in unfiltered_reservation_list:
            logger.debug("SLABDRIVER \tGetLeases USER %s"\
                            %(resa['user']))
            if resa['user'] not in resa_user_dict:
                logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
                ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
                ldap_info = ldap_info[0][1]
                # map the ldap mail back to the registry user record
                user = dbsession.query(RegUser).filter_by(email = \
                                                ldap_info['mail'][0]).first()
                #Separated in case user not in database : record_id not defined SA 17/07//12
                query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
                if query_slice_info:
                    slice_info = query_slice_info.first()
                else:
                    slice_info = None

                resa_user_dict[resa['user']] = {}
                resa_user_dict[resa['user']]['ldap_info'] = user
                resa_user_dict[resa['user']]['slice_info'] = slice_info

        logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
                        %(resa_user_dict))
        for resa in unfiltered_reservation_list:


            #Put the slice_urn
            resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
            resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
            #Put the slice_urn
            #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
            resa['component_id_list'] = []
            #Transform the hostnames into urns (component ids)
            for node in resa['reserved_nodes']:
                #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
                        #self.root_auth, node['hostname']))
                slab_xrn = slab_xrn_object(self.root_auth, node)
                resa['component_id_list'].append(slab_xrn.urn)

        #Filter the reservation list if necessary
        #Returns all the leases associated with a given slice
        if lease_filter_dict:
            logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
                            %(lease_filter_dict))
            for resa in unfiltered_reservation_list:
                if lease_filter_dict['name'] == resa['slice_hrn']:
                    reservation_list.append(resa)
        else:
            reservation_list = unfiltered_reservation_list

        logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
                        %(reservation_list))
        return reservation_list
+
    # registry hook decorating records with testbed-specific info;
    # senslab implements it through fill_record_info below
    def augment_records_with_testbed_info (self, sfa_records):
        return self.fill_record_info (sfa_records)
+
    def fill_record_info(self, record_list):
        """
        Given a SFA record, fill in the senslab specific and SFA specific
        fields in the record.

        Slice records get their owner's information and OAR job ids; user
        records additionally have their slice record appended to
        record_list (so it is processed in a later iteration).  Records
        are modified in place; returns None.
        """

        logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
        if not isinstance(record_list, list):
            record_list = [record_list]

        try:
            for record in record_list:
                #If the record is a SFA slice record, then add information
                #about the user of this slice. This kind of
                #information is in the Senslab's DB.
                if str(record['type']) == 'slice':
                    #Get slab slice record.
                    recslice_list = self.GetSlices(slice_filter = \
                                                str(record['hrn']),\
                                                slice_filter_type = 'slice_hrn')

                    recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                            recslice_list[0]['record_id_user']).first()
                    logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
                    record.update({'PI':[recuser.hrn],
                                'researcher': [recuser.hrn],
                                'name':record['hrn'],
                                'oar_job_id':[],
                                'node_ids': [],
                                'person_ids':[recslice_list[0]['record_id_user']],
                                'geni_urn':'', #For client_helper.py compatibility
                                'keys':'', #For client_helper.py compatibility
                                'key_ids':''}) #For client_helper.py compatibility

                    # collect every job id attached to this slice
                    try:
                        for rec in recslice_list:
                            record['oar_job_id'].append(rec['oar_job_id'])
                    except KeyError:
                        pass

                    logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
                                    recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
                if str(record['type']) == 'user':
                    #The record is a SFA user record.
                    #Get the information about his slice from Senslab's DB
                    #and add it to the user record.
                    recslice_list = self.GetSlices(\
                            slice_filter = record['record_id'],\
                            slice_filter_type = 'record_id_user')

                    logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
                                    recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
                    #Append slice record in records list,
                    #therefore fetches user and slice info again(one more loop)
                    #Will update PIs and researcher for the slice
                    recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                                recslice_list[0]['record_id_user']).first()
                    logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
                                    recuser %s \r\n \r\n" %(recuser))
                    recslice = {}
                    recslice = recslice_list[0]
                    recslice.update({'PI':[recuser.hrn],
                        'researcher': [recuser.hrn],
                        'name':record['hrn'],
                        'node_ids': [],
                        'oar_job_id': [],
                        'person_ids':[recslice_list[0]['record_id_user']]})
                    try:
                        for rec in recslice_list:
                            recslice['oar_job_id'].append(rec['oar_job_id'])
                    except KeyError:
                        pass

                    recslice.update({'type':'slice', \
                                                'hrn':recslice_list[0]['slice_hrn']})


                    #GetPersons takes [] as filters
                    #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
                    user_slab = self.GetPersons([record])


                    record.update(user_slab[0])
                    #For client_helper.py compatibility
                    record.update( { 'geni_urn':'',
                    'keys':'',
                    'key_ids':'' })
                    record_list.append(recslice)

                    logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
                                INFO TO USER records %s" %(record_list))
                logger.debug("SLABDRIVER.PY \tfill_record_info END \
                                #record %s \r\n \r\n " %(record))

        except TypeError, error:
            logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
                                                     %(error))
        #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")

        return
+
+ #self.fill_record_slab_info(records)
+
+
+
+
+
+ #TODO Update membership? update_membership_list SA 05/07/12
+ #def update_membership_list(self, oldRecord, record, listName, addFunc, \
+ #delFunc):
+        ## get a list of the HRNs that are members of the old and new records
+ #if oldRecord:
+ #oldList = oldRecord.get(listName, [])
+ #else:
+ #oldList = []
+ #newList = record.get(listName, [])
+
+ ## if the lists are the same, then we don't have to update anything
+ #if (oldList == newList):
+ #return
+
+ ## build a list of the new person ids, by looking up each person to get
+ ## their pointer
+ #newIdList = []
+ #table = SfaTable()
+ #records = table.find({'type': 'user', 'hrn': newList})
+ #for rec in records:
+ #newIdList.append(rec['pointer'])
+
+ ## build a list of the old person ids from the person_ids field
+ #if oldRecord:
+ #oldIdList = oldRecord.get("person_ids", [])
+ #containerId = oldRecord.get_pointer()
+ #else:
+ ## if oldRecord==None, then we are doing a Register, instead of an
+ ## update.
+ #oldIdList = []
+ #containerId = record.get_pointer()
+
+ ## add people who are in the new list, but not the oldList
+ #for personId in newIdList:
+ #if not (personId in oldIdList):
+ #addFunc(self.plauth, personId, containerId)
+
+ ## remove people who are in the old list, but not the new list
+ #for personId in oldIdList:
+ #if not (personId in newIdList):
+ #delFunc(self.plauth, personId, containerId)
+
+ #def update_membership(self, oldRecord, record):
+
+ #if record.type == "slice":
+ #self.update_membership_list(oldRecord, record, 'researcher',
+ #self.users.AddPersonToSlice,
+ #self.users.DeletePersonFromSlice)
+ #elif record.type == "authority":
+ ## xxx TODO
+ #pass
+
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
+
+
+#TODO FUNCTIONS SECTION 04/07/2012 SA
+
+    #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
+    #04/07/2012 SA
+    def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
+        """ This method is a hopefully temporary hack to let the sfa correctly
+        detach the objects it creates from a remote peer object. This is
+        needed so that the sfa federation link can work in parallel with
+        RefreshPeer, as RefreshPeer depends on remote objects being correctly
+        marked.
+        Parameters:
+        auth : struct, API authentication structure
+            AuthMethod : string, Authentication method to use
+        object_type : string, Object type, among 'site','person','slice',
+        'node','key'
+        object_id : int, object_id
+        shortname : string, peer shortname
+        FROM PLC DOC
+
+        """
+        #No-op stub for the senslab testbed: only logs the call.
+        logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
+                        DO NOTHING \r\n ")
+        return
+
+    #TODO Is BindObjectToPeer still necessary ? Currently does nothing
+    #04/07/2012 SA
+    def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
+                                                    remote_object_id=None):
+        """This method is a hopefully temporary hack to let the sfa correctly
+        attach the objects it creates to a remote peer object. This is needed
+        so that the sfa federation link can work in parallel with RefreshPeer,
+        as RefreshPeer depends on remote objects being correctly marked.
+        Parameters:
+        shortname : string, peer shortname
+        remote_object_id : int, remote object_id, set to 0 if unknown
+        FROM PLC API DOC
+
+        """
+        #No-op stub for the senslab testbed: only logs the call.
+        logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO UpdateSlice 04/07/2012 SA
+    #Function should delete and create another job since in senslab slice=job
+    def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
+        """Updates the parameters of an existing slice with the values in
+        slice_fields.
+        Users may only update slices of which they are members.
+        PIs may update any of the slices at their sites, or any slices of
+        which they are members. Admins may update any slice.
+        Only PIs and admins may update max_nodes. Slices cannot be renewed
+        (by updating the expires parameter) more than 8 weeks into the future.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO UpdatePerson 04/07/2012 SA
+    def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
+        """Updates a person. Only the fields specified in person_fields
+        are updated, all other fields are left untouched.
+        Users and techs can only update themselves. PIs can only update
+        themselves and other non-PIs at their sites.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO GetKeys 04/07/2012 SA
+    def GetKeys(self, auth, key_filter=None, return_fields=None):
+        """Returns an array of structs containing details about keys.
+        If key_filter is specified and is an array of key identifiers,
+        or a struct of key attributes, only keys matching the filter
+        will be returned. If return_fields is specified, only the
+        specified details will be returned.
+
+        Admin may query all keys. Non-admins may only query their own keys.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER  GetKeys EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO DeleteKey 04/07/2012 SA
+    def DeleteKey(self, auth, key_id):
+        """ Deletes a key.
+        Non-admins may only delete their own keys.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
+        return
+
+
+    #TODO : Check rights to delete person
+    def DeletePerson(self, auth, person_record):
+        """ Disable an existing account in senslab LDAP.
+        Users and techs can only delete themselves. PIs can only
+        delete themselves and other non-PIs at their sites.
+        Admins can delete anyone.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Disable user account in senslab LDAP
+        ret = self.ldap.LdapMarkUserAsDeleted(person_record)
+        logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
+        return ret
+
+    #TODO Check DeleteSlice, check rights 05/07/2012 SA
+    def DeleteSlice(self, auth, slice_record):
+        """ Deletes the specified slice.
+        Senslab : Kill the job associated with the slice if there is one
+        using DeleteSliceFromNodes.
+        Updates the slice record in slab db to remove the slice nodes.
+
+        Users may only delete slices of which they are members. PIs may
+        delete any of the slices at their sites, or any slices of which
+        they are members. Admins may delete any slice.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #NOTE(review): rights are not checked yet and nothing is returned,
+        #although the docstring promises a return value -- TODO confirm.
+        self.DeleteSliceFromNodes(slice_record)
+        logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
+        return
+
+    #TODO AddPerson 04/07/2012 SA
+    #def AddPerson(self, auth, person_fields=None):
+    def AddPerson(self, record):#TODO fixing 28/08//2012 SA
+        """Adds a new account. Any fields specified in records are used,
+        otherwise defaults are used.
+        Accounts are disabled by default. To enable an account,
+        use UpdatePerson().
+        Returns the new person_id (> 0) if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        ret = self.ldap.LdapAddUser(record)
+        logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
+        # Fix: propagate the LDAP return value -- the docstring promises a
+        # return value and verify_persons assigns the result of AddPerson;
+        # the previous code computed ret but returned None.
+        return ret
+
+    #TODO AddPersonToSite 04/07/2012 SA
+    def AddPersonToSite (self, auth, person_id_or_email, \
+                                                site_id_or_login_base=None):
+        """ Adds the specified person to the specified site. If the person is
+        already a member of the site, no errors are returned. Does not change
+        the person's primary site.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
+    def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
+        """Grants the specified role to the person.
+        PIs can only grant the tech and user roles to users and techs at their
+        sites. Admins can grant any role to any user.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO AddPersonKey 04/07/2012 SA
+    def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
+        """Adds a new key to the specified account.
+        Non-admins can only modify their own keys.
+        Returns the new key_id (> 0) if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #No-op stub: only logs the call, always returns None.
+        logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
+        return
+
+    def DeleteLeases(self, leases_id_list, slice_hrn ):
+        """Deletes several leases belonging to slice_hrn, by calling
+        DeleteJobs once per job id in leases_id_list. Returns None.
+        """
+        for job_id in leases_id_list:
+            self.DeleteJobs(job_id, slice_hrn)
+
+        logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
+                \r\n " %(leases_id_list, slice_hrn))
+        return
--- /dev/null
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy import Table, MetaData
+from sqlalchemy.ext.declarative import declarative_base
+
+from sqlalchemy.dialects import postgresql
+
+from sqlalchemy.exc import NoSuchTableError
+
+
+#Dict holding the columns names of the table as keys
+#and their type, used for creation of the table
+#NOTE(review): 'references X' looks like a leftover placeholder for the
+#referenced table name -- confirm before using this DDL snippet.
+slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
+CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1', \
+'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}
+
+#Dict with all the specific senslab tables
+tablenames_dict = {'slice_senslab': slice_table}
+
+
+SlabBase = declarative_base()
+
+class SliceSenslab (SlabBase):
+    """ SQL alchemy class to manipulate the slice_senslab table in the
+    slab_sfa database. Maps one row per slice, keyed by slice_hrn.
+
+    """
+    __tablename__ = 'slice_senslab'
+    #record_id_user = Column(Integer, primary_key=True)
+
+    # hrn of the slice (primary key)
+    slice_hrn = Column(String, primary_key=True)
+    # hrn of the peer authority the slice comes from, if federated
+    peer_authority = Column(String, nullable = True)
+    # registry record id of the slice
+    record_id_slice = Column(Integer)
+    # registry record id of the slice owner
+    record_id_user = Column(Integer)
+
+    #oar_job_id = Column( Integer,default = -1)
+    #node_list = Column(postgresql.ARRAY(String), nullable =True)
+
+    def __init__ (self, slice_hrn =None, record_id_slice=None, \
+        record_id_user= None,peer_authority=None):
+        """
+        Defines a row of the slice_senslab table. Each attribute is only
+        set when the corresponding argument is truthy.
+        """
+        if record_id_slice:
+            self.record_id_slice = record_id_slice
+        if slice_hrn:
+            self.slice_hrn = slice_hrn
+        if record_id_user:
+            self.record_id_user = record_id_user
+        if peer_authority:
+            self.peer_authority = peer_authority
+
+
+    def __repr__(self):
+        """Prints the SQLAlchemy record to the format defined
+        by the function.
+        """
+        result = "<Record id user =%s, slice hrn=%s, Record id slice =%s , \
+                peer_authority =%s"% (self.record_id_user, self.slice_hrn, \
+                self.record_id_slice, self.peer_authority)
+        result += ">"
+        return result
+
+    def dump_sqlalchemyobj_to_dict(self):
+        """Transforms a SQLalchemy record object to a python dictionary.
+        Returns the dictionary.
+        Note: 'record_id' and 'record_id_slice' both carry
+        record_id_slice -- 'record_id' is kept for SFA-record compatibility.
+        """
+
+        dump_dict = {'slice_hrn':self.slice_hrn,
+        'peer_authority':self.peer_authority,
+        'record_id':self.record_id_slice,
+        'record_id_user':self.record_id_user,
+        'record_id_slice':self.record_id_slice, }
+        return dump_dict
+
+
+class SlabDB:
+    """ SQL Alchemy connection class for the slab_sfa database.
+    From alchemy.py
+    """
+    def __init__(self, config, debug = False):
+        """Opens an engine on the slab_sfa database, trying a unix socket
+        first then falling back to TCP. Raises when neither URL works.
+        """
+        self.sl_base = SlabBase
+        dbname = "slab_sfa"
+        if debug == True :
+            l_echo_pool = True
+            l_echo = True
+        else :
+            l_echo_pool = False
+            l_echo = False
+
+        self.slab_session = None
+        # the former PostgreSQL.py used the psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, nor if it's needed
+        # at all
+        # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+        # we indeed have /var/lib/pgsql/data/postgresql.conf where
+        # this setting is unset, it might be an angle to tweak that if need be
+        # try a unix socket first - omitting the hostname does the trick
+        unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"% \
+            (config.SFA_DB_USER, config.SFA_DB_PASSWORD, \
+                                    config.SFA_DB_PORT, dbname)
+
+        # the TCP fallback method
+        tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"% \
+            (config.SFA_DB_USER, config.SFA_DB_PASSWORD, config.SFA_DB_HOST, \
+                                    config.SFA_DB_PORT, dbname)
+        for url in [ unix_url, tcp_url ] :
+            try:
+                self.slab_engine = create_engine (url, echo_pool = \
+                                            l_echo_pool, echo = l_echo)
+                self.check()
+                self.url = url
+                return
+            # Narrowed from a bare except: a bare clause would also swallow
+            # KeyboardInterrupt/SystemExit while probing the URLs.
+            except Exception:
+                pass
+        self.slab_engine = None
+        raise Exception, "Could not connect to database"
+
+
+
+    def check (self):
+        """ Checks the engine is reachable by running a trivial
+        'select 1' query.
+
+        """
+        self.slab_engine.execute ("select 1").scalar()
+
+
+    def session (self):
+        """
+        Creates a SQLalchemy session. Once the session object is created
+        it should be used throughout the code for all the operations on
+        tables for this given database.
+
+        """
+        if self.slab_session is None:
+            Session = sessionmaker()
+            self.slab_session = Session(bind = self.slab_engine)
+        return self.slab_session
+
+    def close_session(self):
+        """
+        Closes connection to database.
+
+        """
+        if self.slab_session is None: return
+        self.slab_session.close()
+        self.slab_session = None
+
+
+    def exists(self, tablename):
+        """
+        Checks if the table specified as tablename exists, by trying to
+        autoload it. Returns True/False.
+
+        """
+
+        try:
+            metadata = MetaData (bind=self.slab_engine)
+            table = Table (tablename, metadata, autoload=True)
+            return True
+
+        except NoSuchTableError:
+            logger.log_exc("SLABPOSTGRES tablename %s does not exists" \
+                            %(tablename))
+            return False
+
+
+    def createtable(self):
+        """
+        Creates all the tables of the engine.
+        Uses the global dictionary holding the tablenames and the table schema.
+
+        """
+
+        # Fix: use the instance engine; the previous code referenced the
+        # module-level global slab_engine, which is only bound after module
+        # initialization completes (NameError if called during __init__).
+        logger.debug("SLABPOSTGRES createtable SlabBase.metadata.sorted_tables \
+        %s \r\n engine %s" %(SlabBase.metadata.sorted_tables , self.slab_engine))
+        SlabBase.metadata.create_all(self.slab_engine)
+        return
+
+
+
+#Module-level singletons: one engine and one session shared by every
+#importer of this module (mirrors sfa.storage.alchemy).
+slab_alchemy = SlabDB(Config())
+slab_engine = slab_alchemy.slab_engine
+slab_dbsession = slab_alchemy.session()
--- /dev/null
+from sfa.util.xrn import get_authority, urn_to_hrn
+from sfa.util.sfalogging import logger
+
+
+MAXINT = 2L**31-1
+
+class SlabSlices:
+
+ rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+
+    def __init__(self, driver):
+        """Keeps a reference to the testbed driver used for all
+        slice/lease/person operations."""
+        self.driver = driver
+
+
+    def get_peer(self, xrn):
+        """Returns the peer record whose hrn matches the authority of the
+        given xrn, or None when the slice belongs to the local testbed
+        (or no peer matches).
+        """
+        hrn, hrn_type = urn_to_hrn(xrn)
+        #Does this slice belong to a local site or a peer senslab site?
+        peer = None
+
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+        site_authority = slice_authority
+        # get this site's authority (sfa root authority or sub authority)
+        #site_authority = get_authority(slice_authority).lower()
+        logger.debug("SLABSLICES \ get_peer slice_authority  %s \
+                    site_authority %s hrn %s" %(slice_authority, \
+                                        site_authority, hrn))
+        #This slice belongs to the current site
+        if site_authority == self.driver.root_auth :
+            return None
+        # check if we are already peered with this site_authority, if so
+        #peers = self.driver.GetPeers({})
+        peers = self.driver.GetPeers(peer_filter = slice_authority)
+        for peer_record in peers:
+
+            if site_authority == peer_record.hrn:
+                peer = peer_record
+        logger.debug(" SLABSLICES \tget_peer peer  %s " %(peer))
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        """Returns the site authority (authority of the slice's authority)
+        for the given xrn, or None when we are that authority ourselves.
+        """
+        hrn, hrn_type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+
<doc_update>
+    def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
+        """Reconciles the leases (OAR jobs) requested in the RSpec with
+        the leases OAR already knows for this slice: deletes leases absent
+        from the request, reschedules leases whose node set changed, and
+        creates newly requested leases.
+
+        requested_jobs_dict is keyed by start_time as a *string*; OAR
+        lease start times are ints, hence the int()/str() conversions.
+        Returns the list of leases fetched from OAR at entry.
+        """
+
+
+        #First get the list of current leases from OAR
+        leases = self.driver.GetLeases({'name':sfa_slice['slice_hrn']})
+        logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
+                        leases %s "%(requested_jobs_dict, leases ))
+
+        current_nodes_reserved_by_start_time = {}
+        requested_nodes_by_start_time = {}
+        leases_by_start_time = {}
+        reschedule_jobs_dict = {}
+
+
+        #Create reduced dictionary with key start_time and value
+        # the list of nodes
+        #-for the leases already registered by OAR first
+        # then for the new leases requested by the user
+
+        #Leases already scheduled/running in OAR
+        for lease in leases :
+            current_nodes_reserved_by_start_time[lease['t_from']] = \
+                    lease['reserved_nodes']
+            leases_by_start_time[lease['t_from']] = lease
+
+
+        #Requested jobs
+        for start_time in requested_jobs_dict:
+            requested_nodes_by_start_time[int(start_time)] = \
+                    requested_jobs_dict[start_time]['hostname']
+        #Check if there is any difference between the leases already
+        #registered in OAR and the requested jobs.
+        #Difference could be:
+        #-Lease deleted in the requested jobs
+        #-Added/removed nodes
+        #-Newly added lease
+
+        logger.debug("SLABSLICES verify_slice_leases \
+                        requested_nodes_by_start_time %s \
+                        "%(requested_nodes_by_start_time ))
+        #Find all deleted leases
+        start_time_list = \
+            list(set(leases_by_start_time.keys()).\
+            difference(requested_nodes_by_start_time.keys()))
+        deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+                            for start_time in start_time_list]
+
+
+
+        #Find added or removed nodes in existing leases
+        for start_time in requested_nodes_by_start_time:
+            logger.debug("SLABSLICES verify_slice_leases  start_time %s \
+                         "%( start_time))
+            if start_time in current_nodes_reserved_by_start_time:
+
+                if requested_nodes_by_start_time[start_time] == \
+                    current_nodes_reserved_by_start_time[start_time]:
+                    continue
+
+                else:
+                    update_node_set = \
+                            set(requested_nodes_by_start_time[start_time])
+                    added_nodes = \
+                        update_node_set.difference(\
+                        current_nodes_reserved_by_start_time[start_time])
+                    shared_nodes = \
+                        update_node_set.intersection(\
+                        current_nodes_reserved_by_start_time[start_time])
+                    old_nodes_set = \
+                        set(\
+                        current_nodes_reserved_by_start_time[start_time])
+                    removed_nodes = \
+                        old_nodes_set.difference(\
+                        requested_nodes_by_start_time[start_time])
+                    logger.debug("SLABSLICES verify_slice_leases \
+                        shared_nodes %s  added_nodes %s removed_nodes %s"\
+                        %(shared_nodes, added_nodes,removed_nodes ))
+                    #If the lease is modified, delete it before
+                    #creating it again.
+                    #Add the deleted lease job id in the list
+                    #WARNING :rescheduling does not work if there is already
+                    # 2 running/scheduled jobs because deleting a job
+                    #takes time SA 18/10/2012
+                    if added_nodes or removed_nodes:
+                        deleted_leases.append(\
+                            leases_by_start_time[start_time]['lease_id'])
+                        #Reschedule the job
+                        if added_nodes or shared_nodes:
+                            reschedule_jobs_dict[str(start_time)] = \
+                                requested_jobs_dict[str(start_time)]
+
+            else:
+                #New lease
+
+                job = requested_jobs_dict[str(start_time)]
+                logger.debug("SLABSLICES \
+                NEWLEASE slice %s  job %s"\
+                %(sfa_slice, job))
+                self.driver.AddLeases(job['hostname'], \
+                    sfa_slice, int(job['start_time']), \
+                    int(job['duration']))
+
+        #Deleted leases are the ones with lease id not declared in the Rspec
+        if deleted_leases:
+            self.driver.DeleteLeases(deleted_leases, sfa_slice['slice_hrn'])
+            logger.debug("SLABSLICES \
+                    verify_slice_leases slice %s deleted_leases %s"\
+                    %(sfa_slice, deleted_leases))
+
+
+        if reschedule_jobs_dict :
+            for start_time in  reschedule_jobs_dict:
+                job = reschedule_jobs_dict[start_time]
+                self.driver.AddLeases(job['hostname'], \
+                    sfa_slice, int(job['start_time']), \
+                    int(job['duration']))
+        return leases
+
+    def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
+        """Compares the nodes currently attached to the slice with the
+        requested slivers and deletes the slice from the testbed when
+        some of its nodes are no longer requested.
+
+        Returns the list of node records currently attached to the slice
+        (empty when the slice carries no 'node_ids' entry).
+        """
+        current_slivers = []
+        deleted_nodes = []
+        # Fix: nodes was previously unbound when 'node_ids' was absent,
+        # making the final return raise NameError.
+        nodes = []
+
+        if 'node_ids' in sfa_slice:
+            # Fix: the original read sfa_slice['list_node_ids'] although the
+            # guard above tests 'node_ids', guaranteeing a KeyError. Use the
+            # checked key. TODO confirm against what GetSlices returns.
+            nodes = self.driver.GetNodes(sfa_slice['node_ids'], \
+                ['hostname'])
+            current_slivers = [node['hostname'] for node in nodes]
+
+            # remove nodes not in rspec
+            deleted_nodes = list(set(current_slivers).\
+                                        difference(requested_slivers))
+            # add nodes from rspec
+            #added_nodes = list(set(requested_slivers).\
+                                        #difference(current_slivers))
+
+
+        logger.debug("SLABSLICES \tverify_slice_nodes slice %s\
+                                         \r\n \r\n deleted_nodes %s"\
+                                        %(sfa_slice, deleted_nodes))
+
+        if deleted_nodes:
+            #Delete the entire experiment
+            self.driver.DeleteSliceFromNodes(sfa_slice)
+            #self.driver.DeleteSliceFromNodes(sfa_slice['slice_hrn'], \
+                                                            #deleted_nodes)
+        return nodes
+
+
+
+    def free_egre_key(self):
+        """Returns (as a string) the smallest EGRE key in [1, 255] not
+        already used by an 'egre_key' slice tag.
+        Raises KeyError when every key is taken.
+        """
+        used_keys = set()
+        for tag in self.driver.GetSliceTags({'tagname': 'egre_key'}):
+            used_keys.add(int(tag['value']))
+
+        for candidate in range(1, 256):
+            if candidate not in used_keys:
+                return str(candidate)
+        raise KeyError("No more EGRE keys available")
+
+
+
+
+
+
+    def handle_peer(self, site, sfa_slice, persons, peer):
+        """Binds the given site, slice and persons (and their keys) to the
+        remote peer authority, deleting the local object whenever its bind
+        fails, then re-raising. Returns sfa_slice unchanged.
+
+        NOTE(review): the driver's Bind/UnBind methods are currently no-op
+        stubs, so the rollback paths below cannot trigger yet.
+        """
+        if peer:
+            # bind site
+            try:
+                if site:
+                    self.driver.BindObjectToPeer('site', site['site_id'], \
+                        peer['shortname'], sfa_slice['site_id'])
+            except Exception, error:
+                self.driver.DeleteSite(site['site_id'])
+                raise error
+
+            # bind slice
+            try:
+                if sfa_slice:
+                    # Fix: the original read slice['slice_id'], subscripting
+                    # the 'slice' builtin instead of the sfa_slice argument
+                    # (TypeError at runtime).
+                    self.driver.BindObjectToPeer('slice', \
+                        sfa_slice['slice_id'], \
+                        peer['shortname'], sfa_slice['slice_id'])
+            except Exception, error:
+                self.driver.DeleteSlice(sfa_slice['slice_id'])
+                raise error
+
+            # bind persons
+            for person in persons:
+                try:
+                    self.driver.BindObjectToPeer('person', \
+                        person['person_id'], peer['shortname'], \
+                        person['peer_person_id'])
+
+                    for (key, remote_key_id) in zip(person['keys'], \
+                        person['key_ids']):
+                        try:
+                            self.driver.BindObjectToPeer( 'key', \
+                                key['key_id'], peer['shortname'], \
+                                remote_key_id)
+                        # Narrowed from a bare except: best-effort per key,
+                        # logged and the key deleted.
+                        except Exception:
+                            self.driver.DeleteKey(key['key_id'])
+                            logger.log_exc("failed to bind key: %s \
+                            to peer: %s " % (key['key_id'], \
+                            peer['shortname']))
+                except Exception, error:
+                    self.driver.DeletePerson(person['person_id'])
+                    raise error
+
+        return sfa_slice
+
+ #def verify_site(self, slice_xrn, slice_record={}, peer=None, \
+ #sfa_peer=None, options={}):
+ #(slice_hrn, type) = urn_to_hrn(slice_xrn)
+ #site_hrn = get_authority(slice_hrn)
+ ## login base can't be longer than 20 characters
+ ##slicename = hrn_to_pl_slicename(slice_hrn)
+ #authority_name = slice_hrn.split('.')[0]
+ #login_base = authority_name[:20]
+ #logger.debug(" SLABSLICES.PY \tverify_site authority_name %s \
+ #login_base %s slice_hrn %s" \
+ #%(authority_name,login_base,slice_hrn)
+
+ #sites = self.driver.GetSites(login_base)
+ #if not sites:
+ ## create new site record
+ #site = {'name': 'geni.%s' % authority_name,
+ #'abbreviated_name': authority_name,
+ #'login_base': login_base,
+ #'max_slices': 100,
+ #'max_slivers': 1000,
+ #'enabled': True,
+ #'peer_site_id': None}
+ #if peer:
+ #site['peer_site_id'] = slice_record.get('site_id', None)
+ #site['site_id'] = self.driver.AddSite(site)
+ ## exempt federated sites from monitor policies
+ #self.driver.AddSiteTag(site['site_id'], 'exempt_site_until', \
+ #"20200101")
+
+ ### is this still necessary?
+ ### add record to the local registry
+ ##if sfa_peer and slice_record:
+ ##peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+ ##'peer_authority': sfa_peer, 'pointer': \
+ #site['site_id']}
+ ##self.registry.register_peer_object(self.credential, peer_dict)
+ #else:
+ #site = sites[0]
+ #if peer:
+ ## unbind from peer so we can modify if necessary.
+ ## Will bind back later
+ #self.driver.UnBindObjectFromPeer('site', site['site_id'], \
+ #peer['shortname'])
+
+ #return site
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer):
+        """Makes sure a senslab slice exists for slice_hrn: when found,
+        merges slice_record into the existing slab record; otherwise
+        builds a minimal slice dict and registers it with AddSlice.
+
+        Returns the slab slice dict (existing-and-updated, or newly added).
+        """
+
+        #login_base = slice_hrn.split(".")[0]
+        slicename = slice_hrn
+        slices_list = self.driver.GetSlices(slice_filter = slicename, \
+                                            slice_filter_type = 'slice_hrn')
+        if slices_list:
+            for sl in slices_list:
+
+                logger.debug("SLABSLICE \tverify_slice slicename %s sl %s \
+                                    slice_record %s"%(slicename, sl, \
+                                                            slice_record))
+                sfa_slice = sl
+                sfa_slice.update(slice_record)
+                #del slice['last_updated']
+                #del slice['date_created']
+                #if peer:
+                    #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                    ## unbind from peer so we can modify if necessary.
+                    ## Will bind back later
+                    #self.driver.UnBindObjectFromPeer('slice', \
+                                                    #slice['slice_id'], \
+                                                    #peer['shortname'])
+                #Update existing record (e.g. expires field)
+                #it with the latest info.
+                ##if slice_record and slice['expires'] != slice_record['expires']:
+                    ##self.driver.UpdateSlice( slice['slice_id'], {'expires' : \
+                                                    #slice_record['expires']})
+        else:
+            logger.debug(" SLABSLICES \tverify_slice Oups \
+                        slice_record %s peer %s sfa_peer %s "\
+                        %(slice_record, peer,sfa_peer))
+            sfa_slice = {'slice_hrn': slicename,
+                     #'url': slice_record.get('url', slice_hrn),
+                     #'description': slice_record.get('description', slice_hrn)
+                     'node_list' : [],
+                     'record_id_user' : slice_record['person_ids'][0],
+                     'record_id_slice': slice_record['record_id'],
+                     #NOTE(review): assumes peer is not None on this branch;
+                     #get_peer can return None -- TODO confirm callers.
+                     'peer_authority':str(peer.hrn)
+
+                    }
+            # add the slice
+            self.driver.AddSlice(sfa_slice)
+            #slice['slice_id'] = self.driver.AddSlice(slice)
+            logger.debug("SLABSLICES \tverify_slice ADDSLICE OK")
+            #slice['node_ids']=[]
+            #slice['person_ids'] = []
+            #if peer:
+                #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+            # mark this slice as an sfa peer record
+            #if sfa_peer:
+                #peer_dict = {'type': 'slice', 'hrn': slice_hrn,
+                             #'peer_authority': sfa_peer, 'pointer': \
+                                                #slice['slice_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+
+
+
+        return sfa_slice
+
+
+    def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, \
+                       options={}):
+        """
+        users is a record list. Records can either be local records
+        or users records from known and trusted federated sites.
+        If the user is from another site that senslab doesn't trust yet,
+        then Resolve will raise an error before getting to create_sliver.
+
+        Checks which of the given users already exist in the senslab LDAP
+        and registers the missing ones via AddPerson.
+        Returns the list of person dicts that were added.
+        Note: options={} is a shared mutable default; harmless here since
+        options is never mutated, but worth fixing eventually.
+        """
+        #TODO SA 21/08/12 verify_persons Needs review
+
+
+        users_by_id = {}
+        users_by_hrn = {}
+        #users_dict : dict whose keys can either be the user's hrn or its id.
+        #Values contains only id and hrn
+        users_dict = {}
+
+        #First create dicts by hrn and id for each user in the user record list:
+        for user in users:
+
+            if 'urn' in user and (not 'hrn' in user ) :
+                user['hrn'], user['type'] = urn_to_hrn(user['urn'])
+
+            if 'person_id' in user and 'hrn' in user:
+                users_by_id[user['person_id']] = user
+                users_dict[user['person_id']] = {'person_id':\
+                                user['person_id'], 'hrn':user['hrn']}
+
+                users_by_hrn[user['hrn']] = user
+                users_dict[user['hrn']] = {'person_id':user['person_id'], \
+                                                'hrn':user['hrn']}
+
+
+        logger.debug( "SLABSLICE.PY \t verify_person  \
+                        users_dict %s \r\n user_by_hrn %s \r\n \
+                        \tusers_by_id %s " \
+                        %(users_dict,users_by_hrn, users_by_id))
+
+        existing_user_ids = []
+        existing_user_hrns = []
+        existing_users = []
+        # Check if user is in Senslab LDAP using its hrn.
+        # Assuming Senslab is centralised :  one LDAP for all sites,
+        # user_id unknown from LDAP
+        # LDAP does not provide users id, therefore we rely on hrns containing
+        # the login of the user.
+        # If the hrn is not a senslab hrn, the user may not be in LDAP.
+        if users_by_hrn:
+            #Construct the list of filters (list of dicts) for GetPersons
+            filter_user = []
+            for hrn in users_by_hrn:
+                filter_user.append (users_by_hrn[hrn])
+            logger.debug(" SLABSLICE.PY \tverify_person  filter_user %s " \
+                                                            %(filter_user))
+            #Check user's in LDAP with GetPersons
+            #Needed because what if the user has been deleted in LDAP but
+            #is still in SFA?
+            existing_users = self.driver.GetPersons(filter_user)
+
+            #User's in senslab LDAP
+            if existing_users:
+                for user in existing_users :
+                    existing_user_hrns.append(users_dict[user['hrn']]['hrn'])
+                    existing_user_ids.\
+                                    append(users_dict[user['hrn']]['person_id'])
+
+            # User from another known trusted federated site. Check
+            # if a senslab account matching the email has already been created.
+            else:
+                req = 'mail='
+                if isinstance(users, list):
+
+                    req += users[0]['email']
+                else:
+                    req += users['email']
+
+                ldap_reslt = self.driver.ldap.LdapSearch(req)
+                if ldap_reslt:
+                    logger.debug(" SLABSLICE.PY \tverify_person users \
+                                USER already in Senslab \t ldap_reslt %s \
+                                "%( ldap_reslt))
+                    #NOTE(review): assumes LdapSearch returns indexable
+                    #results with the record at index 1 -- TODO confirm.
+                    existing_users.append(ldap_reslt[1])
+
+                else:
+                    #User not existing in LDAP
+                    #TODO SA 21/08/12 raise smthg to add user or add it auto ?
+                    logger.debug(" SLABSLICE.PY \tverify_person users \
+                                not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
+                                ldap_reslt %s "  %(users, ldap_reslt))
+
+        requested_user_ids = users_by_id.keys()
+        requested_user_hrns = users_by_hrn.keys()
+        logger.debug("SLABSLICE.PY \tverify_person  requested_user_ids  %s \
+                        user_by_hrn %s " %(requested_user_ids, users_by_hrn))
+
+
+        #Check that the user of the slice in the slice record
+        #matches the existing users
+        try:
+            if slice_record['record_id_user'] in requested_user_ids and \
+                slice_record['PI'][0] in requested_user_hrns:
+                logger.debug(" SLABSLICE  \tverify_person ['record_id_user'] \
+                        requested_user_ids %s = \
+                        slice_record['record_id_user'] %s" \
+                        %(requested_user_ids,slice_record['record_id_user']))
+
+        except KeyError:
+            pass
+
+
+        # users to be added, removed or updated
+        #One user in one senslab slice : there should be no need
+        #to remove/ add any user from/to a slice.
+        #However a user from SFA which is not registered in Senslab yet
+        #should be added to the LDAP.
+
+        added_user_hrns = set(requested_user_hrns).\
+                                        difference(set(existing_user_hrns))
+
+        #self.verify_keys(existing_slice_users, updated_users_list, \
+                                                            #peer, append)
+
+        added_persons = []
+        # add new users
+        for added_user_hrn in added_user_hrns:
+            added_user = users_dict[added_user_hrn]
+            #hrn, type = urn_to_hrn(added_user['urn'])
+            #NOTE(review): users_dict values only carry person_id and hrn;
+            #first_name/last_name below would KeyError unless added_user
+            #was built elsewhere with those fields -- TODO confirm.
+            person = {
+                #'first_name': added_user.get('first_name', hrn),
+                #'last_name': added_user.get('last_name', hrn),
+                'first_name': added_user['first_name'],
+                'last_name': added_user['last_name'],
+                'person_id': added_user['person_id'],
+                'peer_person_id': None,
+                'keys': [],
+                'key_ids': added_user.get('key_ids', []),
+
+                }
+            person['person_id'] = self.driver.AddPerson(person)
+            if peer:
+                person['peer_person_id'] = added_user['person_id']
+            added_persons.append(person)
+
+            # enable the account
+            self.driver.UpdatePerson(person['person_id'], {'enabled': True})
+
+            # add person to site
+            #self.driver.AddPersonToSite(added_user_id, login_base)
+
+            #for key_string in added_user.get('keys', []):
+                #key = {'key':key_string, 'key_type':'ssh'}
+                #key['key_id'] = self.driver.AddPersonKey(person['person_id'], \
+                #                                                        key)
+                #person['keys'].append(key)
+
+            # add the registry record
+            #if sfa_peer:
+                #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': \
+                                                #sfa_peer, \
+                                    #'pointer': person['person_id']}
+                #self.registry.register_peer_object(self.credential, peer_dict)
+        #for added_slice_user_hrn in \
+                #added_slice_user_hrns.union(added_user_hrns):
+            #self.driver.AddPersonToSlice(added_slice_user_hrn, \
+                                                    #slice_record['name'])
+        #for added_slice_user_id in \
+                                #added_slice_user_ids.union(added_user_ids):
+            # add person to the slice
+            #self.driver.AddPersonToSlice(added_slice_user_id, \
+                                                #slice_record['name'])
+            # if this is a peer record then it
+            # should already be bound to a peer.
+            # no need to return worry about it getting bound later
+
+        return added_persons
+
+ #Unused
+    def verify_keys(self, persons, users, peer, options={}):
+        """Synchronize ssh keys between SFA user records and testbed persons.
+
+        Adds every requested key the driver does not already know and,
+        unless options['append'] is True (the default), deletes existing
+        keys that were not requested.  When ``peer`` is set, persons/keys
+        are unbound from and rebound to that peer around each driver call.
+
+        :param persons: testbed person dicts (must carry 'key_ids', 'email').
+        :param users: SFA user dicts (may carry 'keys', 'key_ids', 'person_id').
+        :param peer: peer dict with 'shortname', or None for local records.
+        :param options: 'append' flag; NOTE(review): mutable default dict is
+            shared across calls -- replace with None-sentinel if reactivated.
+        """
+        # existing keys: map key string -> key_id for everything the driver knows
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person
+
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    # finally (not except) below: the person is rebound to
+                    # the peer whether or not AddPersonKey raised.
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.driver.UnBindObjectFromPeer('person', \
+                                                person['person_id'], peer['shortname'])
+                        key['key_id'] = \
+                        self.driver.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.BindObjectToPeer('key', \
+                                            key['key_id'], peer['shortname'], \
+                                            remote_key_id)
+
+                    finally:
+                        if peer:
+                            self.driver.BindObjectToPeer('person', \
+                                            person['person_id'], peer['shortname'], \
+                                            user['person_id'])
+
+        # remove old keys (only if we are not appending)
+        append = options.get('append', True)
+        if append == False:
+            removed_keys = set(existing_keys).difference(requested_keys)
+            # NOTE(review): iterating keydict yields key *strings*, so
+            # keydict[existing_key_id] is a key *id*, which is then compared
+            # against the key strings in removed_keys -- this branch can
+            # never match and stale keys are never deleted. Marked #Unused
+            # above; confirm and fix before reactivating.
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+
+                    if peer:
+                        self.driver.UnBindObjectFromPeer('key', \
+                                        existing_key_id, peer['shortname'])
+                    self.driver.DeleteKey(existing_key_id)
+
+ #def verify_slice_attributes(self, slice, requested_slice_attributes, \
+ #append=False, admin=False):
+    ## get list of attributes users are able to manage
+ #filter = {'category': '*slice*'}
+ #if not admin:
+ #filter['|roles'] = ['user']
+ #slice_attributes = self.driver.GetTagTypes(filter)
+ #valid_slice_attribute_names = [attribute['tagname'] \
+ #for attribute in slice_attributes]
+
+ ## get sliver attributes
+ #added_slice_attributes = []
+ #removed_slice_attributes = []
+ #ignored_slice_attribute_names = []
+ #existing_slice_attributes = self.driver.GetSliceTags({'slice_id': \
+ #slice['slice_id']})
+
+ ## get attributes that should be removed
+ #for slice_tag in existing_slice_attributes:
+ #if slice_tag['tagname'] in ignored_slice_attribute_names:
+ ## If a slice already has a admin only role
+ ## it was probably given to them by an
+ ## admin, so we should ignore it.
+ #ignored_slice_attribute_names.append(slice_tag['tagname'])
+ #else:
+ ## If an existing slice attribute was not
+ ## found in the request it should
+ ## be removed
+ #attribute_found=False
+ #for requested_attribute in requested_slice_attributes:
+ #if requested_attribute['name'] == slice_tag['tagname'] \
+ #and requested_attribute['value'] == slice_tag['value']:
+ #attribute_found=True
+ #break
+
+ #if not attribute_found and not append:
+ #removed_slice_attributes.append(slice_tag)
+
+ ## get attributes that should be added:
+ #for requested_attribute in requested_slice_attributes:
+ ## if the requested attribute wasn't found we should add it
+ #if requested_attribute['name'] in valid_slice_attribute_names:
+ #attribute_found = False
+ #for existing_attribute in existing_slice_attributes:
+ #if requested_attribute['name'] == \
+ #existing_attribute['tagname'] and \
+ #requested_attribute['value'] == \
+ #existing_attribute['value']:
+ #attribute_found=True
+ #break
+ #if not attribute_found:
+ #added_slice_attributes.append(requested_attribute)
+
+
+ ## remove stale attributes
+ #for attribute in removed_slice_attributes:
+ #try:
+ #self.driver.DeleteSliceTag(attribute['slice_tag_id'])
+ #except Exception, error:
+ #self.logger.warn('Failed to remove sliver attribute. name: \
+ #%s, value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(error)))
+
+ ## add requested_attributes
+ #for attribute in added_slice_attributes:
+ #try:
+ #self.driver.AddSliceTag(slice['name'], attribute['name'], \
+ #attribute['value'], attribute.get('node_id', None))
+ #except Exception, error:
+ #self.logger.warn('Failed to add sliver attribute. name: %s, \
+ #value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(error)))
+
+
--- /dev/null
+###########################################################################
+# Copyright (C) 2012 by
+# <savakian@sfa2.grenoble.senslab.info>
+#
+# Copyright: See COPYING file that comes with this distribution
+#
+###########################################################################
+#LDAP import
+from sfa.senslab.LDAPapi import *
+import ldap.modlist as modlist
+import ldap as L
+
+#logger sfa
+from sfa.util.sfalogging import logger
+
+#OAR imports
+from datetime import datetime
+from dateutil import tz
+from time import strftime,gmtime
+from sfa.senslab.OARrestapi import OARrestapi
+
+#Test slabdriver
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.util.config import Config
+
+import sys
+
+
+
+def parse_options():
+
+ #arguments supplied
+ if len(sys.argv) > 1 :
+ options_list = sys.argv[1:]
+ #For each valid option, execute the associated function
+ #(defined in the dictionnary supported_options)
+ job_id = 1
+ valid_options_dict = {}
+ value_list = []
+ #Passing options to the script should be done like this :
+ #-10 OAR -2 SlabDriver
+ for option in options_list:
+ if option in supported_options:
+ #update the values used for the fonctions associated
+ #with the options
+
+ valid_options_dict[option] = value_list
+ #empty the values list for next option
+ value_list = []
+ print valid_options_dict
+ else:
+ if option[0] == '-':
+ value_list.append(option[1:])
+ print "value_list",value_list
+
+
+ return valid_options_dict
+
+def TestLdap(job_id = None):
+    """Exercise the LDAPapi wrapper: connections (bound and anonymous),
+    searches, password/uid generation, and add/reset/delete of a fictional
+    test user, plus a pubkey update of a real account.
+
+    :param job_id: accepted for dispatcher compatibility (see main());
+        unused by the LDAP tests themselves.
+    """
+    logger.setLevelDebug()
+
+    ldap = LDAPapi()
+    ret = ldap.conn.connect(bind=True)
+    ldap.conn.close()
+    print "TEST ldap.conn.connect(bind=True)" , ret
+
+    ret = ldap.conn.connect(bind=False)
+    ldap.conn.close()
+    print "TEST ldap.conn.connect(bind=False)", ret
+
+
+    ret = ldap.LdapSearch()
+    print "TEST ldap.LdapSearch ALL",ret
+
+    ret = ldap.LdapSearch('(uid=avakian)', [])
+    print "\r\n TEST ldap.LdapSearch ids = avakian",ret
+
+
+    password = ldap.generate_password()
+    print "\r\n TEST generate_password ",password
+
+    maxi = ldap.find_max_uidNumber()
+    print "\r\n TEST find_max_uidNumber " , maxi
+
+    # fictional user used by the (currently commented-out) add/delete tests
+    data = {}
+    data['last_name'] = "Drake"
+    data['first_name']="Tim"
+    data['givenName']= data['first_name']
+    data['mail'] = "robin@arkham.fr"
+
+    record={}
+    record['hrn'] = 'senslab2.drake'
+    record['last_name'] = "Drake"
+    record['first_name']="Tim"
+    record['mail'] = "robin@arkham.fr"
+
+
+
+    #login = ldap.generate_login(data)
+    #print "\r\n Robin \tgenerate_login ", ret
+
+    #ret = ldap.LdapAddUser(data)
+    #print "\r\n Robin \tLdapAddUser ", ret
+
+    #req_ldap = '(uid=' + login + ')'
+    #ret = ldap.LdapSearch(req_ldap, [])
+    #print "\r\n Robin \tldap.LdapSearch ids = %s %s"%(login,ret )
+
+    #password = "Thridrobin"
+    #enc = ldap.encrypt_password(password)
+    #print "\r\n Robin \tencrypt_password ", enc
+
+    #ret = ldap.LdapModifyUser(record, {'userPassword':enc})
+    #print "\r\n Robin \tChange password LdapModifyUser ", ret
+
+    #dn = 'uid=' + login + ',' + ldap.baseDN
+    #ret = ldap.LdapDelete(dn)
+    #print "\r\n Robin \tLdapDelete ", ret
+
+    # second fictional user: full add / reset password / delete round-trip
+    datanight = {}
+    datanight['last_name'] = "Grayson"
+    datanight['first_name']="Dick"
+    datanight['givenName']= datanight['first_name']
+    datanight['mail'] = "nightwing@arkham.fr"
+
+
+    record_night = {}
+    record_night['hrn'] = 'senslab2.grayson'
+    record_night['last_name'] = datanight['last_name']
+    record_night['first_name'] = datanight['first_name']
+    record_night['mail'] = datanight['mail']
+
+    ret = ldap.LdapFindUser(record_night)
+    print "\r\n Nightwing \tldap.LdapFindHrn %s : %s"%(record_night,ret)
+
+    ret = ldap.LdapSearch('(uid=grayson)', [])
+    print "\r\n Nightwing \tldap.LdapSearch ids = %s %s"%('grayson',ret )
+
+    ret = ldap.LdapAddUser(datanight)
+    print "\r\n Nightwing \tLdapAddUser ", ret
+
+    ret = ldap.LdapResetPassword(record_night)
+    print "\r\n Nightwing \tLdapResetPassword de %s : %s "%(record_night,ret)
+
+    ret = ldap.LdapDeleteUser(record_night)
+    print "\r\n Nightwing \tLdapDeleteUser ", ret
+
+
+    # real account: only a (reversible) public-key update is performed
+    record_avakian = {}
+    record_avakian['hrn']= 'senslab2.avakian'
+    record_avakian['last_name'] = 'avakian'
+    record_avakian['first_name'] = 'sandrine'
+    record_avakian['mail'] = 'sandrine.avakian@inria.fr'
+    pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwSUkJ+cr3xM47h8lFkIXJoJhg4wHakTaLJmgTXkzvUmQsQeFB2MjUZ6WAelMXj/EFz2+XkK+bcWNXwfbrLptJQ+XwGpPZlu9YV/kzO63ghVrAyEg0+p7Pn1TO9f1ZYg4R6JfP/3qwH1AsE+X3PNpIewsuEIKwd2wUCJDf5RXJTpl39GizcBFemrRqgs0bdqAN/vUT9YvtWn8fCYR5EfJHVXOK8P1KmnbuGZpk7ryz21pDMlgw13+8aYB+LPkxdv5zG54A5c6o9N3zOCblvRFWaNBqathS8y04cOYWPmyu+Q0Xccwi7vM3Ktm8RoJw+raQNwsmneJOm6KXKnjoOQeiQ== savakian@sfa2.grenoble.senslab.info"
+    ret = ldap.LdapModifyUser(record_avakian, {'sshPublicKey':pubkey})
+    print "\r\n Sandrine \tChange pubkey LdapModifyUser ", ret
+
+    #record_myslice = {}
+    #record_myslice['hrn']= 'senslab2.myslice'
+    #record_myslice['last_name'] = 'myslice'
+    #record_myslice['first_name'] = 'myslice'
+    #record_myslice['mail'] = 'nturro@inria.fr'
+    #pubkeymyslice = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuyRPwn8PZxjdhu+ciRuPyM0eVBn7XS7i3tym9F30UVhaCd09a/UEmGn7WJZdfsxV3hXqG1Wc766FEst97NuzHzELSuvy/rT96J0UHG4wae4pnzOLd6NwFdZh7pkPsgHMHxK9ALVE68Puu+EDSOB5bBZ9Q624wCIGxEpmuS/+X+dDBTKgG5Hi0WA1uKJwhLSbbXb38auh4FlYgXPsdpljTIJatt+zGL0Zsy6fdrsVRc5W8kr3/SmE4OMNyabKBNyxioSEuYhRSjoQAHnYoevEjZniP8IzscKK7qwelzGUfnJEzexikhsQamhAFti2ReiFfoHBRZxnSc49ioH7Kaci5w== root@rhoecos3.ipv6.lip6.fr"
+
+    #pubkeytestuser = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYS8tzufciTm6GdNUGHQc64OfTxFebMYUwh/Jl04IPTvjjr26uakbM0M2v33HxZ5Q7PnmPN9pB/w+a+f7a7J4cNs/tApOMg2hb6UrLaOrdnDMOs4KZlfElyDsF3Zx5QwxPYvzsKADAbDVoX4NF9PttuDLdm2l3nLSvm89jfla00GBg+K8grdOCHyYZVX/Wt7kxhXDK3AidQhKJgn+iD5GxvtWMBE+7S5kJGdRW1W10lSLBW3+VNsCrKJB2s8L55Xz/l2HNBScU7T0VcMQJrFxEXKzLPagZsMz0lfLzHESoGHIZ3Tz85DfECbTtMxLts/4KoAEc3EE+PYr2VDeAggDx testuser@myslice"
+
+
+
+    #password = "ReptileFight"
+    #enc = ldap.encrypt_password(password)
+    #print "\r\n sandrine \tencrypt_password ", enc
+
+    #ret = ldap.LdapModifyUser(record_avakian, {'userPassword':enc})
+    #print "\r\n sandrine \tChange password LdapModifyUser ", ret
+    return
+
+
+def get_stuff(oar, uri):
+    """GET ``uri`` from the OAR REST server and return the decoded JSON.
+
+    Authentication relies on the X-REMOTE_IDENT header (hard-coded test
+    user).  The server address comes from oar.oarserver['ip'/'port'].
+    """
+    import httplib
+    import json
+    headers = {}
+    data = json.dumps({})
+
+    headers['X-REMOTE_IDENT'] = 'avakian'
+
+    headers['content-length'] = '0' #seems that it does not work if we don't add this
+
+
+    conn = httplib.HTTPConnection(oar.oarserver['ip'],oar.oarserver['port'])
+    conn.request("GET",uri,data , headers )
+    resp = ( conn.getresponse()).read()
+    #logger.debug("OARrestapi: \t GETRequestToOARRestAPI resp %s" %( resp))
+    conn.close()
+
+
+    js = json.loads(resp)
+    return js
+
+
+
+
+def TestOAR(job_id = None):
+
+ if isinstance(job_id,list) and len(job_id) == 1:
+ job_id = job_id[0]
+
+ if job_id is None :
+ job_id = 1
+
+ oar = OARrestapi()
+ jobs = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
+ print "\r\n OAR GET_reserved_nodes ",jobs
+
+
+
+ jobs = oar.parser.SendRequest("GET_jobs")
+ print "\r\n OAR GET_jobs ",jobs
+
+
+ jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
+ print "\r\n OAR GET_jobs_id ",jobs
+
+ uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
+ raw_json = get_stuff(oar,uri)
+ print "\r\nOAR ", uri, raw_json, "\r\n KKK \t",raw_json.keys()
+
+ uri = '/oarapi/jobs/' + job_id +'.json'
+ raw_json = get_stuff(oar,uri)
+ print "\r\n OAR ",uri,raw_json, "\r\n KKK \t",raw_json.keys()
+
+ uri = '/oarapi/jobs/' + job_id + '/resources.json'
+ raw_json = get_stuff(oar,uri)
+ print "\r\n OAR ",uri, raw_json, "\r\n KKK \t",raw_json.keys()
+
+ time_format = "%Y-%m-%d %H:%M:%S"
+
+ server_timestamp,server_tz = oar.parser.SendRequest("GET_timezone")
+
+ print "\r\n OAR GetTimezone ",server_timestamp, server_tz
+ print(datetime.fromtimestamp(int(server_timestamp)).strftime('%Y-%m-%d %H:%M:%S'))
+
+ uri = '/oarapi/resources/full.json'
+ raw_json = get_stuff(oar,uri)
+ print "\r\n OAR ",uri, raw_json, "\r\n KKK \t",raw_json.keys()
+
+ uri = '/oarapi/jobs.json?user=avakian'
+ raw_json = get_stuff(oar,uri)
+ print "\r\nOAR ", uri, raw_json, "\r\n KKK \t",raw_json.keys()
+ return
+
+def TestSlabDriver(job_id = '1'):
+    """Exercise SlabDriver read operations: reserved nodes, slices and
+    persons (the destructive DeleteJobs call is left commented out).
+
+    :param job_id: job identifier; may arrive as a one-element list from
+        parse_options().  Default is already a string, matching the
+        driver's expectations.
+    """
+    if isinstance(job_id,list) and len(job_id) == 1:
+        job_id = job_id[0]
+    slabdriver = SlabDriver(Config())
+    nodes = slabdriver.GetReservedNodes(username='avakian')
+    print "\r\n \r\n" ,nodes
+
+    # '269' is a hard-coded user record id -- only meaningful on the
+    # test deployment this script was written for.
+    l = slabdriver.GetSlices(slice_filter = '269', slice_filter_type = 'record_id_user')
+
+
+    print "\r\n \r\nGetSlices" ,l
+
+    persons = slabdriver.GetPersons()
+    print "\r\n \r\n GetPersons" ,persons
+    #slabdriver.DeleteJobs(job_id,'senslab2.avakian_slice')
+
+
+def TestSfi(arg = None):
+ import os
+ print " ================= SFI.PY RESOURCES ============="
+ listing = os.system("sfi.py list senslab2")
+
+ print
+ resources = os.system("sfi.py resources")
+
+ print
+ slab = os.system("sfi.py resources -r slab")
+
+ print
+ resourcesall = os.system("sfi.py resources -l all")
+
+
+ print "================= SFI.PY RESOURCES -R SLAB -L ALL ============="
+ slaball = os.system("sfi.py resources -r slab -l all")
+ filename = "/home/savakian/flab-sfa/avakian_adv.rspec"
+ rspecfile = open(filename,"w")
+ r = os.popen("sfi.py resources -l all")
+ for i in r.readlines():
+ rspecfile.write(i)
+ rspecfile.close()
+
+ print " ================= SFI.PY SHOW SLICE ============="
+ slices_rec = os.system("sfi.py resources senslab2.avakian_slice")
+
+ print " ================= SFI.PY SHOW USER ============="
+ show_slice = os.system("sfi.py show senslab2.avakian_slice")
+
+ print " ================= SFI.PY SHOW NODE ============="
+ show = os.system("sfi.py show senslab2.avakian")
+
+ print " ================= SFI.PY SLICES ============="
+ show_node = os.system("sfi.py show senslab2.node67.grenoble.senslab.info")
+
+ print " ================= SFI.PY LIST SLICE ============="
+ slices = os.system("sfi.py slices")
+
+ print " ================= SFI.PY STATUS SLICE ============="
+ status_slice = os.system("sfi.py status senslab2.avakian_slice")
+
+ print " ================= SFI.PY DELETE SLICE ============="
+ status_slice = os.system("sfi.py delete senslab2.avakian_slice")
+
+ print " ================= SFI.PY CREATE SLICE ============="
+ create = os.system("sfi.py create senslab2.avakian_slice /home/savakian/flab-sfa/rspec_sfa.rspec")
+
+
+def RunAll():
+ TestLdap()
+ TestOAR()
+
+
+#Dispatch table mapping each recognized command-line option name to the
+#test function that handles it (used by parse_options() and main()).
+supported_options = {
+        'OAR' : TestOAR,
+        'LDAP': TestLdap,
+        'driver': TestSlabDriver,
+        'sfi':TestSfi,
+        'all' : RunAll }
+
+def main():
+ opts = parse_options()
+ print opts
+ for opt in opts:
+ supported_options[opt](opts[opt])
+
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+# Smoke-test of the SFA senslab federation through the sfi.py client:
+# listing, resource discovery (plain, slab-specific, verbose), record
+# display, slice operations, and slice creation from the rspec captured
+# earlier in this run.
+# NOTE(review): the two required arguments (jobid, test name) are only
+# echoed at the end and never otherwise used -- confirm intent.
+if (( ! $# == 2 ))
+then
+    echo " Usage : bash_test takes 2 arguments : one jobid and one of the following:"
+    echo " LDAP/ OAR / driver "
+    echo $#
+    exit
+fi
+
+sfi.py list senslab2
+echo " ================= SFI.PY RESOURCES ============="
+sfi.py resources
+
+echo " ================= SFI.PY RESOURCES -R SLAB ============="
+sfi.py resources -r slab
+
+echo " ================= SFI.PY RESOURCES -L ALL ============="
+sfi.py resources -l all
+
+echo " ================= SFI.PY RESOURCES -R SLAB -L ALL ============="
+sfi.py resources -r slab -l all
+
+# capture the verbose advertisement rspec; it is fed back to "create" below
+echo " ================= SFI.PY RESOURCES -L ALL > avakian_adv.rspec ============="
+sfi.py resources -l all > /home/savakian/flab-sfa/avakian_adv.rspec
+
+echo " ================= SFI.PY RESOURCES avakian_adv.rspec ============="
+sfi.py resources senslab2.avakian_slice
+
+
+echo " ================= SFI.PY SHOW SLICE ============="
+sfi.py show senslab2.avakian_slice
+
+echo " ================= SFI.PY SHOW USER ============="
+sfi.py show senslab2.avakian
+
+echo " ================= SFI.PY SHOW NODE ============="
+sfi.py show senslab2.node67.grenoble.senslab.info
+
+echo " ================= SFI.PY SLICES ============="
+sfi.py slices
+
+echo " ================= SFI.PY STATUS SLICE ============="
+sfi.py status senslab2.avakian_slice
+
+echo " ================= SFI.PY CREATE SLICE ============="
+sfi.py create senslab2.avakian_slice /home/savakian/flab-sfa/avakian_adv.rspec
+
+# echo " ================= SFI.PY DELETE SLICE ============="
+# sfi.py delete senslab2.avakian_slice
+
+echo "\r\n"
+echo " PYTHON TEST ", $1, $2
peer_gids = []
if not new_hrns:
return
-
trusted_certs_dir = api.config.get_trustedroots_dir()
- for new_hrn in new_hrns:
+ for new_hrn in new_hrns:
if not new_hrn: continue
# the gid for this interface should already be installed
if new_hrn == api.config.SFA_INTERFACE_HRN: continue
if 'sfa' not in server_version:
logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
continue
-
trusted_gids = ReturnValue.get_value(interface.get_trusted_certs())
if trusted_gids:
# the gid we want should be the first one in the list,
hierarchy = Hierarchy()
auth_info = hierarchy.get_interface_auth_info()
server_key_file = auth_info.get_privkey_filename()
- server_cert_file = auth_info.get_gid_filename()
-
+ server_cert_file = auth_info.get_gid_filename()
# ensure interface cert is present in trusted roots dir
trusted_roots = TrustedRoots(config.get_trustedroots_dir())
trusted_roots.add_gid(GID(filename=server_cert_file))
self.client_cred = Credential(string = cred)
self.client_gid = self.client_cred.get_gid_caller()
self.object_gid = self.client_cred.get_gid_object()
-
# make sure the client_gid is not blank
if not self.client_gid:
raise MissingCallerGID(self.client_cred.get_subject())
self.verifyPeerCert(self.peer_cert, self.client_gid)
# make sure the client is allowed to perform the operation
- if operation:
+ if operation:
if not self.client_cred.can_perform(operation):
raise InsufficientRights(operation)
if self.trusted_cert_list:
self.client_cred.verify(self.trusted_cert_file_list, self.config.SFA_CREDENTIAL_SCHEMA)
+
else:
raise MissingTrustedRoots(self.config.get_trustedroots_dir())
# This check does not apply to trusted peers
trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
if hrn and self.client_gid.get_hrn() not in trusted_peers:
+
target_hrn = self.object_gid.get_hrn()
if not hrn == target_hrn:
raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
@param name human readable name to test
"""
object_hrn = self.object_gid.get_hrn()
- if object_hrn == name:
- return
- if name.startswith(object_hrn + "."):
+ #strname = str(name).strip("['']")
+ if object_hrn == name:
+ #if object_hrn == strname:
+ return
+ if name.startswith(object_hrn + ".") :
+ #if strname.startswith((object_hrn + ".")) is True:
return
#if name.startswith(get_authority(name)):
#return
-
+
raise PermissionError(name)
def determine_user_rights(self, caller_hrn, reg_record):
# sfa should not depend on sfatables
# if the sfatables.runtime import fails, just define run_sfatables as identity
-
+import sys
try:
from sfatables.runtime import SFATablesRules
"""
if not context_callback:
context_callback = fetch_context
-
+
chain = chain.upper()
rules = SFATablesRules(chain)
if rules.sorted_rule_list:
#!/usr/bin/python
+# just checking write access on repo
import sys
import unittest