import subprocess
from datetime import datetime
-from time import gmtime
from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
from sfa.util.sfalogging import logger
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
-from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id, get_leaf
+from sfa.util.xrn import hrn_to_urn
## thierry: everything that is API-related (i.e. handling incoming requests)
from sfa.senslab.OARrestapi import OARrestapi
from sfa.senslab.LDAPapi import LDAPapi
-from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab, \
- JobSenslab
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
+
from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
slab_xrn_object
from sfa.senslab.slabslices import SlabSlices
# GetNodes or GetSites sorts of calls directly
# and thus minimize the differences in the managers with the pl version
class SlabDriver(Driver):
-
+    """ Senslab Driver class, inherited from the generic Driver class.
+
+ Contains methods compliant with the SFA standard and the testbed
+ infrastructure (calls to LDAP and OAR).
+ """
def __init__(self, config):
Driver.__init__ (self, config)
self.config = config
self.hrn = config.SFA_INTERFACE_HRN
-
self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
-
self.oar = OARrestapi()
self.ldap = LDAPapi()
self.time_format = "%Y-%m-%d %H:%M:%S"
- self.db = SlabDB(config,debug = True)
+ self.db = SlabDB(config, debug = True)
self.cache = None
"""
#First get the slice with the slice hrn
- sl = self.GetSlices(slice_filter = slice_hrn, \
+ slice_list = self.GetSlices(slice_filter = slice_hrn, \
slice_filter_type = 'slice_hrn')
- if len(sl) is 0:
+ if len(slice_list) is 0:
raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
- if isinstance(sl,list):
- sl = sl[0]
-
-
- top_level_status = 'unknown'
- nodes_in_slice = sl['node_ids']
+ #Slice has the same slice hrn for each slice in the slice/lease list
+ #So fetch the info on the user once
+ one_slice = slice_list[0]
recuser = dbsession.query(RegRecord).filter_by(record_id = \
- sl['record_id_user']).first()
- sl.update({'user':recuser.hrn})
- if len(nodes_in_slice) is 0:
- raise SliverDoesNotExist("No slivers allocated ")
- else:
- top_level_status = 'ready'
+ one_slice['record_id_user']).first()
- logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
- %s \r\n " %(slice_urn, slice_hrn, sl))
-
- if sl['oar_job_id'] is not []:
- #A job is running on Senslab for this slice
- # report about the local nodes that are in the slice only
-
- nodes_all = self.GetNodes({'hostname':nodes_in_slice},
- ['node_id', 'hostname','site','boot_state'])
- nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+    #Make a list of all the node hostnames in use for this slice
+ slice_nodes_list = []
+ for sl in slice_list:
+ for node in sl['node_ids']:
+ slice_nodes_list.append(node['hostname'])
+    #Get all the corresponding node details
+ nodes_all = self.GetNodes({'hostname':slice_nodes_list},
+ ['node_id', 'hostname','site','boot_state'])
+ nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+
+
+
+ for sl in slice_list:
+ #For compatibility
+ top_level_status = 'empty'
result = {}
+ result.fromkeys(['geni_urn','pl_login','geni_status','geni_resources'],None)
+ result['pl_login'] = recuser.hrn
+ logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
+ %s \r\n " %(slice_urn, slice_hrn, sl))
+ try:
+ nodes_in_slice = sl['node_ids']
+ except KeyError:
+ #No job in the slice
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = []
+ return result
+
+ top_level_status = 'ready'
+
+ #A job is running on Senslab for this slice
+ # report about the local nodes that are in the slice only
+
result['geni_urn'] = slice_urn
- result['pl_login'] = sl['user'] #For compatibility
+
#timestamp = float(sl['startTime']) + float(sl['walltime'])
#result['pl_expires'] = strftime(self.time_format, \
#gmtime(float(timestamp)))
#result['slab_expires'] = strftime(self.time_format,\
- #gmtime(float(timestamp)))
+ #gmtime(float(timestamp)))
resources = []
- for node in nodeall_byhostname:
+ for node in sl['node_ids']:
res = {}
#res['slab_hostname'] = node['hostname']
#res['slab_boot_state'] = node['boot_state']
- res['pl_hostname'] = nodeall_byhostname[node]['hostname']
- res['pl_boot_state'] = nodeall_byhostname[node]['boot_state']
+ res['pl_hostname'] = node['hostname']
+ res['pl_boot_state'] = nodeall_byhostname[node['hostname']]['boot_state']
#res['pl_last_contact'] = strftime(self.time_format, \
#gmtime(float(timestamp)))
- sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], \
- nodeall_byhostname[node]['node_id'])
+ sliver_id = Xrn(slice_urn, type='slice', \
+ id=nodeall_byhostname[node['hostname']]['node_id'], \
+ authority=self.hrn).urn
+
res['geni_urn'] = sliver_id
- if nodeall_byhostname[node]['boot_state'] == 'Alive':
+ if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
res['geni_status'] = 'ready'
else:
result['geni_status'] = top_level_status
result['geni_resources'] = resources
logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
- %(resources,res))
+ %(resources,res))
return result
-
-
- def synchronize_oar_and_slice_table(self, slice_hrn = None):
- #Get list of leases
- oar_leases_list = self.GetReservedNodes()
-
- logger.debug("SLABDRIVER \tsynchronize_oar_and_slice_table :\
- oar_leases_list %s\r\n" %( oar_leases_list))
- #Get list of slices/leases . multiple entry per user depending
- #on number of jobs
- #At this point we don't have the slice_hrn so that's why
- #we are calling Getslices, which holds a field with slice_hrn
-
- if slice_hrn :
- sfa_slices_list = self.GetSlices(slice_filter = slice_hrn, \
- slice_filter_type = 'slice_hrn')
- self.synchronize_oar_and_slice_table_for_slice_hrn(slice_hrn, \
- oar_leases_list, sfa_slices_list)
- else :
- sfa_slices_list = self.GetSlices()
-
- sfa_slices_dict_by_slice_hrn = {}
- for sfa_slice in sfa_slices_list:
- if sfa_slice['slice_hrn'] not in sfa_slices_dict_by_slice_hrn:
- sfa_slices_dict_by_slice_hrn[sfa_slice['slice_hrn']] = []
-
- sfa_slices_dict_by_slice_hrn[sfa_slice['slice_hrn']].\
- append(sfa_slice)
-
-
- for slice_hrn in sfa_slices_dict_by_slice_hrn:
- list_slices_sfa = sfa_slices_dict_by_slice_hrn[slice_hrn]
- self.synchronize_oar_and_slice_table_for_slice_hrn(slice_hrn, \
- oar_leases_list, list_slices_sfa)
-
- return
-
-
- def synchronize_oar_and_slice_table_for_slice_hrn(self,slice_hrn, \
- oar_leases_list, sfa_slices_list):
-
- #Get list of slices/leases .
- #multiple entry per user depending on number of jobs
-
- sfa_slices_dict = {}
- oar_leases_dict = {}
- login = slice_hrn.split(".")[1].split("_")[0]
-
- #Create dictionnaries based on the tuple user login/ job id
- #for the leases list and the slices list
-
- for sl in sfa_slices_list:
- if sl['oar_job_id'] != [] :
- #one entry in the dictionnary for each jobid/login, one login
- #can have multiple jobs running
- for oar_jobid in sl['oar_job_id']:
- if (login, oar_jobid) not in sfa_slices_dict:
- sfa_slices_dict[(login,oar_jobid)] = sl
-
- for lease in oar_leases_list:
- if (lease['user'], lease['lease_id']) not in oar_leases_dict:
- oar_leases_dict[(lease['user'], lease['lease_id'])] = lease
-
- #Find missing entries in the sfa slices list dict by comparing
- #the keys in both dictionnaries
- #Add the missing entries in the slice sneslab table
-
- for lease in oar_leases_dict :
- if lease not in sfa_slices_dict and login == lease[0]:
- #if lease in GetReservedNodes not in GetSlices
- #and the login of the job running matches then update the db
- #for this login
- #First get the list of nodes hostnames for this job
- oar_reserved_nodes_listdict = \
- oar_leases_dict[lease]['reserved_nodes']
- oar_reserved_nodes_list = []
- for node_dict in oar_reserved_nodes_listdict:
- oar_reserved_nodes_list.append(node_dict['hostname'])
- #And update the db with slice hrn, job id and node list
- self.db.add_job(slice_hrn, lease[1], oar_reserved_nodes_list)
-
- for lease in sfa_slices_dict:
- #Job is now terminated or in Error,
- #either way ot is not going to run again
- #Remove it from the db
- if lease not in oar_leases_dict:
- self.db.delete_job( slice_hrn, lease[1])
- return
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
users, options):
# parse rspec
rspec = RSpec(rspec_string)
- logger.debug("SLABDRIVER.PY \tcreate_sliver \trspec.version %s " \
- %(rspec.version))
+ logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
+ %(rspec.version,slice_record))
- self.synchronize_oar_and_slice_table(slice_hrn)
+ #self.synchronize_oar_and_slice_table(slice_hrn)
# ensure site record exists?
# ensure slice record exists
#Removed options to verify_slice SA 14/08/12
if not sfa_slice_list:
return 1
- sfa_slice = sfa_slice_list[0]
+ #Delete all in the slice
+ for sfa_slice in sfa_slice_list:
+
- logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
- slices = SlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
- #TODO delete_sliver SA : UnBindObjectFromPeer should be
- #used when there is another
- #senslab testbed, which is not the case 14/08/12 .
+ logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
+ slices = SlabSlices(self)
+ # determine if this is a peer slice
- logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
- try:
- if peer:
- self.UnBindObjectFromPeer('slice', \
- sfa_slice['record_id_slice'], peer,None)
- self.DeleteSliceFromNodes(sfa_slice)
- finally:
- if peer:
- self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
- peer, sfa_slice['peer_slice_id'])
- return 1
+ peer = slices.get_peer(slice_hrn)
+ #TODO delete_sliver SA : UnBindObjectFromPeer should be
+ #used when there is another
+ #senslab testbed, which is not the case 14/08/12 .
+
+ logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
+ try:
+ if peer:
+ self.UnBindObjectFromPeer('slice', \
+ sfa_slice['record_id_slice'], peer,None)
+ self.DeleteSliceFromNodes(sfa_slice)
+ finally:
+ if peer:
+ self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
+ peer, sfa_slice['peer_slice_id'])
+ return 1
def AddSlice(self, slice_record):
return slice_urns
- #No site or node register supported
+
def register (self, sfa_record, hrn, pub_key):
- record_type = sfa_record['type']
- slab_record = self.sfa_fields_to_slab_fields(record_type, hrn, \
- sfa_record)
-
-
- if record_type == 'slice':
- acceptable_fields = ['url', 'instantiation', 'name', 'description']
- for key in slab_record.keys():
- if key not in acceptable_fields:
- slab_record.pop(key)
- logger.debug("SLABDRIVER.PY register")
- slices = self.GetSlices(slice_filter =slab_record['hrn'], \
- slice_filter_type = 'slice_hrn')
- if not slices:
- pointer = self.AddSlice(slab_record)
- else:
- pointer = slices[0]['slice_id']
-
- elif record_type == 'user':
- persons = self.GetPersons([sfa_record])
- #persons = self.GetPersons([sfa_record['hrn']])
- if not persons:
- pointer = self.AddPerson(dict(sfa_record))
- #add in LDAP
- else:
- pointer = persons[0]['person_id']
-
- #Does this make sense to senslab ?
- #if 'enabled' in sfa_record and sfa_record['enabled']:
- #self.UpdatePerson(pointer, \
- #{'enabled': sfa_record['enabled']})
-
- #TODO register Change this AddPersonToSite stuff 05/07/2012 SA
- # add this person to the site only if
- # she is being added for the first
- # time by sfa and doesnt already exist in plc
- if not persons or not persons[0]['site_ids']:
- login_base = get_leaf(sfa_record['authority'])
- self.AddPersonToSite(pointer, login_base)
-
- # What roles should this user have?
- #TODO : DElete this AddRoleToPerson 04/07/2012 SA
- #Function prototype is :
- #AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email)
- #what's the pointer doing here?
- self.AddRoleToPerson('user', pointer)
- # Add the user's key
- if pub_key:
- self.AddPersonKey(pointer, {'key_type' : 'ssh', \
- 'key' : pub_key})
-
- #No node adding outside OAR
-
- return pointer
+ """
+ Adding new user, slice, node or site should not be handled
+ by SFA.
+
+ Adding nodes = OAR
+ Adding users = LDAP Senslab
+ Adding slice = Import from LDAP users
+ Adding site = OAR
+ """
+ return -1
#No site or node record update allowed
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
reqdict['method'] = "delete"
reqdict['strval'] = str(job_id)
- self.db.delete_job(slice_hrn, job_id)
+
answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
reqdict,username)
logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
#hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
return hostname_dict_list
- def GetReservedNodes(self):
+ def GetReservedNodes(self,username = None):
#Get the nodes in use and the reserved nodes
reservation_dict_list = \
- self.oar.parser.SendRequest("GET_reserved_nodes")
+ self.oar.parser.SendRequest("GET_reserved_nodes", username = username)
for resa in reservation_dict_list:
return return_site_list
- #warning return_fields_list paramr emoved (Not used)
+
+
+
def GetSlices(self, slice_filter = None, slice_filter_type = None):
#def GetSlices(self, slice_filter = None, slice_filter_type = None, \
#return_fields_list = None):
slicerec = {}
slicerec_dict = {}
authorized_filter_types_list = ['slice_hrn', 'record_id_user']
+ slicerec_dictlist = []
+
if slice_filter_type in authorized_filter_types_list:
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- login = slice_filter.split(".")[1].split("_")[0]
-
- #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
- #otherwise will mess up the RegRecord in Resolve, don't know
- #why - SA 08/08/2012
-
- #Only one entry for one user = one slice in slice_senslab table
- slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
- slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
-
- if slicerec is None:
- return []
- #slicerec_dictlist = []
- slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
- if login is None :
- login = slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
-
- #for record in slicerec:
- #slicerec_dictlist.append(record.dump_sqlalchemyobj_to_dict())
- #if login is None :
- #login = slicerec_dictlist[0]['slice_hrn'].split(".")[1].split("_")[0]
-
- #One slice can have multiple jobs
- sqljob_list = slab_dbsession.query(JobSenslab).filter_by( slice_hrn=slicerec_dict['slice_hrn']).all()
- job_list = []
- for job in sqljob_list:
- job_list.append(job.dump_sqlalchemyobj_to_dict())
+
+ def __get_slice_records(slice_filter = None, slice_filter_type = None):
+
+ login = None
+ #Get list of slices based on the slice hrn
+ if slice_filter_type == 'slice_hrn':
+
+ login = slice_filter.split(".")[1].split("_")[0]
+
+ #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
+ #otherwise will mess up the RegRecord in Resolve, don't know
+ #why - SA 08/08/2012
+
+ #Only one entry for one user = one slice in slice_senslab table
+ slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
+
+ #Get slice based on user id
+ if slice_filter_type == 'record_id_user':
+ slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
+
+ if slicerec is None:
+ return login, []
+ else:
+ fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
+
+ if login is None :
+ login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
+ return login, fixed_slicerec_dict
- logger.debug("\r\n SLABDRIVER \tGetSlices login %s \
+
+
+
+ login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
+ logger.debug(" SLABDRIVER \tGetSlices login %s \
slice record %s" \
- %(login, slicerec_dict))
+ %(login, fixed_slicerec_dict))
+
- #Several jobs for one slice
- slicerec_dict['oar_job_id'] = []
- for job in job_list :
- #if slicerec_dict['oar_job_id'] is not -1:
+
+ #One slice can have multiple jobs
+
+ leases_list = self.GetReservedNodes(username = login)
+ #If no job is running or no job scheduled
+ if leases_list == [] :
+ return [fixed_slicerec_dict]
+
+ #Several jobs for one slice
+ for lease in leases_list :
+ slicerec_dict = {}
+
+
#Check with OAR the status of the job if a job id is in
#the slice record
- rslt = self.GetJobsResources(job['oar_job_id'], \
- username = login)
- logger.debug("SLABDRIVER.PY \tGetSlices rslt fromn GetJobsResources %s"\
- %(rslt))
- if rslt :
- slicerec_dict['oar_job_id'].append(job['oar_job_id'])
- slicerec_dict.update(rslt)
- slicerec_dict.update({'hrn':\
- str(slicerec_dict['slice_hrn'])})
- #If GetJobsResources is empty, this means the job is
- #now in the 'Terminated' state
- #Update the slice record
- else :
- self.db.delete_job(slice_filter, job['oar_job_id'])
- slicerec_dict.\
- update({'hrn':str(slicerec_dict['slice_hrn'])})
-
- try:
- slicerec_dict['node_ids'] = job['node_list']
- except KeyError:
- pass
- logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dict %s"\
- %(slicerec_dict))
-
- return [slicerec_dict]
+ slicerec_dict['oar_job_id'] = lease['lease_id']
+ slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+ slicerec_dict.update(fixed_slicerec_dict)
+ slicerec_dict.update({'hrn':\
+ str(fixed_slicerec_dict['slice_hrn'])})
+
+
+ slicerec_dictlist.append(slicerec_dict)
+ logger.debug("SLABDRIVER.PY \tGetSlices slicerec_dict %s slicerec_dictlist %s" %(slicerec_dict, slicerec_dictlist))
+
+ logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dictlist %s"\
+ %(slicerec_dictlist))
+
+ return slicerec_dictlist
+
else:
+
slice_list = slab_dbsession.query(SliceSenslab).all()
- sqljob_list = slab_dbsession.query(JobSenslab).all()
+ leases_list = self.GetReservedNodes()
- job_list = []
- for job in sqljob_list:
- job_list.append(job.dump_sqlalchemyobj_to_dict())
-
+
+ slicerec_dictlist = []
return_slice_list = []
for record in slice_list:
return_slice_list.append(record.dump_sqlalchemyobj_to_dict())
- for slicerec_dict in return_slice_list:
- slicerec_dict['oar_job_id'] = []
- for job in job_list:
- if slicerec_dict['slice_hrn'] in job:
- slicerec_dict['oar_job_id'].append(job['oar_job_id'])
-
+ for fixed_slicerec_dict in return_slice_list:
+ slicerec_dict = {}
+ owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
+ for lease in leases_list:
+ if owner == lease['user']:
+ slicerec_dict['oar_job_id'] = lease['lease_id']
+ slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+ slicerec_dict.update(fixed_slicerec_dict)
+ slicerec_dict.update({'hrn':\
+ str(fixed_slicerec_dict['slice_hrn'])})
+ slicerec_dictlist.append(slicerec_dict)
+
logger.debug("SLABDRIVER.PY \tGetSlices RETURN slices %s \
slice_filter %s " %(return_slice_list, slice_filter))
#return_slice_list = parse_filter(sliceslist, \
#slice_filter,'slice', return_fields_list)
- return return_slice_list
-
-
-
+ return slicerec_dictlist
def testbed_name (self): return self.hrn
if jobid :
logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
- self.db.add_job( slice_name, jobid, added_nodes)
+
__configure_experiment(jobid, added_nodes)
__launch_senslab_experiment(jobid)
#Delete the jobs from job_senslab table
def DeleteSliceFromNodes(self, slice_record):
- for job_id in slice_record['oar_job_id']:
- self.DeleteJobs(job_id, slice_record['hrn'])
+ self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
return
record.update({'PI':[recuser.hrn],
'researcher': [recuser.hrn],
'name':record['hrn'],
- 'oar_job_id':[rec['oar_job_id'] for rec in recslice_list],
+ 'oar_job_id':[],
'node_ids': [],
'person_ids':[recslice_list[0]['record_id_user']],
'geni_urn':'', #For client_helper.py compatibility
'keys':'', #For client_helper.py compatibility
'key_ids':''}) #For client_helper.py compatibility
+
+ try:
+ for rec in recslice_list:
+ record['oar_job_id'].append(rec['oar_job_id'])
+ except KeyError:
+ pass
- #for rec in recslice_list:
- #record['oar_job_id'].append(rec['oar_job_id'])
logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
if str(record['type']) == 'user':
'researcher': [recuser.hrn],
'name':record['hrn'],
'node_ids': [],
- 'oar_job_id': [rec['oar_job_id'] for rec in recslice_list],
- 'person_ids':[recslice_list[0]['record_id_user']]})
+ 'oar_job_id': [],
+ 'person_ids':[recslice_list[0]['record_id_user']]})
+ try:
+ for rec in recslice_list:
+ recslice['oar_job_id'].append(rec['oar_job_id'])
+ except KeyError:
+ pass
+
recslice.update({'type':'slice', \
'hrn':recslice_list[0]['slice_hrn']})
- #for rec in recslice_list:
- #recslice['oar_job_id'].append(rec['oar_job_id'])
+
#GetPersons takes [] as filters
#user_slab = self.GetPersons([{'hrn':recuser.hrn}])
return
#TODO AddPerson 04/07/2012 SA
- def AddPerson(self, auth, person_fields=None):
- """Adds a new account. Any fields specified in person_fields are used,
+ #def AddPerson(self, auth, person_fields=None):
+ def AddPerson(self, record):#TODO fixing 28/08//2012 SA
+ """Adds a new account. Any fields specified in records are used,
otherwise defaults are used.
Accounts are disabled by default. To enable an account,
use UpdatePerson().
FROM PLC API DOC
"""
- logger.warning("SLABDRIVER AddPerson EMPTY - DO NOTHING \r\n ")
+ ret = self.ldap.LdapAddUser(record)
+ logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
return
#TODO AddPersonToSite 04/07/2012 SA