import subprocess
+import os
from datetime import datetime
from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
from sfa.util.sfalogging import logger
-
from sfa.storage.alchemy import dbsession
from sfa.storage.model import RegRecord, RegUser, RegSlice
-
-from sfa.trust.credential import Credential
+from sqlalchemy.orm import joinedload
from sfa.managers.driver import Driver
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
-from sfa.util.xrn import hrn_to_urn, get_authority
+from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
## thierry: everything that is API-related (i.e. handling incoming requests)
from sfa.senslab.OARrestapi import OARrestapi
from sfa.senslab.LDAPapi import LDAPapi
-from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SenslabXP
+
from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
slab_xrn_object
# this inheritance scheme is so that the driver object can receive
# GetNodes or GetSites sorts of calls directly
# and thus minimize the differences in the managers with the pl version
+
+
+
class SlabDriver(Driver):
""" Senslab Driver class inherited from Driver generic class.
self.oar = OARrestapi()
self.ldap = LDAPapi()
self.time_format = "%Y-%m-%d %H:%M:%S"
- self.db = SlabDB(config, debug = True)
+ self.db = SlabDB(config, debug = False)
+ self.grain = 600 # 10 mins lease
self.cache = None
if len(slice_list) is 0:
raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
- #Slice has the same slice hrn for each slice in the slice/lease list
- #So fetch the info on the user once
+        #Used for fetching the user info which comes along the slice info
one_slice = slice_list[0]
- recuser = dbsession.query(RegRecord).filter_by(record_id = \
- one_slice['record_id_user']).first()
+
#Make a list of all the nodes hostnames in use for this slice
slice_nodes_list = []
- for single_slice in slice_list:
- for node in single_slice['node_ids']:
- slice_nodes_list.append(node['hostname'])
+ #for single_slice in slice_list:
+ #for node in single_slice['node_ids']:
+ #slice_nodes_list.append(node['hostname'])
+ for node in one_slice:
+ slice_nodes_list.append(node['hostname'])
#Get all the corresponding nodes details
nodes_all = self.GetNodes({'hostname':slice_nodes_list},
result = {}
result.fromkeys(\
['geni_urn','pl_login','geni_status','geni_resources'], None)
- result['pl_login'] = recuser.hrn
+ result['pl_login'] = one_slice['reg_researchers']['hrn']
logger.debug("Slabdriver - sliver_status Sliver status \
urn %s hrn %s single_slice %s \r\n " \
%(slice_urn, slice_hrn, single_slice))
- try:
- nodes_in_slice = single_slice['node_ids']
- except KeyError:
+
+ if 'node_ids' not in single_slice:
#No job in the slice
result['geni_status'] = top_level_status
result['geni_resources'] = []
result['geni_resources'] = resources
logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
%(resources,res))
- return result
-
- def get_user(self, hrn):
+ return result
+
+ @staticmethod
+ def get_user( hrn):
+ #def get_user(self, hrn):
return dbsession.query(RegRecord).filter_by(hrn = hrn).first()
if users:
slice_record = users[0].get('slice_record', {})
- logger.debug("SLABDRIVER.PY \t create_sliver \t\
- slice_record %s \r\n \r\n users %s" \
- %(slice_record, users))
+ logger.debug("SLABDRIVER.PY \t ===============create_sliver \t\
+ creds %s \r\n \r\n users %s" \
+ %(creds, users))
slice_record['user'] = {'keys':users[0]['keys'], \
'email':users[0]['email'], \
'hrn':slice_record['reg-researchers'][0]}
rspec.version.get_slice_attributes()
logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
-
-
-
-
# add/remove slice from nodes
requested_slivers = [node.get('component_id') \
requested_lease_list = []
logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
- rspec_requested_leases = rspec.version.get_leases()
+
for lease in rspec.version.get_leases():
single_requested_lease = {}
logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
single_requested_lease['start_time'] = \
lease.get('start_time')
single_requested_lease['duration'] = lease.get('duration')
-
- requested_lease_list.append(single_requested_lease)
-
- logger.debug("SLABDRIVER.PY \tcreate_sliver APRESLEASE" )
- #dCreate dict of leases by start_time, regrouping nodes reserved
+ #Check the experiment's duration is valid before adding
+ #the lease to the requested leases list
+ duration_in_seconds = \
+ int(single_requested_lease['duration'])*60
+ if duration_in_seconds > self.GetLeaseGranularity():
+ requested_lease_list.append(single_requested_lease)
+
+ #Create dict of leases by start_time, regrouping nodes reserved
#at the same
#time, for the same amount of time = one job on OAR
requested_job_dict = {}
slices.verify_slice_leases(sfa_slice, \
requested_job_dict, peer)
- return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+ return aggregate.get_rspec(slice_xrn=slice_urn, login=sfa_slice['login'], version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
peer, sfa_slice['peer_slice_id'])
return 1
-
- def AddSlice(self, slice_record, user_record):
- """Add slice to the sfa tables and senslab table only if the user
- already exists in senslab database(user already registered in LDAP).
- There is no way to separate adding the slice to the tesbed
- and then importing it from the testbed to SFA because of
- senslab's architecture. Therefore, sfa tables are updated here.
+ @staticmethod
+ def AddSlice(slice_record, user_record):
+ """Add slice to the sfa tables. Called by verify_slice
+ during lease/sliver creation.
"""
sfa_record = RegSlice(hrn=slice_record['slice_hrn'],
dbsession.commit()
#Update the senslab table with the new slice
- slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
- record_id_slice = sfa_record.record_id , \
- record_id_user = slice_record['record_id_user'], \
- peer_authority = slice_record['peer_authority'])
+ #slab_slice = SenslabXP( slice_hrn = slice_record['slice_hrn'], \
+ #record_id_slice = sfa_record.record_id , \
+ #record_id_user = slice_record['record_id_user'], \
+ #peer_authority = slice_record['peer_authority'])
- logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s \
- slab_slice %s sfa_record %s" \
- %(slice_record,slab_slice, sfa_record))
- slab_dbsession.add(slab_slice)
- slab_dbsession.commit()
+ #logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s \
+ #slab_slice %s sfa_record %s" \
+ #%(slice_record,slab_slice, sfa_record))
+ #slab_dbsession.add(slab_slice)
+ #slab_dbsession.commit()
return
# first 2 args are None in case of resource discovery
if options.get('info'):
version_string = version_string + "_" + \
options.get('info', 'default')
+
+ # Adding the list_leases option to the caching key
+ if options.get('list_leases'):
+ version_string = version_string + "_"+options.get('list_leases', 'default')
+
+ # Adding geni_available to caching key
+ if options.get('geni_available'):
+ version_string = version_string + "_" + str(options.get('geni_available'))
# look in cache first
#if cached_requested and self.cache and not slice_hrn:
#panos: passing user-defined options
aggregate = SlabAggregate(self)
- origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
- options.update({'origin_hrn':origin_hrn})
+ #origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+ #options.update({'origin_hrn':origin_hrn})
rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
version=rspec_version, options=options)
slices = self.GetSlices()
logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
- slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
+ slice_hrns = [slab_slice['hrn'] for slab_slice in slices]
slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
for slice_hrn in slice_hrns]
if new_key:
# must check this key against the previous one if it exists
- persons = self.GetPersons([pointer], ['key_ids'])
+ persons = self.GetPersons(['key_ids'])
person = persons[0]
keys = person['key_ids']
keys = self.GetKeys(person['key_ids'])
- #TODO clean GetPeers. 05/07/12SA
- def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
+ #TODO clean GetPeers. 05/07/12SA
+ @staticmethod
+ def GetPeers ( auth = None, peer_filter=None, return_fields_list=None):
existing_records = {}
existing_hrns_by_types = {}
answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
reqdict,username)
logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
- username %s" %(job_id,answer, username))
+ username %s" %(job_id, answer, username))
return answer
#hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
return hostname_dict_list
- def GetReservedNodes(self,username = None):
+ def GetReservedNodes(self, username = None):
#Get the nodes in use and the reserved nodes
reservation_dict_list = \
self.oar.parser.SendRequest("GET_reserved_nodes", \
return return_site_list
+ @staticmethod
+ def _sql_get_slice_info( slice_filter ):
+ #DO NOT USE RegSlice - reg_researchers to get the hrn
+ #of the user otherwise will mess up the RegRecord in
+ #Resolve, don't know why - SA 08/08/2012
+
+ #Only one entry for one user = one slice in slab_xp table
+ #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
+ raw_slicerec = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn = slice_filter).first()
+ #raw_slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
+ if raw_slicerec:
+ #load_reg_researcher
+ #raw_slicerec.reg_researchers
+ raw_slicerec = raw_slicerec.__dict__
+ logger.debug(" SLABDRIVER \t get_slice_info slice_filter %s \
+ raw_slicerec %s"%(slice_filter, raw_slicerec))
+ slicerec = raw_slicerec
+ #only one researcher per slice so take the first one
+ #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
+ #del slicerec['reg_researchers']['_sa_instance_state']
+ return slicerec
+
+ else :
+ return None
+
+ @staticmethod
+ def _sql_get_slice_info_from_user(slice_filter ):
+ #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
+ raw_slicerec = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id = slice_filter).first()
+ #raw_slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
+ #Put it in correct order
+ user_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'email', 'pointer']
+ slice_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'pointer']
+ if raw_slicerec:
+ #raw_slicerec.reg_slices_as_researcher
+ raw_slicerec = raw_slicerec.__dict__
+ slicerec = {}
+ slicerec = \
+ dict([(k, raw_slicerec['reg_slices_as_researcher'][0].__dict__[k]) \
+ for k in slice_needed_fields])
+ slicerec['reg_researchers'] = dict([(k, raw_slicerec[k]) \
+ for k in user_needed_fields])
+ #TODO Handle multiple slices for one user SA 10/12/12
+ #for now only take the first slice record associated to the rec user
+ ##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
+ #del raw_slicerec['reg_slices_as_researcher']
+ #slicerec['reg_researchers'] = raw_slicerec
+ ##del slicerec['_sa_instance_state']
+
+ return slicerec
+
+ else:
+ return None
+
+ def _get_slice_records(self, slice_filter = None, \
+ slice_filter_type = None):
+
+ #login = None
+
+ #Get list of slices based on the slice hrn
+ if slice_filter_type == 'slice_hrn':
+
+ #if get_authority(slice_filter) == self.root_auth:
+ #login = slice_filter.split(".")[1].split("_")[0]
+
+ slicerec = self._sql_get_slice_info(slice_filter)
+
+ if slicerec is None:
+ return None
+ #return login, None
+
+ #Get slice based on user id
+ if slice_filter_type == 'record_id_user':
+
+ slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
- def GetSlices(self, slice_filter = None, slice_filter_type = None):
+ if slicerec:
+ fixed_slicerec_dict = slicerec
+ #At this point if the there is no login it means
+ #record_id_user filter has been used for filtering
+ #if login is None :
+            ##If the slice record is from senslab
+ #if fixed_slicerec_dict['peer_authority'] is None:
+ #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ #return login, fixed_slicerec_dict
+ return fixed_slicerec_dict
+
+ def GetSlices(self, slice_filter = None, slice_filter_type = None, login=None):
""" Get the slice records from the slab db.
Returns a slice ditc if slice_filter and slice_filter_type
are specified.
specified.
"""
- login = None
+ #login = None
authorized_filter_types_list = ['slice_hrn', 'record_id_user']
return_slicerec_dictlist = []
#First try to get information on the slice based on the filter provided
if slice_filter_type in authorized_filter_types_list:
-
-
- def __get_slice_records(slice_filter = None, \
- slice_filter_type = None):
-
- login = None
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- login = slice_filter.split(".")[1].split("_")[0]
-
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
- #Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in slice_senslab table
- slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
-
- if slicerec is None:
- return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
- slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
-
-
- if slicerec:
- fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
- #At this point if the there is no login it means
- #record_id_user filter has been used for filtering
- if login is None :
- login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
- return login, fixed_slicerec_dict
-
-
-
- login, fixed_slicerec_dict = \
- __get_slice_records(slice_filter, slice_filter_type)
+ fixed_slicerec_dict = \
+ self._get_slice_records(slice_filter, slice_filter_type)
+ #login, fixed_slicerec_dict = \
+ #self._get_slice_records(slice_filter, slice_filter_type)
logger.debug(" SLABDRIVER \tGetSlices login %s \
slice record %s slice_filter %s slice_filter_type %s "\
%(login, fixed_slicerec_dict,slice_filter, slice_filter_type))
#Now we have the slice record fixed_slicerec_dict, get the
#jobs associated to this slice
- leases_list = self.GetReservedNodes(username = login)
-
+ #leases_list = self.GetReservedNodes(username = login)
+ leases_list = self.GetLeases(login = login)
#If no job is running or no job scheduled
#return only the slice record
if leases_list == [] and fixed_slicerec_dict:
slicerec_dict['oar_job_id'] = lease['lease_id']
slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+
#Update lease dict with the slice record
if fixed_slicerec_dict:
+ fixed_slicerec_dict['oar_job_id'] = []
+ fixed_slicerec_dict['oar_job_id'].append(slicerec_dict['oar_job_id'])
slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'hrn':\
- str(fixed_slicerec_dict['slice_hrn'])})
+ #slicerec_dict.update({'hrn':\
+ #str(fixed_slicerec_dict['slice_hrn'])})
return_slicerec_dictlist.append(slicerec_dict)
else:
#Get all slices from the senslab sfa database ,
- #put them in dict format
- query_slice_list = slab_dbsession.query(SliceSenslab).all()
+ #put them in dict format
+ #query_slice_list = dbsession.query(RegRecord).all()
+ query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+ #query_slice_list = dbsession.query(RegRecord).filter_by(type='slice').all()
+ #query_slice_list = slab_dbsession.query(SenslabXP).all()
return_slicerec_dictlist = []
- for record in query_slice_list:
- return_slicerec_dictlist.append(record.dump_sqlalchemyobj_to_dict())
+ for record in query_slice_list:
+ tmp = record.__dict__
+ tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
+ #del tmp['reg_researchers']['_sa_instance_state']
+ return_slicerec_dictlist.append(tmp)
+ #return_slicerec_dictlist.append(record.__dict__)
#Get all the jobs reserved nodes
leases_list = self.GetReservedNodes()
for fixed_slicerec_dict in return_slicerec_dictlist:
slicerec_dict = {}
- owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
+ #Check if the slice belongs to a senslab user
+ if fixed_slicerec_dict['peer_authority'] is None:
+ owner = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ else:
+ owner = None
for lease in leases_list:
if owner == lease['user']:
slicerec_dict['oar_job_id'] = lease['lease_id']
slicerec_dict.update({'node_ids':lease['reserved_nodes']})
slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'hrn':\
- str(fixed_slicerec_dict['slice_hrn'])})
+ #slicerec_dict.update({'hrn':\
+ #str(fixed_slicerec_dict['slice_hrn'])})
#return_slicerec_dictlist.append(slicerec_dict)
fixed_slicerec_dict.update(slicerec_dict)
return return_slicerec_dictlist
- def testbed_name (self): return self.hrn
+ def testbed_name (self):
+ return self.hrn
# 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
def aggregate_version (self):
# @param hrn human readable name
# @param sfa_fields dictionary of SFA fields
# @param slab_fields dictionary of PLC fields (output)
-
- def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
+ @staticmethod
+ def sfa_fields_to_slab_fields(sfa_type, hrn, record):
slab_record = {}
lease_dict['slice_user'] = slice_user
lease_dict['grain'] = self.GetLeaseGranularity()
lease_dict['time_format'] = self.time_format
+
def __create_job_structure_request_for_OAR(lease_dict):
""" Creates the structure needed for a correct POST on OAR.
return reqdict
-
+ logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR slice_user %s\
+ \r\n " %(slice_user))
#Create the request for OAR
reqdict = __create_job_structure_request_for_OAR(lease_dict)
# first step : start the OAR job and update the job
except KeyError:
logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
Impossible to create job %s " %(answer))
- return
+ return None
def __configure_experiment(jobid, added_nodes):
# second step : configure the experiment
# we need to store the nodes in a yaml (well...) file like this :
# [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
- job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
+ tmp_dir = '/tmp/sfa/'
+ if not os.path.exists(tmp_dir):
+ os.makedirs(tmp_dir)
+ job_file = open(tmp_dir + str(jobid) + '.json', 'w')
job_file.write('[')
job_file.write(str(added_nodes[0].strip('node')))
for node in added_nodes[1:len(added_nodes)] :
__configure_experiment(jobid, added_nodes)
__launch_senslab_experiment(jobid)
- return
+ return jobid
+
def AddLeases(self, hostname_list, slice_record, \
lease_start_time, lease_duration):
%( hostname_list, slice_record , lease_start_time, \
lease_duration))
- tmp = slice_record['reg-researchers'][0].split(".")
- username = tmp[(len(tmp)-1)]
- self.LaunchExperimentOnOAR(hostname_list, slice_record['slice_hrn'], \
+ #tmp = slice_record['reg-researchers'][0].split(".")
+ username = slice_record['login']
+ #username = tmp[(len(tmp)-1)]
+ job_id = self.LaunchExperimentOnOAR(hostname_list, slice_record['hrn'], \
lease_start_time, lease_duration, username)
start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
+ end_time = lease_start_time + lease_duration
+
+ import logging, logging.handlers
+ from sfa.util.sfalogging import _SfaLogger
+ logger.debug("SLABDRIVER \r\n \r\n \t AddLeases TURN ON LOGGING SQL %s %s %s "%(slice_record['hrn'], job_id, end_time))
+ sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', level=logging.DEBUG)
+ logger.debug("SLABDRIVER \r\n \r\n \t AddLeases %s %s %s " %(type(slice_record['hrn']), type(job_id), type(end_time)))
+
+ slab_ex_row = SenslabXP(slice_hrn = slice_record['hrn'], \
+ job_id = job_id, end_time= end_time)
+
+ logger.debug("SLABDRIVER \r\n \r\n \t AddLeases slab_ex_row %s" \
+ %(slab_ex_row))
+ slab_dbsession.add(slab_ex_row)
+ slab_dbsession.commit()
+
logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
return
#Delete the jobs from job_senslab table
def DeleteSliceFromNodes(self, slice_record):
-
- self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
+ for job_id in slice_record['oar_job_id']:
+ self.DeleteJobs(job_id, slice_record['hrn'])
return
def GetLeaseGranularity(self):
""" Returns the granularity of Senslab testbed.
OAR returns seconds for experiments duration.
- Defined in seconds. """
+ Defined in seconds.
+ Experiments which last less than 10 min are invalid"""
+
+
+ return self.grain
+
+
+ @staticmethod
+ def update_jobs_in_slabdb( job_oar_list, jobs_psql):
+ #Get all the entries in slab_xp table
+
+
+ jobs_psql = set(jobs_psql)
+ kept_jobs = set(job_oar_list).intersection(jobs_psql)
+ logger.debug ( "\r\n \t\ update_jobs_in_slabdb jobs_psql %s \r\n \t \
+ job_oar_list %s kept_jobs %s "%(jobs_psql, job_oar_list, kept_jobs))
+ deleted_jobs = set(jobs_psql).difference(kept_jobs)
+ deleted_jobs = list(deleted_jobs)
+ if len(deleted_jobs) > 0:
+ slab_dbsession.query(SenslabXP).filter(SenslabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
+ slab_dbsession.commit()
+
+ return
+
- grain = 60
- return grain
- def GetLeases(self, lease_filter_dict=None):
+ def GetLeases(self, lease_filter_dict=None, login=None):
- unfiltered_reservation_list = self.GetReservedNodes()
+ unfiltered_reservation_list = self.GetReservedNodes(login)
reservation_list = []
#Find the slice associated with this user senslab ldap uid
- logger.debug(" SLABDRIVER.PY \tGetLeases unfiltered_reservation_list %s " %(unfiltered_reservation_list))
+ logger.debug(" SLABDRIVER.PY \tGetLeases login %s\
+ unfiltered_reservation_list %s " %(login, unfiltered_reservation_list))
#Create user dict first to avoid looking several times for
#the same user in LDAP SA 27/07/12
resa_user_dict = {}
+ job_oar_list = []
+
+ jobs_psql_query = slab_dbsession.query(SenslabXP).all()
+ jobs_psql_dict = [ (row.job_id, row.__dict__ )for row in jobs_psql_query ]
+ jobs_psql_dict = dict(jobs_psql_dict)
+ logger.debug("SLABDRIVER \tGetLeases jobs_psql_dict %s"\
+ %(jobs_psql_dict))
+ jobs_psql_id_list = [ row.job_id for row in jobs_psql_query ]
+
+
+
for resa in unfiltered_reservation_list:
logger.debug("SLABDRIVER \tGetLeases USER %s"\
- %(resa['user']))
- if resa['user'] not in resa_user_dict:
- logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
- ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
- if ldap_info:
- ldap_info = ldap_info[0][1]
- user = dbsession.query(RegUser).filter_by(email = \
- ldap_info['mail'][0]).first()
- #Separated in case user not in database :
- #record_id not defined SA 17/07//12
- query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
- if query_slice_info:
- slice_info = query_slice_info.first()
- else:
- slice_info = None
+ %(resa['user']))
+            #Construct list of jobs (running, waiting..) in OAR
+ job_oar_list.append(resa['lease_id'])
+ #If there is information on the job in SLAB DB (slice used and job id)
+ if resa['lease_id'] in jobs_psql_dict:
+ job_info = jobs_psql_dict[resa['lease_id']]
+ logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
+ %(resa_user_dict))
+ resa['slice_hrn'] = job_info['slice_hrn']
+ resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+
+ #Assume it is a senslab slice:
+ else:
+ resa['slice_id'] = hrn_to_urn(self.root_auth+'.'+ resa['user'] +"_slice" , 'slice')
+ #if resa['user'] not in resa_user_dict:
+ #logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
+ #ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
+ #if ldap_info:
+ #ldap_info = ldap_info[0][1]
+ ##Get the backref :relationship table reg-researchers
+ #user = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(email = \
+ #ldap_info['mail'][0])
+ #if user:
+ #user = user.first()
+ #user = user.__dict__
+ #slice_info = user['reg_slices_as_researcher'][0].__dict__
+ ##Separated in case user not in database :
+ ##record_id not defined SA 17/07//12
+
+ ##query_slice_info = slab_dbsession.query(SenslabXP).filter_by(record_id_user = user.record_id)
+ ##if query_slice_info:
+ ##slice_info = query_slice_info.first()
+ ##else:
+ ##slice_info = None
- resa_user_dict[resa['user']] = {}
- resa_user_dict[resa['user']]['ldap_info'] = user
- resa_user_dict[resa['user']]['slice_info'] = slice_info
- #else:
- #del unfiltered_reservation_list[unfiltered_reservation_list.index(resa)]
-
+ #resa_user_dict[resa['user']] = {}
+ #resa_user_dict[resa['user']]['ldap_info'] = user
+ #resa_user_dict[resa['user']]['slice_info'] = slice_info
-
- logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
- %(resa_user_dict))
- for resa in unfiltered_reservation_list:
-
-
- #Put the slice_urn
- if resa['user'] in resa_user_dict:
- resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
- #Put the slice_urn
- #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
+ #resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info']['hrn']
+ #resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+
+
resa['component_id_list'] = []
+ resa['hrn'] = Xrn(resa['slice_id']).get_hrn()
#Transform the hostnames into urns (component ids)
for node in resa['reserved_nodes']:
#resa['component_id_list'].append(hostname_to_urn(self.hrn, \
#self.root_auth, node['hostname']))
slab_xrn = slab_xrn_object(self.root_auth, node)
resa['component_id_list'].append(slab_xrn.urn)
-
- #Filter the reservation list if necessary
- #Returns all the leases associated with a given slice
- if lease_filter_dict:
- logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
- %(lease_filter_dict))
- for resa in unfiltered_reservation_list:
- if lease_filter_dict['name'] == resa['slice_hrn']:
- reservation_list.append(resa)
- else:
+
+ if lease_filter_dict:
+ logger.debug("SLABDRIVER \tGetLeases resa_ %s \r\n leasefilter %s"\
+ %(resa,lease_filter_dict))
+
+ if lease_filter_dict['name'] == resa['hrn']:
+ reservation_list.append(resa)
+
+ if lease_filter_dict is None:
reservation_list = unfiltered_reservation_list
+ #else:
+ #del unfiltered_reservation_list[unfiltered_reservation_list.index(resa)]
+
+
+ self.update_jobs_in_slabdb(job_oar_list, jobs_psql_id_list)
+
+ #for resa in unfiltered_reservation_list:
+
+
+ ##Put the slice_urn
+ #if resa['user'] in resa_user_dict:
+ #resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info']['hrn']
+ #resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+ ##Put the slice_urn
+ ##resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
+ #resa['component_id_list'] = []
+ ##Transform the hostnames into urns (component ids)
+ #for node in resa['reserved_nodes']:
+ ##resa['component_id_list'].append(hostname_to_urn(self.hrn, \
+ ##self.root_auth, node['hostname']))
+ #slab_xrn = slab_xrn_object(self.root_auth, node)
+ #resa['component_id_list'].append(slab_xrn.urn)
+
+ ##Filter the reservation list if necessary
+ ##Returns all the leases associated with a given slice
+ #if lease_filter_dict:
+ #logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
+ #%(lease_filter_dict))
+ #for resa in unfiltered_reservation_list:
+ #if lease_filter_dict['name'] == resa['slice_hrn']:
+ #reservation_list.append(resa)
+ #else:
+ #reservation_list = unfiltered_reservation_list
logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
%(reservation_list))
#about the user of this slice. This kind of
#information is in the Senslab's DB.
if str(record['type']) == 'slice':
- #Get slab slice record.
- recslice_list = self.GetSlices(slice_filter = \
- str(record['hrn']),\
- slice_filter_type = 'slice_hrn')
-
- recuser = dbsession.query(RegRecord).filter_by(record_id = \
- recslice_list[0]['record_id_user']).first()
- logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
- record.update({'PI':[recuser.hrn],
- 'researcher': [recuser.hrn],
+ if 'reg_researchers' in record and \
+ isinstance(record['reg_researchers'], list) :
+ record['reg_researchers'] = record['reg_researchers'][0].__dict__
+ record.update({'PI':[record['reg_researchers']['hrn']],
+ 'researcher': [record['reg_researchers']['hrn']],
'name':record['hrn'],
'oar_job_id':[],
'node_ids': [],
- 'person_ids':[recslice_list[0]['record_id_user']],
+ 'person_ids':[record['reg_researchers']['record_id']],
'geni_urn':'', #For client_helper.py compatibility
'keys':'', #For client_helper.py compatibility
'key_ids':''}) #For client_helper.py compatibility
+
+
+ #Get slab slice record.
+ recslice_list = self.GetSlices(slice_filter = \
+ str(record['hrn']),\
+ slice_filter_type = 'slice_hrn')
+ #recuser = recslice_list[0]['reg_researchers']
+ ##recuser = dbsession.query(RegRecord).filter_by(record_id = \
+ ##recslice_list[0]['record_id_user']).first()
+
+ #record.update({'PI':[recuser['hrn']],
+ #'researcher': [recuser['hrn']],
+ #'name':record['hrn'],
+ #'oar_job_id':[],
+ #'node_ids': [],
+ #'person_ids':[recslice_list[0]['reg_researchers']['record_id']],
+ #'geni_urn':'', #For client_helper.py compatibility
+ #'keys':'', #For client_helper.py compatibility
+ #'key_ids':''}) #For client_helper.py compatibility
+        logger.debug("SLABDRIVER \tfill_record_info \
+            TYPE SLICE RECUSER record['hrn'] %s record['oar_job_id']\
+            %s " %(record['hrn'], record['oar_job_id']))
try:
- for rec in recslice_list:
- record['oar_job_id'].append(rec['oar_job_id'])
+ for rec in recslice_list:
+ logger.debug("SLABDRIVER\r\n \t \t fill_record_info oar_job_id %s " %(rec['oar_job_id']))
+ #record['oar_job_id'].append(rec['oar_job_id'])
+ #del record['_sa_instance_state']
+ del record['reg_researchers']
record['node_ids'] = [ self.root_auth + hostname for hostname in rec['node_ids']]
except KeyError:
pass
logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
+ recslice_list %s \r\n \t RECORD %s \r\n \
+ \r\n" %(recslice_list, record))
if str(record['type']) == 'user':
#The record is a SFA user record.
#Get the information about his slice from Senslab's DB
#Append slice record in records list,
#therefore fetches user and slice info again(one more loop)
#Will update PIs and researcher for the slice
- recuser = dbsession.query(RegRecord).filter_by(record_id = \
- recslice_list[0]['record_id_user']).first()
+ #recuser = dbsession.query(RegRecord).filter_by(record_id = \
+ #recslice_list[0]['record_id_user']).first()
+ recuser = recslice_list[0]['reg_researchers']
logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
recuser %s \r\n \r\n" %(recuser))
recslice = {}
recslice = recslice_list[0]
- recslice.update({'PI':[recuser.hrn],
- 'researcher': [recuser.hrn],
+ recslice.update({'PI':[recuser['hrn']],
+ 'researcher': [recuser['hrn']],
'name':record['hrn'],
'node_ids': [],
'oar_job_id': [],
- 'person_ids':[recslice_list[0]['record_id_user']]})
+ 'person_ids':[recuser['record_id']]})
try:
for rec in recslice_list:
recslice['oar_job_id'].append(rec['oar_job_id'])
pass
recslice.update({'type':'slice', \
- 'hrn':recslice_list[0]['slice_hrn']})
+ 'hrn':recslice_list[0]['hrn']})
#GetPersons takes [] as filters
logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
INFO TO USER records %s" %(record_list))
+
logger.debug("SLABDRIVER.PY \tfill_record_info END \
- #record %s \r\n \r\n " %(record))
+ record %s \r\n \r\n " %(record))
except TypeError, error:
logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
#TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
#04/07/2012 SA
- def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
+ @staticmethod
+ def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
""" This method is a hopefully temporary hack to let the sfa correctly
detach the objects it creates from a remote peer object. This is
needed so that the sfa federation link can work in parallel with
return
#TODO UpdatePerson 04/07/2012 SA
- def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
+ def UpdatePerson(self, slab_hrn, federated_hrn, person_fields=None):
"""Updates a person. Only the fields specified in person_fields
are updated, all other fields are left untouched.
Users and techs can only update themselves. PIs can only update
FROM PLC API DOC
"""
- logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
+ #new_row = FederatedToSenslab(slab_hrn, federated_hrn)
+ #slab_dbsession.add(new_row)
+ #slab_dbsession.commit()
+
+ logger.debug("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
return
#TODO GetKeys 04/07/2012 SA
return
#TODO DeleteKey 04/07/2012 SA
- def DeleteKey(self, auth, key_id):
+ def DeleteKey(self, key_id):
""" Deletes a key.
Non-admins may only delete their own keys.
Returns 1 if successful, faults otherwise.
#TODO : Check rights to delete person
- def DeletePerson(self, auth, person_record):
+ def DeletePerson(self, person_record):
""" Disable an existing account in senslab LDAP.
Users and techs can only delete themselves. PIs can only
delete themselves and other non-PIs at their sites.
return ret
#TODO Check DeleteSlice, check rights 05/07/2012 SA
- def DeleteSlice(self, auth, slice_record):
+ def DeleteSlice(self, slice_record):
""" Deletes the specified slice.
Senslab : Kill the job associated with the slice if there is one
using DeleteSliceFromNodes.
logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
return
+ def __add_person_to_db(self, user_dict):
+
+ check_if_exists = dbsession.query(RegUser).filter_by(email = user_dict['email']).first()
+ #user doesn't exists
+ if not check_if_exists:
+ logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
+ _________________________________________________________________________\
+ " %(user_dict['hrn']))
+ user_record = RegUser(hrn =user_dict['hrn'] , pointer= '-1', authority=get_authority(hrn), \
+ email= user_dict['email'], gid = None)
+ user_record.reg_keys = [RegKey(user_dict['pkey'])]
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ return
+
#TODO AddPerson 04/07/2012 SA
#def AddPerson(self, auth, person_fields=None):
def AddPerson(self, record):#TODO fixing 28/08//2012 SA
"""
ret = self.ldap.LdapAddUser(record)
logger.debug("SLABDRIVER AddPerson return code %s \r\n "%(ret))
+ self.__add_person_to_db(record)
return ret['uid']
#TODO AddPersonToSite 04/07/2012 SA
logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
return
- def DeleteLeases(self, leases_id_list, slice_hrn ):
+ def DeleteLeases(self, leases_id_list, slice_hrn ):
+ logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
+ \r\n " %(leases_id_list, slice_hrn))
for job_id in leases_id_list:
self.DeleteJobs(job_id, slice_hrn)
- logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
- \r\n " %(leases_id_list, slice_hrn))
+
return