class SlabSlices:
rspec_to_slice_tag = {'max_rate':'net_max_rate'}
-
- #def __init__(self, api, ttl = .5, origin_hrn=None):
- #self.api = api
- ##filepath = path + os.sep + filename
- #self.policy = Policy(self.api)
- #self.origin_hrn = origin_hrn
- #self.registry = api.registries[api.hrn]
- #self.credential = api.getCredential()
- #self.nodes = []
- #self.persons = []
-
-
+
+
def __init__(self, driver):
self.driver = driver
- ##Used in SFACE?
- #def get_slivers(self, xrn, node=None):
- #hrn, hrn_type = urn_to_hrn(xrn)
-
- #slice_name = hrn_to_pl_slicename(hrn)
- ## XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
- ## of doing all of this?
- ##return self.api.driver.GetSliceTicket(self.auth, slice_name)
-
-
-
- #sfa_slice = self.driver.GetSlices(slice_filter = slice_name, \
- # slice_filter_type = 'slice_hrn')
-
-
- ## Get user information
- ##TODO
- #alchemy_person = dbsession.query(RegRecord).filter_by(record_id = \
- #sfa_slice['record_id_user']).first()
-
- #slivers = []
- #sliver_attributes = []
-
- #if sfa_slice['oar_job_id'] is not -1:
- #nodes_all = self.driver.GetNodes({'hostname': \
- #sfa_slice['node_ids']},
- #['node_id', 'hostname','site','boot_state'])
- #nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
- #nodes = sfa_slice['node_ids']
-
- #for node in nodes:
- ##for sliver_attribute in filter(lambda a: a['node_id'] == \
- #node['node_id'], slice_tags):
- #sliver_attribute['tagname'] = 'slab-tag'
- #sliver_attribute['value'] = 'slab-value'
- #sliver_attributes.append(sliver_attribute['tagname'])
- #attributes.append({'tagname': sliver_attribute['tagname'],
- #'value': sliver_attribute['value']})
-
- ## set nodegroup slice attributes
- #for slice_tag in filter(lambda a: a['nodegroup_id'] \
- #in node['nodegroup_ids'], slice_tags):
- ## Do not set any nodegroup slice attributes for
- ## which there is at least one sliver attribute
- ## already set.
- #if slice_tag not in slice_tags:
- #attributes.append({'tagname': slice_tag['tagname'],
- #'value': slice_tag['value']})
-
- #for slice_tag in filter(lambda a: a['node_id'] is None, \
- #slice_tags):
- ## Do not set any global slice attributes for
- ## which there is at least one sliver attribute
- ## already set.
- #if slice_tag['tagname'] not in sliver_attributes:
- #attributes.append({'tagname': slice_tag['tagname'],
- #'value': slice_tag['value']})
-
- ## XXX Sanity check; though technically this should
- ## be a system invariant
- ## checked with an assertion
- #if sfa_slice['expires'] > MAXINT: sfa_slice['expires']= MAXINT
-
- #slivers.append({
- #'hrn': hrn,
- #'name': sfa_slice['name'],
- #'slice_id': sfa_slice['slice_id'],
- #'instantiation': sfa_slice['instantiation'],
- #'expires': sfa_slice['expires'],
- #'keys': keys,
- #'attributes': attributes
- #})
-
- #return slivers
-
-
-
-
-
- #return slivers
+
def get_peer(self, xrn):
hrn, hrn_type = urn_to_hrn(xrn)
#Does this slice belong to a local site or a peer senslab site?
sfa_peer = site_authority
return sfa_peer
+
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, kept_leases, \
- peer):
+ def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
#First get the list of current leases from OAR
- leases = self.driver.GetLeases({'name':sfa_slice['name']})
- #leases = self.driver.GetLeases({'name':sfa_slice['name']}, ['lease_id'])
- if leases :
- current_leases = [lease['lease_id'] for lease in leases]
- #Deleted leases are the ones with lease id not declared in the Rspec
- deleted_leases = list(set(current_leases).difference(kept_leases))
-
- try:
- if peer:
- #peer = RegAuyhority object is unsubscriptable
- #TODO :UnBindObjectFromPeer Quick and dirty auth='senslab2 SA 27/07/12
- self.driver.UnBindObjectFromPeer('senslab2', 'slice', \
- sfa_slice['record_id_slice'], peer.hrn)
+ leases = self.driver.GetLeases({'name':sfa_slice['slice_hrn']})
+ logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
+ leases %s "%(requested_jobs_dict, leases ))
+
+ current_nodes_reserved_by_start_time = {}
+ requested_nodes_by_start_time = {}
+ leases_by_start_time = {}
+ reschedule_jobs_dict = {}
+
+
+ #Create reduced dictionary with key start_time and value
+ # the list of nodes
+ #-for the leases already registered by OAR first
+ # then for the new leases requested by the user
+
+ #Leases already scheduled/running in OAR
+ for lease in leases :
+ current_nodes_reserved_by_start_time[lease['t_from']] = \
+ lease['reserved_nodes']
+ leases_by_start_time[lease['t_from']] = lease
+
+
+ #Requested jobs
+ for start_time in requested_jobs_dict:
+ requested_nodes_by_start_time[int(start_time)] = \
+ requested_jobs_dict[start_time]['hostname']
+ #Check if there is any difference between the leases already
+ #registered in OAR and the requested jobs.
+ #Difference could be:
+ #-Lease deleted in the requested jobs
+ #-Added/removed nodes
+ #-Newly added lease
+
+ logger.debug("SLABSLICES verify_slice_leases \
+ requested_nodes_by_start_time %s \
+ "%(requested_nodes_by_start_time ))
+ #Find all deleted leases
+ start_time_list = \
+ list(set(leases_by_start_time.keys()).\
+ difference(requested_nodes_by_start_time.keys()))
+ deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+ for start_time in start_time_list]
+
+
+
+            #Find added or removed nodes in existing leases
+ for start_time in requested_nodes_by_start_time:
+ logger.debug("SLABSLICES verify_slice_leases start_time %s \
+ "%( start_time))
+ if start_time in current_nodes_reserved_by_start_time:
- self.driver.DeleteLeases(deleted_leases, \
- sfa_slice['name'])
-
- #TODO : catch other exception?
- except KeyError:
- logger.log_exc('Failed to add/remove slice leases')
+ if requested_nodes_by_start_time[start_time] == \
+ current_nodes_reserved_by_start_time[start_time]:
+ continue
- #Add new leases
- for start_time in requested_jobs_dict:
- job = requested_jobs_dict[start_time]
- self.driver.AddLeases(job['hostname'], \
- sfa_slice, int(job['start_time']), \
- int(job['duration']))
-
+ else:
+ update_node_set = \
+ set(requested_nodes_by_start_time[start_time])
+ added_nodes = \
+ update_node_set.difference(\
+ current_nodes_reserved_by_start_time[start_time])
+ shared_nodes = \
+ update_node_set.intersection(\
+ current_nodes_reserved_by_start_time[start_time])
+ old_nodes_set = \
+ set(\
+ current_nodes_reserved_by_start_time[start_time])
+ removed_nodes = \
+ old_nodes_set.difference(\
+ requested_nodes_by_start_time[start_time])
+ logger.debug("SLABSLICES verify_slice_leases \
+ shared_nodes %s added_nodes %s removed_nodes %s"\
+ %(shared_nodes, added_nodes,removed_nodes ))
+ #If the lease is modified, delete it before
+ #creating it again.
+ #Add the deleted lease job id in the list
+                        #WARNING :rescheduling does not work if there are already
+ # 2 running/scheduled jobs because deleting a job
+ #takes time SA 18/10/2012
+ if added_nodes or removed_nodes:
+ deleted_leases.append(\
+ leases_by_start_time[start_time]['lease_id'])
+ #Reschedule the job
+ if added_nodes or shared_nodes:
+ reschedule_jobs_dict[str(start_time)] = \
+ requested_jobs_dict[str(start_time)]
+
+ else:
+ #New lease
+
+ job = requested_jobs_dict[str(start_time)]
+ logger.debug("SLABSLICES \
+ NEWLEASE slice %s job %s"\
+ %(sfa_slice, job))
+ self.driver.AddLeases(job['hostname'], \
+ sfa_slice, int(job['start_time']), \
+ int(job['duration']))
+
+ #Deleted leases are the ones with lease id not declared in the Rspec
+ if deleted_leases:
+ self.driver.DeleteLeases(deleted_leases, sfa_slice['slice_hrn'])
+ logger.debug("SLABSLICES \
+ verify_slice_leases slice %s deleted_leases %s"\
+ %(sfa_slice, deleted_leases))
+
+
+ if reschedule_jobs_dict :
+ for start_time in reschedule_jobs_dict:
+ job = reschedule_jobs_dict[start_time]
+ self.driver.AddLeases(job['hostname'], \
+ sfa_slice, int(job['start_time']), \
+ int(job['duration']))
return leases
def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
current_slivers = []
deleted_nodes = []
-
- if sfa_slice['node_ids']:
- nodes = self.driver.GetNodes(sfa_slice['node_ids'], ['hostname'])
+
+ if 'node_ids' in sfa_slice:
+ nodes = self.driver.GetNodes(sfa_slice['list_node_ids'], \
+ ['hostname'])
current_slivers = [node['hostname'] for node in nodes]
# remove nodes not in rspec
deleted_nodes = list(set(current_slivers).\
difference(requested_slivers))
# add nodes from rspec
- #added_nodes = list(set(requested_slivers).difference(current_slivers))
+ #added_nodes = list(set(requested_slivers).\
+ #difference(current_slivers))
+
- #Update the table with the nodes that populate the slice
logger.debug("SLABSLICES \tverify_slice_nodes slice %s\
\r\n \r\n deleted_nodes %s"\
- %(sfa_slice,deleted_nodes))
+ %(sfa_slice, deleted_nodes))
if deleted_nodes:
- self.driver.DeleteSliceFromNodes(sfa_slice['name'], \
- deleted_nodes)
+            #Delete the entire experiment
+ self.driver.DeleteSliceFromNodes(sfa_slice)
+ #self.driver.DeleteSliceFromNodes(sfa_slice['slice_hrn'], \
+ #deleted_nodes)
return nodes
if slices_list:
for sl in slices_list:
- logger.debug("SLABSLICE \tverify_slice slicename %s sl %s \
- slice_record %s"%(slicename, sl, slice_record))
+ logger.debug("SLABSLICE \tverify_slice slicename %s slices_list %s sl %s \
+ slice_record %s"%(slicename, slices_list,sl, \
+ slice_record))
sfa_slice = sl
sfa_slice.update(slice_record)
#del slice['last_updated']
#slice['peer_slice_id'] = slice_record.get('slice_id', None)
## unbind from peer so we can modify if necessary.
## Will bind back later
- #self.driver.UnBindObjectFromPeer('slice', slice['slice_id'], \
- #peer['shortname'])
+ #self.driver.UnBindObjectFromPeer('slice', \
+ #slice['slice_id'], \
+ #peer['shortname'])
#Update existing record (e.g. expires field)
#it with the latest info.
##if slice_record and slice['expires'] != slice_record['expires']:
def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, \
options={}):
+ """
+ users is a record list. Records can either be local records
+ or users records from known and trusted federated sites.
+ If the user is from another site that senslab doesn't trust yet,
+ then Resolve will raise an error before getting to create_sliver.
+ """
#TODO SA 21/08/12 verify_persons Needs review
- users_by_id = {}
- users_by_hrn = {}
+
+
+ users_by_id = {}
+ users_by_hrn = {}
+ #users_dict : dict whose keys can either be the user's hrn or its id.
+ #Values contains only id and hrn
users_dict = {}
-
+
+ #First create dicts by hrn and id for each user in the user record list:
for user in users:
if 'urn' in user and (not 'hrn' in user ) :
'hrn':user['hrn']}
- logger.debug( "SLABSLICE.PY \tverify_person \
+ logger.debug( "SLABSLICE.PY \t verify_person \
users_dict %s \r\n user_by_hrn %s \r\n \
\tusers_by_id %s " \
%(users_dict,users_by_hrn, users_by_id))
existing_user_ids = []
existing_user_hrns = []
existing_users = []
- #Check if user is in Senslab LDAP using its hrn.
- #Assuming Senslab is centralised : one LDAP for all sites,
+ # Check if user is in Senslab LDAP using its hrn.
+ # Assuming Senslab is centralised : one LDAP for all sites,
# user_id unknown from LDAP
- # LDAP does not provide users id, therefore we rely on hrns
+ # LDAP does not provide users id, therefore we rely on hrns containing
+ # the login of the user.
# If the hrn is not a senslab hrn, the user may not be in LDAP.
if users_by_hrn:
- #Construct the list of filters for GetPersons
+ #Construct the list of filters (list of dicts) for GetPersons
filter_user = []
for hrn in users_by_hrn:
filter_user.append (users_by_hrn[hrn])
logger.debug(" SLABSLICE.PY \tverify_person filter_user %s " \
- %(filter_user))
- existing_users = self.driver.GetPersons(filter_user)
+ %(filter_user))
+ #Check user's in LDAP with GetPersons
+ #Needed because what if the user has been deleted in LDAP but
+ #is still in SFA?
+ existing_users = self.driver.GetPersons(filter_user)
+
+ #User's in senslab LDAP
if existing_users:
for user in existing_users :
existing_user_hrns.append(users_dict[user['hrn']]['hrn'])
existing_user_ids.\
append(users_dict[user['hrn']]['person_id'])
- #User from another federated site ,
- #does not have a senslab account yet?
- #Check in the LDAP if we know email,
- #maybe he has multiple SFA accounts = multiple hrns.
- #Check before adding them to LDAP
+ # User from another known trusted federated site. Check
+ # if a senslab account matching the email has already been created.
else:
req = 'mail='
if isinstance(users, list):
+
req += users[0]['email']
else:
req += users['email']
ldap_reslt = self.driver.ldap.LdapSearch(req)
if ldap_reslt:
logger.debug(" SLABSLICE.PY \tverify_person users \
- USER already in Senslab \t ldap_reslt %s "%( ldap_reslt))
+ USER already in Senslab \t ldap_reslt %s \
+ "%( ldap_reslt))
existing_users.append(ldap_reslt[1])
else:
#User not existing in LDAP
- #TODO SA 21/08/12 raise something to add user or add it auto ?
+ #TODO SA 21/08/12 raise smthg to add user or add it auto ?
logger.debug(" SLABSLICE.PY \tverify_person users \
- not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t ldap_reslt %s " \
- %(users, ldap_reslt))
+ not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
+ ldap_reslt %s " %(users, ldap_reslt))
requested_user_ids = users_by_id.keys()
requested_user_hrns = users_by_hrn.keys()
except KeyError:
pass
- #existing_slice_user_hrns = [user['hrn'] for \
- #user in existing_slice_users]
-
+
# users to be added, removed or updated
#One user in one senslab slice : there should be no need
#to remove/ add any user from/to a slice.
#value: %s, node_id: %s\nCause:%s'\
#% (name, value, node_id, str(error)))
-
\ No newline at end of file
+