import time
-
-
from sfa.util.xrn import hrn_to_urn, urn_to_hrn
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.versions.slabv1Node import SlabPosition
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
-#from sfa.rspecs.elements.login import Login
-#from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.lease import Lease
from sfa.rspecs.elements.granularity import Granularity
slices = self.driver.GetSlices(slice_filter= str(slice_name), \
slice_filter_type = 'slice_hrn')
- logger.debug("Slabaggregate api \tget_slice_and_slivers slices %s self.driver.hrn %s" \
- %(slices, self.driver.hrn))
+ logger.debug("Slabaggregate api \tget_slice_and_slivers \
+ slices %s self.driver.hrn %s" \
+ %(slices, self.driver.hrn))
if not slices:
return (sfa_slice, slivers)
#if isinstance(sfa_slice, list):
#and therfore, node allocated to this slice
for sfa_slice in slices:
try:
-
- for node in sfa_slice['node_ids']:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- #node_id = self.driver.root_auth + '.' + node_id
- sliver = Sliver({'sliver_id':sliver_xrn.urn,
- 'name': sfa_slice['slice_hrn'],
- 'type': 'slab-node',
- 'tags': []})
-
- slivers[node] = sliver
+ node_ids_list = sfa_slice['node_ids']
except KeyError:
logger.log_exc("SLABAGGREGATE \t \
get_slice_and_slivers KeyError ")
+ continue
+
+ for node in node_ids_list:
+ sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
+ sliver_xrn.set_authority(self.driver.hrn)
+ #node_id = self.driver.root_auth + '.' + node_id
+ sliver = Sliver({'sliver_id':sliver_xrn.urn,
+ 'name': sfa_slice['slice_hrn'],
+ 'type': 'slab-node',
+ 'tags': []})
+
+ slivers[node] = sliver
+
#Add default sliver attribute :
#connection information for senslab
ldap_username = tmp[1].split('_')[0]
vmaddr = 'ssh ' + ldap_username + '@grenoble.senslab.info'
slivers['default_sliver'] = {'vm': vmaddr , 'login': ldap_username}
- ## sort sliver attributes by node id
- ##tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
- ##for tag in tags:
- ### most likely a default/global sliver attribute (node_id == None)
- ##if tag['node_id'] not in slivers:
- ##sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
- ##'name': 'slab-vm',
- ##'tags': []})
- ##slivers[tag['node_id']] = sliver
- ##slivers[tag['node_id']]['tags'].append(tag)
+
logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
%(slivers))
return (slices, slivers)
#if slice_xrn:
#if not slices or not slices['node_ids']:
#return ([],[])
- tags_filter = {}
+ #tags_filter = {}
# get the granularity in second for the reservation system
grain = self.driver.GetLeaseGranularity()
for node in nodes:
nodes_dict[node['node_id']] = node
-
+ #logger.debug("SLABAGGREGATE api get_nodes nodes %s "\
+ #%(nodes ))
# get sites
#sites_dict = self.get_sites({'site_id': site_ids})
# get interfaces
# Make a list of all the nodes in the slice before getting their attributes
rspec_nodes = []
slice_nodes_list = []
- logger.debug("SLABAGGREGATE api get_rspec slice_nodes_list %s "\
+ logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
%(slices ))
if slices:
for one_slice in slices:
- slice_nodes_list = one_slice['node_ids']
+ try:
+ slice_nodes_list = one_slice['node_ids']
+ except KeyError:
+ pass
#for node in one_slice['node_ids']:
#slice_nodes_list.append(node)
reserved_nodes = self.driver.GetNodesCurrentlyInUse()
- logger.debug("SLABAGGREGATE api get_rspec slice_nodes_list %s "\
+ logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
%(slice_nodes_list))
for node in nodes:
# skip whitelisted nodes
#if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
#continue
#rspec_node = Node()
- logger.debug("SLABAGGREGATE api get_rspec node %s "\
- %(node))
+ #logger.debug("SLABAGGREGATE api get_nodes node %s "\
+ #%(node))
if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
rspec_node = SlabNode()
# Senslab's nodes are federated : there is only one authority
# for all Senslab sites, registered in SFA.
- # Removing the part including the site in authority_id SA 27/07/12
+ # Removing the part including the site
+ # in authority_id SA 27/07/12
rspec_node['authority_id'] = rspec_node['component_manager_id']
- # do not include boot state (<available> element) in the manifest rspec
+ # do not include boot state (<available> element)
+ #in the manifest rspec
rspec_node['boot_state'] = node['boot_state']
# only doing this because protogeni rspec needs
# to advertise available initscripts
- #rspec_node['pl_initscripts'] = None
# add site/interface info to nodes.
# assumes that sites, interfaces and tags have already been prepared.
#site = sites_dict[node['site_id']]
try:
position[field] = node[field]
except KeyError, error :
- logger.log_exc("SLABAGGREGATE\t get_rspec position %s "%(error))
+ logger.log_exc("SLABAGGREGATE\t get_nodes \
+ position %s "%(error))
rspec_node['position'] = position
#rspec_node['interfaces'] = []
- #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
# Granularity
granularity = Granularity({'grain': grain})
rspec_node['granularity'] = granularity
rspec_node['slivers'] = [sliver]
# slivers always provide the ssh service
- #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
- #service = Services({'login': login})
- #rspec_node['services'] = [service]
+ login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+ service = Services({'login': login})
+ rspec_node['services'] = [service]
rspec_nodes.append(rspec_node)
return (rspec_nodes)
now = int(time.time())
lease_filter = {'clip': now }
-
- #self.driver.synchronize_oar_and_slice_table()
+
#if slice_record:
#lease_filter.update({'name': slice_record['name']})
return_fields = ['lease_id', 'hostname', 'site_id', \
if lease_option in ['all', 'resources']:
#if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
nodes = self.get_nodes(slices, slivers)
- logger.debug("SlabAggregate \tget_rspec **** \
- nodes %s \r\n" %(nodes))
#In case creating a job, slice_xrn is not set to None
rspec.version.add_nodes(nodes)
if slice_xrn :
leases = self.get_leases(slices)
rspec.version.add_leases(leases)
- logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
- %(rspec.toxml()))
+ #logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
+ #%(rspec.toxml()))
return rspec.toxml()
-
-
# thierry : note
# this inheritance scheme is so that the driver object can receive
# GetNodes or GetSites sorts of calls directly
#For compatibility
top_level_status = 'empty'
result = {}
- result.fromkeys(['geni_urn','pl_login','geni_status','geni_resources'],None)
+ result.fromkeys(\
+ ['geni_urn','pl_login','geni_status','geni_resources'], None)
result['pl_login'] = recuser.hrn
- logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
- %s \r\n " %(slice_urn, slice_hrn, sl))
+ logger.debug("Slabdriver - sliver_status Sliver status \
+ urn %s hrn %s sl %s \r\n " \
+ %(slice_urn, slice_hrn, sl))
try:
nodes_in_slice = sl['node_ids']
except KeyError:
#res['slab_boot_state'] = node['boot_state']
res['pl_hostname'] = node['hostname']
- res['pl_boot_state'] = nodeall_byhostname[node['hostname']]['boot_state']
+ res['pl_boot_state'] = \
+ nodeall_byhostname[node['hostname']]['boot_state']
#res['pl_last_contact'] = strftime(self.time_format, \
#gmtime(float(timestamp)))
sliver_id = Xrn(slice_urn, type='slice', \
# parse rspec
rspec = RSpec(rspec_string)
- logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
- %(rspec.version,slice_record))
+ logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version \
+ %s slice_record %s " \
+ %(rspec.version,slice_record))
# ensure site record exists?
# ensure slice record exists
# add/remove leases
requested_lease_list = []
- kept_leases = []
+
logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
+ rspec_requested_leases = rspec.version.get_leases()
for lease in rspec.version.get_leases():
single_requested_lease = {}
logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
if not lease.get('lease_id'):
single_requested_lease['hostname'] = \
- slab_xrn_to_hostname(lease.get('component_id').strip())
+ slab_xrn_to_hostname(\
+ lease.get('component_id').strip())
single_requested_lease['start_time'] = lease.get('start_time')
single_requested_lease['duration'] = lease.get('duration')
- else:
- kept_leases.append(int(lease['lease_id']))
+
if single_requested_lease.get('hostname'):
requested_lease_list.append(single_requested_lease)
- logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s " %(requested_job_dict))
+ logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
+ %(requested_job_dict))
#verify_slice_leases returns the leases , but the return value is unused
#here. Removed SA 13/08/12
slices.verify_slice_leases(sfa_slice, \
- requested_job_dict, kept_leases, peer)
+ requested_job_dict, peer)
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
try:
if peer:
self.UnBindObjectFromPeer('slice', \
- sfa_slice['record_id_slice'], peer,None)
+ sfa_slice['record_id_slice'], \
+ peer, None)
self.DeleteSliceFromNodes(sfa_slice)
finally:
if peer:
- self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
- peer, sfa_slice['peer_slice_id'])
+ self.BindObjectToPeer('slice', \
+ sfa_slice['record_id_slice'], \
+ peer, sfa_slice['peer_slice_id'])
return 1
return sfa_peer
+ #def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
+
+
+ ##First get the list of current leases from OAR
+ #leases = self.driver.GetLeases({'name':sfa_slice['slice_hrn']})
+ #logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
+ #leases %s "%(requested_jobs_dict, leases ))
+
+ #current_nodes_reserved_by_start_time = {}
+ #requested_nodes_by_start_time = {}
+ #leases_by_start_time = {}
+ #reschedule_jobs_dict = {}
+
+
+
+ #if leases :
+
+ ##Create reduced dictionary with key start_time and value
+ ## the list of nodes
+ ##-for the leases already registered by OAR first
+ ## then for the new leases requested by the user
+
+ ##Leases already scheduled/running in OAR
+ #for lease in leases :
+ #current_nodes_reserved_by_start_time[lease['t_from']] = \
+ #lease['reserved_nodes']
+ #leases_by_start_time[lease['t_from']] = lease
+
+
+ ##Requested jobs
+ #for start_time in requested_jobs_dict:
+ #requested_nodes_by_start_time[int(start_time)] = \
+ #requested_jobs_dict[start_time]['hostname']
+ ##Check if there is any difference between the leases already
+ ##registered in OAR and the requested jobs.
+ ##Difference could be:
+ ##-Lease deleted in the requested jobs
+ ##-Added/removed nodes
+ ##-Newly added lease
+
+
+ ##Find all deleted leases
+ #start_time_list = \
+ #list(set(leases_by_start_time.keys()).\
+ #difference(requested_nodes_by_start_time.keys()))
+ #deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+ #for start_time in start_time_list]
+
+
+
+                ##Find added or removed nodes in existing leases
+ #for start_time in requested_nodes_by_start_time:
+ #if start_time in current_nodes_reserved_by_start_time:
+
+ #if requested_nodes_by_start_time[start_time] == \
+ #current_nodes_reserved_by_start_time[start_time]:
+ #continue
+
+ #else:
+ #update_node_set = \
+ #set(requested_nodes_by_start_time[start_time])
+ #added_nodes = \
+ #update_node_set.difference(\
+ #current_nodes_reserved_by_start_time[start_time])
+ #shared_nodes = \
+ #update_node_set.intersection(\
+ #current_nodes_reserved_by_start_time[start_time])
+ #old_nodes_set = \
+ #set(\
+ #current_nodes_reserved_by_start_time[start_time])
+ #removed_nodes = \
+ #old_nodes_set.difference(\
+ #requested_nodes_by_start_time[start_time])
+ #logger.debug("SLABSLICES verify_slice_leases \
+ #shared_nodes %s added_nodes %s removed_nodes %s"\
+ #%(shared_nodes, added_nodes,removed_nodes ))
+ ##If the lease is modified, delete it before
+ ##creating it again.
+ ##Add the deleted lease job id in the list
+ ##WARNING :rescheduling does not work if there is already
+ ## 2 running/scheduled jobs because deleting a job
+ ##takes time SA 18/10/2012
+ #if added_nodes or removed_nodes:
+ #deleted_leases.append(\
+ #leases_by_start_time[start_time]['lease_id'])
+ ##Reschedule the job
+ #if added_nodes or shared_nodes:
+ #reschedule_jobs_dict[str(start_time)] = \
+ #requested_jobs_dict[str(start_time)]
+
+ #else:
+ ##New lease
+ #job = requested_jobs_dict[str(start_time)]
+ #self.driver.AddLeases(job['hostname'], \
+ #sfa_slice, int(job['start_time']), \
+ #int(job['duration']))
+
+ ##Deleted leases are the ones with lease id not declared in the Rspec
+ #if deleted_leases:
+ #self.driver.DeleteLeases(deleted_leases, sfa_slice['slice_hrn'])
+ #logger.debug("SLABSLICES \
+ #verify_slice_leases slice %s deleted_leases %s"\
+ #%(sfa_slice, deleted_leases))
+
+
+ #if reschedule_jobs_dict :
+ #for start_time in reschedule :
+ #job = reschedule_jobs_dict[start_time]
+ #self.driver.AddLeases(job['hostname'], \
+ #sfa_slice, int(job['start_time']), \
+ #int(job['duration']))
+ #return leases
+
+
def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
leases %s "%(requested_jobs_dict, leases ))
+ current_nodes_reserved_by_start_time = {}
+ requested_nodes_by_start_time = {}
+ leases_by_start_time = {}
+ reschedule_jobs_dict = {}
+
+
+ #Create reduced dictionary with key start_time and value
+ # the list of nodes
+ #-for the leases already registered by OAR first
+ # then for the new leases requested by the user
- if leases :
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
+ #Leases already scheduled/running in OAR
+ for lease in leases :
+ current_nodes_reserved_by_start_time[lease['t_from']] = \
+ lease['reserved_nodes']
+ leases_by_start_time[lease['t_from']] = lease
- #Leases already scheduled/running in OAR
- for lease in leases :
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
-
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
+
+ #Requested jobs
+ for start_time in requested_jobs_dict:
+ requested_nodes_by_start_time[int(start_time)] = \
+ requested_jobs_dict[start_time]['hostname']
+ #Check if there is any difference between the leases already
+ #registered in OAR and the requested jobs.
+ #Difference could be:
+ #-Lease deleted in the requested jobs
+ #-Added/removed nodes
+ #-Newly added lease
+
+ logger.debug("SLABSLICES verify_slice_leases requested_nodes_by_start_time %s \
+ "%(requested_nodes_by_start_time ))
+ #Find all deleted leases
+ start_time_list = \
+ list(set(leases_by_start_time.keys()).\
+ difference(requested_nodes_by_start_time.keys()))
+ deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+ for start_time in start_time_list]
+
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
+        #Find added or removed nodes in existing leases
+ for start_time in requested_nodes_by_start_time:
+ logger.debug("SLABSLICES verify_slice_leases start_time %s \
+ "%( start_time))
+ if start_time in current_nodes_reserved_by_start_time:
+
+ if requested_nodes_by_start_time[start_time] == \
+ current_nodes_reserved_by_start_time[start_time]:
+ continue
+
+ else:
+ update_node_set = \
+ set(requested_nodes_by_start_time[start_time])
+ added_nodes = \
+ update_node_set.difference(\
+ current_nodes_reserved_by_start_time[start_time])
+ shared_nodes = \
+ update_node_set.intersection(\
+ current_nodes_reserved_by_start_time[start_time])
+ old_nodes_set = \
+ set(\
+ current_nodes_reserved_by_start_time[start_time])
+ removed_nodes = \
+ old_nodes_set.difference(\
+ requested_nodes_by_start_time[start_time])
+ logger.debug("SLABSLICES verify_slice_leases \
+ shared_nodes %s added_nodes %s removed_nodes %s"\
+ %(shared_nodes, added_nodes,removed_nodes ))
+ #If the lease is modified, delete it before
+ #creating it again.
+ #Add the deleted lease job id in the list
+ #WARNING :rescheduling does not work if there is already
+ # 2 running/scheduled jobs because deleting a job
+ #takes time SA 18/10/2012
+ if added_nodes or removed_nodes:
+ deleted_leases.append(\
+ leases_by_start_time[start_time]['lease_id'])
+ #Reschedule the job
+ if added_nodes or shared_nodes:
+ reschedule_jobs_dict[str(start_time)] = \
+ requested_jobs_dict[str(start_time)]
-
- reschedule_jobs_dict = {}
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- if start_time in current_nodes_reserved_by_start_time:
-
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
+ else:
+ #New lease
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("SLABSLICES verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
- job = requested_jobs_dict[str(start_time)]
- self.driver.AddLeases(job['hostname'], \
- sfa_slice, int(job['start_time']), \
- int(job['duration']))
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.DeleteLeases(deleted_leases, sfa_slice['slice_hrn'])
- logger.debug("SLABSLICES \
- verify_slice_leases slice %s deleted_leases %s"\
- %(sfa_slice, deleted_leases))
-
-
- if reschedule_jobs_dict :
- for start_time in reschedule :
- job = reschedule_jobs_dict[start_time]
+ job = requested_jobs_dict[str(start_time)]
+ logger.debug("SLABSLICES \
+ NEWLEASE slice %s job %s"\
+ %(sfa_slice, job))
self.driver.AddLeases(job['hostname'], \
- sfa_slice, int(job['start_time']), \
- int(job['duration']))
+ sfa_slice, int(job['start_time']), \
+ int(job['duration']))
+
+ #Deleted leases are the ones with lease id not declared in the Rspec
+ if deleted_leases:
+ self.driver.DeleteLeases(deleted_leases, sfa_slice['slice_hrn'])
+ logger.debug("SLABSLICES \
+ verify_slice_leases slice %s deleted_leases %s"\
+ %(sfa_slice, deleted_leases))
+
+
+ if reschedule_jobs_dict :
+ for start_time in reschedule :
+ job = reschedule_jobs_dict[start_time]
+ self.driver.AddLeases(job['hostname'], \
+ sfa_slice, int(job['start_time']), \
+ int(job['duration']))
return leases
def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):