import sys
import subprocess
-import datetime
-from time import gmtime, strftime
+
+from datetime import datetime
+from dateutil import tz
+from time import strftime,gmtime
from sfa.util.faults import MissingSfaInfo , SliverDoesNotExist
from sfa.util.sfalogging import logger
from sfa.storage.alchemy import dbsession
from sfa.storage.model import RegRecord
-
-from sfa.trust.certificate import *
-from sfa.trust.credential import *
+from sfa.trust.credential import Credential
from sfa.trust.gid import GID
from sfa.managers.driver import Driver
# is taken care of
# SlabDriver should be really only about talking to the senslab testbed
-## thierry : please avoid wildcard imports :)
+
from sfa.senslab.OARrestapi import OARrestapi
from sfa.senslab.LDAPapi import LDAPapi
-from sfa.senslab.parsing import parse_filter
from sfa.senslab.slabpostgres import SlabDB, slab_dbsession,SliceSenslab
from sfa.senslab.slabaggregate import SlabAggregate
from sfa.senslab.slabslices import SlabSlices
-def list_to_dict(recs, key):
- """
- convert a list of dictionaries into a dictionary keyed on the
- specified dictionary key
- """
- # print>>sys.stderr, " \r\n \t\t 1list_to_dict : rec %s \r\n \t\t list_to_dict key %s" %(recs,key)
- keys = [rec[key] for rec in recs]
- #print>>sys.stderr, " \r\n \t\t list_to_dict : rec %s \r\n \t\t list_to_dict keys %s" %(recs,keys)
- return dict(zip(keys, recs))
+
# thierry : note
# this inheritance scheme is so that the driver object can receive
self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
-
- print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n"
- # thierry - just to not break the rest of this code
-
-
- #self.oar = OARapi()
self.oar = OARrestapi()
self.ldap = LDAPapi()
- #self.users = SenslabImportUsers()
self.time_format = "%Y-%m-%d %H:%M:%S"
self.db = SlabDB(config)
- #self.logger=sfa_logger()
self.cache=None
-
+
def sliver_status(self,slice_urn,slice_hrn):
- # receive a status request for slice named urn/hrn urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
- # shall return a structure as described in
- # http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- # NT : not sure if we should implement this or not, but used by sface.
+ """Receive a status request for slice named urn/hrn
+ urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+ shall return a structure as described in
+ http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ NT : not sure if we should implement this or not, but used by sface.
-
+ """
+
+ #First get the slice with the slice hrn
sl = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
- if len(slices) is 0:
+        if len(sl) == 0:
raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
- #sl = slices[0]
- print >>sys.stderr, "\r\n \r\n_____________ Sliver status urn %s hrn %s slices %s \r\n " %(slice_urn,slice_hrn,slices)
+
+ nodes_in_slice = sl['node_ids']
+        if len(nodes_in_slice) == 0:
+ raise SliverDoesNotExist("No slivers allocated ")
+
+ logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
+ %s \r\n " %(slice_urn,slice_hrn,sl) )
+
-        if sl['oar_job_id'] is not -1:
+        if sl['oar_job_id'] != -1:
-
- # report about the local nodes only
- nodes = self.GetNodes({'hostname':sl['node_ids']},
- ['node_id', 'hostname','name','boot_state'])
- if len(nodes) is 0:
- raise SliverDoesNotExist("No slivers allocated ")
-
-
- site_logins = [node['name'] for node in nodes]
-
+ #A job is running on Senslab for this slice
+ # report about the local nodes that are in the slice only
+
+ nodes_all = self.GetNodes({'hostname':nodes_in_slice},
+ ['node_id', 'hostname','site','boot_state'])
+ nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+
+
result = {}
top_level_status = 'unknown'
-        if nodes:
+            if nodeall_byhostname:
top_level_status = 'ready'
result['geni_urn'] = slice_urn
- result['slab_login'] = sl['job_user']
+ result['pl_login'] = sl['job_user'] #For compatibility
+
- timestamp = float(sl['startTime']) + float(sl['walltime'])
- result['slab_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+ timestamp = float(sl['startTime']) + float(sl['walltime'])
+ result['pl_expires'] = strftime(self.time_format, \
+ gmtime(float(timestamp)))
+ #result['slab_expires'] = strftime(self.time_format,\
+ #gmtime(float(timestamp)))
resources = []
-        for node in nodes:
+            for node in nodeall_byhostname:
res = {}
- res['slab_hostname'] = node['hostname']
- res['slab_boot_state'] = node['boot_state']
+ #res['slab_hostname'] = node['hostname']
+ #res['slab_boot_state'] = node['boot_state']
- sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], node['node_id'])
- res['geni_urn'] = sliver_id
- if node['boot_state'] == 'Alive':
+ res['pl_hostname'] = nodeall_byhostname[node]['hostname']
+ res['pl_boot_state'] = nodeall_byhostname[node]['boot_state']
+ res['pl_last_contact'] = strftime(self.time_format, \
+ gmtime(float(timestamp)))
+ sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], \
+ nodeall_byhostname[node]['node_id'])
+ res['geni_urn'] = sliver_id
+ if nodeall_byhostname[node]['boot_state'] == 'Alive':
+
res['geni_status'] = 'ready'
else:
res['geni_status'] = 'failed'
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ print>>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver "
aggregate = SlabAggregate(self)
-
+
slices = SlabSlices(self)
peer = slices.get_peer(slice_hrn)
sfa_peer = slices.get_sfa_peer(slice_hrn)
slice_record=None
-
-
+
if not isinstance(creds, list):
creds = [creds]
-
-
+
if users:
slice_record = users[0].get('slice_record', {})
# parse rspec
rspec = RSpec(rspec_string)
- requested_attributes = rspec.version.get_slice_attributes()
+ print>>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver ============================rspec.version %s " %(rspec.version)
+
- # ensure site record exists
- #site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ # ensure site record exists?
# ensure slice record exists
slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ requested_attributes = rspec.version.get_slice_attributes()
+
+ if requested_attributes:
+ for attrib_dict in requested_attributes:
+ if 'timeslot' in attrib_dict and attrib_dict['timeslot'] is not None:
+ slice.update({'timeslot':attrib_dict['timeslot']})
+ print >>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver ..... slice %s " %(slice)
# ensure person records exists
persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
- # ensure slice attributes exists
- #slices.verify_slice_attributes(slice, requested_attributes, options=options)
+ # ensure slice attributes exists?
+
- # add/remove slice from nodes
+ # add/remove slice from nodes
+ print >>sys.stderr, "\r\n \r\n \t=============================== SLABDRIVER.PY create_sliver ..... "
+
requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+ print >>sys.stderr, "\r\n \r\n \t=============================== ........... requested_slivers ============================requested_slivers %s " %(requested_slivers)
nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
-
-
- # handle MyPLC peer association.
- # only used by plc and ple.
- #slices.handle_peer(site, slice, persons, peer)
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
- slices = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
- if not slices:
+ slice = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
+ print>>sys.stderr, "\r\n \r\n \t\t SLABDRIVER.PY delete_sliver slice %s" %(slice)
+ if not slice:
return 1
- slice = slices[0]
-
+
+ slices = SlabSlices(self)
# determine if this is a peer slice
# xxx I wonder if this would not need to use PlSlices.get_peer instead
# in which case plc.peers could be deprecated as this here
# is the only/last call to this last method in plc.peers
- peer = peers.get_peer(self, slice_hrn)
+ peer = slices.get_peer(slice_hrn)
try:
if peer:
- self.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
- self.DeleteSliceFromNodes(slice_hrn, slice['node_ids'])
+ self.UnBindObjectFromPeer('slice', slice['record_id_slice'], peer)
+ self.DeleteSliceFromNodes(slice)
finally:
if peer:
-                self.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+                self.BindObjectToPeer('slice', slice['record_id_slice'], peer, slice['peer_slice_id'])
return 1
-
-
+ def AddSlice(self, slice_record):
+ slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], record_id_slice= slice_record['record_id_slice'] , record_id_user= slice_record['record_id_user'], peer_authority = slice_record['peer_authority'])
+ print>>sys.stderr, "\r\n \r\n \t\t\t =======SLABDRIVER.PY AddSlice slice_record %s slab_slice %s" %(slice_record,slab_slice)
+ slab_dbsession.add(slab_slice)
+ slab_dbsession.commit()
+ return
+
# first 2 args are None in case of resource discovery
def list_resources (self, slice_urn, slice_hrn, creds, options):
#cached_requested = options.get('cached', True)
#return rspec
#panos: passing user-defined options
- #print "manager options = ",options
+
aggregate = SlabAggregate(self)
origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
- print>>sys.stderr, " \r\n \r\n \t SLABDRIVER get_rspec origin_hrn %s" %(origin_hrn)
options.update({'origin_hrn':origin_hrn})
- print>>sys.stderr, " \r\n \r\n \t SLABDRIVER get_rspec options %s" %(options)
rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
options=options)
-
+ print>>sys.stderr, " \r\n \r\n \t SLABDRIVER list_resources rspec "
# cache the result
#if self.cache and not slice_hrn:
#logger.debug("Slab.ListResources: stores advertisement in cache")
type = sfa_record['type']
slab_record = self.sfa_fields_to_slab_fields(type, hrn, sfa_record)
- #if type == 'authority':
- #sites = self.shell.GetSites([slab_record['login_base']])
- #if not sites:
- #pointer = self.shell.AddSite(slab_record)
- #else:
- #pointer = sites[0]['site_id']
-
+
if type == 'slice':
acceptable_fields=['url', 'instantiation', 'name', 'description']
for key in slab_record.keys():
self.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
#No node adding outside OAR
- #elif type == 'node':
- #login_base = hrn_to_slab_login_base(sfa_record['authority'])
- #nodes = self.GetNodes([slab_record['hostname']])
- #if not nodes:
- #pointer = self.AddNode(login_base, slab_record)
- #else:
- #pointer = nodes[0]['node_id']
-
+
return pointer
#No site or node record update allowed
key_exists = True
if not key_exists:
self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
-
- #elif type == "node":
- #self.UpdateNode(pointer, new_sfa_record)
+
return True
return True
- def GetPeers (self,auth = None, peer_filter=None, return_fields=None):
+ def GetPeers (self,auth = None, peer_filter=None, return_fields_list=None):
existing_records = {}
existing_hrns_by_types= {}
- print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers auth = %s, peer_filter %s, return_field %s " %(auth , peer_filter, return_fields)
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers auth = %s, peer_filter %s, return_field %s " %(auth , peer_filter, return_fields_list)
all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
for record in all_records:
- existing_records[record.hrn] = record
+ existing_records[(record.hrn,record.type)] = record
if record.type not in existing_hrns_by_types:
existing_hrns_by_types[record.type] = [record.hrn]
print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t NOT IN existing_hrns_by_types %s " %( existing_hrns_by_types)
else:
print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t INNN type %s hrn %s " %( record.type,record.hrn )
- existing_hrns_by_types.update({record.type:(existing_hrns_by_types[record.type].append(record.hrn))})
+ existing_hrns_by_types[record.type].append(record.hrn)
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t INNN existing_hrns_by_types %s " %( existing_hrns_by_types)
+ #existing_hrns_by_types.update({record.type:(existing_hrns_by_types[record.type].append(record.hrn))})
print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers existing_hrns_by_types %s " %( existing_hrns_by_types)
records_list= []
- try:
- for hrn in existing_hrns_by_types['authority+sa']:
- records_list.append(existing_records[hrn])
- print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers records_list %s " %(records_list)
+ try:
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers existing_hrns_by_types['authority+sa'] %s \t\t existing_records %s " %(existing_hrns_by_types['authority'],existing_records)
+ if peer_filter:
+ records_list.append(existing_records[(peer_filter,'authority')])
+ else :
+ for hrn in existing_hrns_by_types['authority']:
+ records_list.append(existing_records[(hrn,'authority')])
+
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers records_list %s " %(records_list)
except:
pass
-
- if not peer_filter and not return_fields:
+
+ return_records = records_list
+ if not peer_filter and not return_fields_list:
return records_list
- return_records = parse_filter(records_list,peer_filter, 'peers', return_fields)
-
+
+
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers return_records %s " %(return_records)
return return_records
-
- def GetPersons(self, person_filter=None, return_fields=None):
-
- person_list = self.ldap.ldapFind({'authority': self.root_auth })
-
- #check = False
- #if person_filter and isinstance(person_filter, dict):
- #for k in person_filter.keys():
- #if k in person_list[0].keys():
- #check = True
+ #TODO : Handling OR request in make_ldap_filters_from_records instead of the for loop
+ #over the records' list
+ def GetPersons(self, person_filter=None, return_fields_list=None):
+ """
+ person_filter should be a list of dictionnaries when not set to None.
+ Returns a list of users found.
+
+ """
+ print>>sys.stderr, "\r\n \r\n \t\t\t GetPersons person_filter %s" %(person_filter)
+ person_list = []
+ if person_filter and isinstance(person_filter,list):
+ #If we are looking for a list of users (list of dict records)
+ #Usually the list contains only one user record
+ for f in person_filter:
+ person = self.ldap.LdapFindUser(f)
+ person_list.append(person)
+
+ else:
+ person_list = self.ldap.LdapFindUser()
- return_person_list = parse_filter(person_list,person_filter ,'persons', return_fields)
- if return_person_list:
- print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s " %(person_filter,return_fields)
- return return_person_list
+ return person_list
+
def GetTimezone(self):
- time = self.oar.parser.SendRequest("GET_timezone")
- return time
+ server_timestamp,server_tz = self.oar.parser.SendRequest("GET_timezone")
+ return server_timestamp,server_tz
- def DeleteJobs(self, job_id, username):
+ def DeleteJobs(self, job_id, slice_hrn):
if not job_id:
return
+        # str.rstrip() strips a character *set*, not a suffix ("alice_slice".rstrip("_slice") == "a");
+        # remove the literal "_slice" suffix explicitly instead.
+        username = slice_hrn.split(".")[-1]
+        if username.endswith("_slice"):
+            username = username[:-len("_slice")]
reqdict = {}
reqdict['method'] = "delete"
reqdict['strval'] = str(job_id)
print>>sys.stderr, "\r\n \r\n jobid DeleteJobs %s " %(answer)
- def GetJobs(self,job_id= None, resources=True,return_fields=None, username = None):
+ def GetJobs(self,job_id= None, resources=True,return_fields_list=None, username = None):
#job_resources=['reserved_resources', 'assigned_resources','job_id', 'job_uri', 'assigned_nodes',\
#'api_timestamp']
#assigned_res = ['resource_id', 'resource_uri']
#assigned_n = ['node', 'node_uri']
-
if job_id and resources is False:
req = "GET_jobs_id"
if job_id and resources :
req = "GET_jobs_id_resources"
node_list_k = 'reserved_resources'
-
-
#Get job info from OAR
job_info = self.oar.parser.SendRequest(req, job_id, username)
for node in node_list:
node_hostname_list.append(node['hostname'])
node_dict = dict(zip(node_hostname_list,node_list))
-
- #print>>sys.stderr, "\r\n \r\n \r\n \r\n \r\n \t\t GetJobs GetNODES %s " %(node_list)
try :
-
- #for n in job_info[node_list]:
- #n = str(self.root_auth) + str(n)
-
liste =job_info[node_list_k]
- print>>sys.stderr, "\r\n \r\n \t\t GetJobs resources job_info liste%s" %(liste)
for k in range(len(liste)):
job_info[node_list_k][k] = node_dict[job_info[node_list_k][k]]['hostname']
- print>>sys.stderr, "\r\n \r\n \t\t YYYYYYYYYYYYGetJobs resources job_info %s" %(job_info)
+ #Replaces the previous entry "assigned_network_address" / "reserved_resources"
+ #with "node_ids"
job_info.update({'node_ids':job_info[node_list_k]})
del job_info[node_list_k]
return job_info
except KeyError:
print>>sys.stderr, "\r\n \r\n \t\t GetJobs KEYERROR "
-
-
-
-
+ def GetReservedNodes(self):
+ # this function returns a list of all the nodes already involved in an oar job
+ #jobs=self.oar.parser.SendRequest("GET_reserved_nodes")
+ jobs=self.oar.parser.SendRequest("GET_jobs_details")
+ nodes=[]
+ for j in jobs :
+ nodes=j['assigned_network_address']+nodes
+ return nodes
- def GetNodes(self,node_filter= None, return_fields=None):
-
- node_dict =self.oar.parser.SendRequest("GET_resources_full")
- print>>sys.stderr, "\r\n \r\n \t\t SLABDRIVER.PY GetNodes "
+ def GetNodes(self,node_filter_dict = None, return_fields_list = None):
+ """
+ node_filter_dict : dictionnary of lists
+
+ """
+ node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
+ node_dict_list = node_dict_by_id.values()
+
+ #No filtering needed return the list directly
+ if not (node_filter_dict or return_fields_list):
+ return node_dict_list
+
return_node_list = []
- if not (node_filter or return_fields):
- return_node_list = node_dict.values()
- return return_node_list
-
- return_node_list= parse_filter(node_dict.values(),node_filter ,'node', return_fields)
+ if node_filter_dict:
+ for filter_key in node_filter_dict:
+ try:
+ #Filter the node_dict_list by each value contained in the
+ #list node_filter_dict[filter_key]
+ for value in node_filter_dict[filter_key]:
+ for node in node_dict_list:
+ if node[filter_key] == value:
+ if return_fields_list :
+ tmp = {}
+ for k in return_fields_list:
+ tmp[k] = node[k]
+ return_node_list.append(tmp)
+ else:
+ return_node_list.append(node)
+ except KeyError:
+ logger.log_exc("GetNodes KeyError")
+ return
+
+
return return_node_list
- def GetSites(self, site_filter = None, return_fields=None):
- site_dict =self.oar.parser.SendRequest("GET_sites")
- print>>sys.stderr, "\r\n \r\n \t\t SLABDRIVER.PY GetSites "
+ def GetSites(self, site_filter_name = None, return_fields_list = None):
+ site_dict = self.oar.parser.SendRequest("GET_sites")
+ #site_dict : dict where the key is the sit ename
return_site_list = []
- if not ( site_filter or return_fields):
+ if not ( site_filter_name or return_fields_list):
return_site_list = site_dict.values()
return return_site_list
-
- return_site_list = parse_filter(site_dict.values(), site_filter,'site', return_fields)
+
+ if site_filter_name in site_dict:
+ if return_fields_list:
+ for field in return_fields_list:
+                tmp = {}
+                try:
+ tmp[field] = site_dict[site_filter_name][field]
+ except KeyError:
+ logger.error("GetSites KeyError %s "%(field))
+ return None
+ return_site_list.append(tmp)
+ else:
+ return_site_list.append( site_dict[site_filter_name])
+
+
return return_site_list
- def GetSlices(self,slice_filter = None, filter_type = None, return_fields=None):
+ def GetSlices(self,slice_filter = None, filter_type = None, return_fields_list=None):
return_slice_list = []
slicerec = {}
+ rec = {}
ftypes = ['slice_hrn', 'record_id_user']
if filter_type and filter_type in ftypes:
if filter_type == 'slice_hrn':
slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
if filter_type == 'record_id_user':
slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
+
if slicerec:
rec = slicerec.dumpquerytodict()
login = slicerec.slice_hrn.split(".")[1].split("_")[0]
- print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY slicerec GetSlices %s " %(slicerec)
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY slicerec GetSlices %s " %(slicerec)
if slicerec.oar_job_id is not -1:
rslt = self.GetJobs( slicerec.oar_job_id, resources=False, username = login )
- print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices GetJobs %s " %(rslt)
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices GetJobs %s " %(rslt)
if rslt :
rec.update(rslt)
rec.update({'hrn':str(rec['slice_hrn'])})
#If GetJobs is empty, this means the job is now in the 'Terminated' state
#Update the slice record
else :
- self.db.update_job(slice_filter, job_id = '-1')
- rec['oar_job_id'] = '-1'
+ self.db.update_job(slice_filter, job_id = -1)
+ rec['oar_job_id'] = -1
rec.update({'hrn':str(rec['slice_hrn'])})
- print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices rec %s" %(rec)
+ try:
+ rec['node_ids'] = rec['node_list']
+ except KeyError:
+ pass
+
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices rec %s" %(rec)
+
return rec
print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices slices %s slice_filter %s " %(return_slice_list,slice_filter)
- #if return_fields:
- #return_slice_list = parse_filter(sliceslist, slice_filter,'slice', return_fields)
+ #if return_fields_list:
+ #return_slice_list = parse_filter(sliceslist, slice_filter,'slice', return_fields_list)
return slab_record
-
-
-
- def AddSliceToNodes(self, slice_name, added_nodes, slice_user=None):
+
+ def LaunchExperimentOnOAR(self, slice_dict, added_nodes, slice_user=None):
site_list = []
nodeid_list =[]
resource = ""
reqdict = {}
+ slice_name = slice_dict['name']
+ try:
+ slot = slice_dict['timeslot']
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR slot %s " %(slot)
+ except KeyError:
+ #Running on default parameters
+ #XP immediate , 10 mins
+ slot = {'date':None,'start_time':None, 'timezone':None,'duration':None }#10 min
+
+
reqdict['property'] ="network_address in ("
for node in added_nodes:
#Get the ID of the node : remove the root auth and put the site in a separate list
reqdict['property'] += "'"+ nodeid +"', "
nodeid_list.append(nodeid)
#site_list.append( l[0] )
+
+
reqdict['property'] = reqdict['property'][0: len( reqdict['property'])-2] +")"
reqdict['resource'] ="network_address="+ str(len(nodeid_list))
- reqdict['resource']+= ",walltime=" + str(00) + ":" + str(12) + ":" + str(20) #+2 min 20
- reqdict['script_path'] = "/bin/sleep 620" #+20 sec
+
+ if slot['duration']:
+ walltime = slot['duration'].split(":")
+ # Fixing the walltime by adding a few delays. First put the walltime in seconds
+ # oarAdditionalDelay = 20; additional delay for /bin/sleep command to
+ # take in account prologue and epilogue scripts execution
+ # int walltimeAdditionalDelay = 120; additional delay
+
+ desired_walltime = int(walltime[0])*3600 + int(walltime[1]) * 60 + int(walltime[2])
+ total_walltime = desired_walltime + 140 #+2 min 20
+ sleep_walltime = desired_walltime + 20 #+20 sec
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR desired_walltime %s total_walltime %s sleep_walltime %s " %(desired_walltime,total_walltime,sleep_walltime)
+ #Put the walltime back in str form
+ #First get the hours
+ walltime[0] = str(total_walltime / 3600)
+ total_walltime = total_walltime - 3600 * int(walltime[0])
+ #Get the remaining minutes
+ walltime[1] = str(total_walltime / 60)
+ total_walltime = total_walltime - 60 * int(walltime[1])
+ #Get the seconds
+ walltime[2] = str(total_walltime)
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR walltime %s " %(walltime)
+
+ reqdict['resource']+= ",walltime=" + str(walltime[0]) + ":" + str(walltime[1]) + ":" + str(walltime[2])
+ reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+ else:
+ reqdict['resource']+= ",walltime=" + str(00) + ":" + str(12) + ":" + str(20) #+2 min 20
+ reqdict['script_path'] = "/bin/sleep 620" #+20 sec
+ #In case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if slot['date'] and slot['start_time']:
+            if slot['timezone'] is None or slot['timezone'] == '':
+ #assume it is server timezone
+ server_timestamp,server_tz = self.GetTimezone()
+ from_zone=tz.gettz(server_tz)
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR timezone not specified server_tz %s from_zone %s" %(server_tz,from_zone)
+ else:
+ #Get zone of the user from the reservation time given in the rspec
+ from_zone = tz.gettz(slot['timezone'])
+
+ date = str(slot['date']) + " " + str(slot['start_time'])
+            user_datetime = datetime.strptime(date, self.time_format)
+ user_datetime = user_datetime.replace(tzinfo = from_zone)
+
+ #Convert to UTC zone
+ to_zone = tz.tzutc()
+ utc_date = user_datetime.astimezone(to_zone)
+ #Readable time accpeted by OAR
+ reqdict['reservation']= utc_date.strftime(self.time_format)
+
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR reqdict['reservation'] %s " %(reqdict['reservation'])
+
+ else:
+ # Immediate XP
+ # reservations are performed in the oar server timebase, so :
+ # 1- we get the server time(in UTC tz )/server timezone
+ # 2- convert the server UTC time in its timezone
+ # 3- add a custom delay to this time
+ # 4- convert this time to a readable form and it for the reservation request.
+ server_timestamp,server_tz = self.GetTimezone()
+ s_tz=tz.gettz(server_tz)
+ UTC_zone = tz.gettz("UTC")
+            # `from datetime import datetime` is in effect, so fromtimestamp is
+            # called on the class directly (datetime.datetime would raise AttributeError)
+            utc_server = datetime.fromtimestamp(float(server_timestamp) + 20, UTC_zone)
+ server_localtime=utc_server.astimezone(s_tz)
+
+ print>>sys.stderr, "\r\n \r\n \t\tLaunchExperimentOnOAR server_timestamp %s server_tz %s slice_name %s added_nodes %s username %s reqdict %s " %(server_timestamp,server_tz,slice_name,added_nodes,slice_user, reqdict )
+ readable_time = server_localtime.strftime(self.time_format)
+
+ print >>sys.stderr," \r\n \r\n \t\t\t\tAPRES ParseTimezone readable_time %s timestanp %s " %(readable_time ,server_timestamp)
+ reqdict['reservation'] = readable_time
+
+
reqdict['type'] = "deploy"
reqdict['directory']= ""
reqdict['name']= "TestSandrine"
- timestamp = self.GetTimezone()
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes slice_name %s added_nodes %s username %s reqdict %s " %(slice_name,added_nodes,slice_user, reqdict)
- readable_time = strftime(self.time_format, gmtime(float(timestamp)))
- print >>sys.stderr," \r\n \r\n \t\t\t\t AVANT ParseTimezone readable_time %s timestanp %s " %(readable_time, timestamp )
- timestamp = timestamp+ 3620 #Add 3 min to server time
- readable_time = strftime(self.time_format, gmtime(float(timestamp)))
-
- print >>sys.stderr," \r\n \r\n \t\t\t\tAPRES ParseTimezone readable_time %s timestanp %s " %(readable_time , timestamp)
- reqdict['reservation'] = readable_time
+
- # first step : start the OAR job
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes reqdict %s \r\n site_list %s" %(reqdict,site_list)
- #OAR = OARrestapi()
+ # first step : start the OAR job and update the job
+ print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR reqdict %s \r\n site_list %s" %(reqdict,site_list)
+
answer = self.oar.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s " %(answer)
- #self.db.update('slice',['oar_job_id'], [answer['id']], 'slice_hrn', slice_name)
-
-
- self.db.update_job( slice_name, job_id = answer['id'] )
- jobid=answer['id']
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s added_nodes %s slice_user %s" %(jobid,added_nodes,slice_user)
+ print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR jobid %s " %(answer)
+ try:
+ jobid = answer['id']
+ except KeyError:
+ print>>sys.stderr, "\r\n AddSliceTonode Impossible to create job %s " %( answer)
+ return
+
+ print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR jobid %s added_nodes %s slice_user %s" %(jobid,added_nodes,slice_user)
+ self.db.update_job( slice_name, jobid ,added_nodes)
+
+
# second step : configure the experiment
# we need to store the nodes in a yaml (well...) file like this :
# [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
#ret=subprocess.check_output(["/usr/bin/java", "-jar", ", str(jobid), slice_user])
output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), slice_user],stdout=subprocess.PIPE).communicate()[0]
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes wrapper returns %s " %(output)
+ print>>sys.stderr, "\r\n \r\n LaunchExperimentOnOAR wrapper returns %s " %(output)
return
-
-
-
-
- def DeleteSliceFromNodes(self, slice_name, deleted_nodes):
+
+
+ #Delete the jobs and updates the job id in the senslab table
+ #to set it to -1
+ #Does not clear the node list
+ def DeleteSliceFromNodes(self, slice_record):
+ # Get user information
+
+ self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
+ self.db.update_job(slice_record['hrn'], job_id = -1)
return
- def fill_record_sfa_info(self, records):
-
- def startswith(prefix, values):
- return [value for value in values if value.startswith(prefix)]
-
- # get person ids
- person_ids = []
- site_ids = []
- for record in records:
- person_ids.extend(record.get("person_ids", []))
- site_ids.extend(record.get("site_ids", []))
- if 'site_id' in record:
- site_ids.append(record['site_id'])
-
- #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___person_ids %s \r\n \t\t site_ids %s " %(person_ids, site_ids)
-
- # get all pis from the sites we've encountered
- # and store them in a dictionary keyed on site_id
- site_pis = {}
- if site_ids:
- pi_filter = {'|roles': ['pi'], '|site_ids': site_ids}
- pi_list = self.GetPersons( pi_filter, ['person_id', 'site_ids'])
- #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ GetPersons ['person_id', 'site_ids'] pi_ilist %s" %(pi_list)
-
- for pi in pi_list:
- # we will need the pi's hrns also
- person_ids.append(pi['person_id'])
-
- # we also need to keep track of the sites these pis
- # belong to
- for site_id in pi['site_ids']:
- if site_id in site_pis:
- site_pis[site_id].append(pi)
- else:
- site_pis[site_id] = [pi]
-
- # get sfa records for all records associated with these records.
- # we'll replace pl ids (person_ids) with hrns from the sfa records
- # we obtain
-
- # get the sfa records
- #table = SfaTable()
- existing_records = {}
- all_records = dbsession.query(RegRecord).all()
- for record in all_records:
- existing_records[(record.type,record.pointer)] = record
-
- print >>sys.stderr, " \r\r\n SLABDRIVER fill_record_sfa_info existing_records %s " %(existing_records)
- person_list, persons = [], {}
- #person_list = table.find({'type': 'user', 'pointer': person_ids})
- try:
- for p_id in person_ids:
- person_list.append( existing_records.get(('user',p_id)))
- except KeyError:
- print >>sys.stderr, " \r\r\n SLABDRIVER fill_record_sfa_info ERRRRRRRRRROR"
-
- # create a hrns keyed on the sfa record's pointer.
- # Its possible for multiple records to have the same pointer so
- # the dict's value will be a list of hrns.
- persons = defaultdict(list)
- for person in person_list:
- persons[person['pointer']].append(person)
-
- # get the pl records
- slab_person_list, slab_persons = [], {}
- slab_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
- slab_persons = list_to_dict(slab_person_list, 'person_id')
- #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ _list %s \r\n \t\t SenslabUsers.GetPersons ['person_id', 'roles'] slab_persons %s \r\n records %s" %(slab_person_list, slab_persons,records)
- # fill sfa info
-
- for record in records:
- # skip records with no pl info (top level authorities)
- #Sandrine 24 oct 11 2 lines
- #if record['pointer'] == -1:
- #continue
- sfa_info = {}
- type = record['type']
- if (type == "slice"):
- # all slice users are researchers
- #record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice') ? besoin ou pas ?
- record['PI'] = []
- record['researcher'] = []
- for person_id in record.get('person_ids', []):
- #Sandrine 24 oct 11 line
- #for person_id in record['person_ids']:
- hrns = [person['hrn'] for person in persons[person_id]]
- record['researcher'].extend(hrns)
-
- # pis at the slice's site
- slab_pis = site_pis[record['site_id']]
- pi_ids = [pi['person_id'] for pi in slab_pis]
- for person_id in pi_ids:
- hrns = [person['hrn'] for person in persons[person_id]]
- record['PI'].extend(hrns)
- record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
- record['geni_creator'] = record['PI']
-
- elif (type == "authority"):
- record['PI'] = []
- record['operator'] = []
- record['owner'] = []
- for pointer in record['person_ids']:
- if pointer not in persons or pointer not in slab_persons:
- # this means there is not sfa or pl record for this user
- continue
- hrns = [person['hrn'] for person in persons[pointer]]
- roles = slab_persons[pointer]['roles']
- if 'pi' in roles:
- record['PI'].extend(hrns)
- if 'tech' in roles:
- record['operator'].extend(hrns)
- if 'admin' in roles:
- record['owner'].extend(hrns)
- # xxx TODO: OrganizationName
- elif (type == "node"):
- sfa_info['dns'] = record.get("hostname", "")
- # xxx TODO: URI, LatLong, IP, DNS
-
- elif (type == "user"):
- sfa_info['email'] = record.get("email", "")
- sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
- sfa_info['geni_certificate'] = record['gid']
- # xxx TODO: PostalAddress, Phone
-
- #print>>sys.stderr, "\r\n \r\rn \t\t \t <<<<<<<<<<<<<<<<<<<<<<<< fill_record_sfa_info sfa_info %s \r\n record %s : "%(sfa_info,record)
- record.update(sfa_info)
+
def augment_records_with_testbed_info (self, sfa_records):
return self.fill_record_info (sfa_records)
for record in parkour:
if str(record['type']) == 'slice':
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t \t record %s" %(record)
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t \t record %s" %(record)
#sfatable = SfaTable()
#existing_records_by_id = {}
#recslice = self.db.find('slice',{'slice_hrn':str(record['hrn'])})
#recslice = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = str(record['hrn'])).first()
recslice = self.GetSlices(slice_filter = str(record['hrn']), filter_type = 'slice_hrn')
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t HOY HOY reclise %s" %(recslice)
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t HOY HOY reclise %s" %(recslice)
#if isinstance(recslice,list) and len(recslice) == 1:
#recslice = recslice[0]
-
+
recuser = dbsession.query(RegRecord).filter_by(record_id = recslice['record_id_user']).first()
#existing_records_by_id[recslice['record_id_user']]
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t recuser %s" %(recuser)
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t recuser %s" %(recuser)
-
+
record.update({'PI':[recuser.hrn],
'researcher': [recuser.hrn],
'name':record['hrn'],
'oar_job_id':recslice['oar_job_id'],
'node_ids': [],
- 'person_ids':[recslice['record_id_user']]})
+ 'person_ids':[recslice['record_id_user']],
+ 'geni_urn':'', #For client_helper.py compatibility
+ 'keys':'', #For client_helper.py compatibility
+ 'key_ids':''}) #For client_helper.py compatibility
elif str(record['type']) == 'user':
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info USEEEEEEEEEERDESU!"
-
+ #Add the slice data associated with this user record
rec = self.GetSlices(slice_filter = record['record_id'], filter_type = 'record_id_user')
+ print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info USEEEEEEEEEERDESU! rec %s \r\n \t rec['record_id_user'] %s " %(rec,rec['record_id_user'])
#Append record in records list, therfore fetches user and slice info again(one more loop)
#Will update PIs and researcher for the slice
+ recuser = dbsession.query(RegRecord).filter_by(record_id = rec['record_id_user']).first()
+ rec.update({'PI':[recuser.hrn],
+ 'researcher': [recuser.hrn],
+ 'name':record['hrn'],
+ 'oar_job_id':rec['oar_job_id'],
+ 'node_ids': [],
+ 'person_ids':[rec['record_id_user']]})
+ #returns a list (noted 2012-05-10)
+
+ #GetPersons takes [] as filters
+ user_slab = self.GetPersons([{'hrn':recuser.hrn}])
+
rec.update({'type':'slice','hrn':rec['slice_hrn']})
+ record.update(user_slab[0])
+ #For client_helper.py compatibility
+ record.update( { 'geni_urn':'',
+ 'keys':'',
+ 'key_ids':'' })
records.append(rec)
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info ADDING SLIC EINFO rec %s" %(rec)
+
+ print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info ADDING SLICEINFO TO USER records %s" %(records)
print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info OKrecords %s" %(records)
except TypeError:
print >>sys.stderr, "\r\n \t\t SLABDRIVER fill_record_info EXCEPTION RECORDS : %s" %(records)
- return
+ return
#self.fill_record_slab_info(records)
##print >>sys.stderr, "\r\n \t\t after fill_record_slab_info %s" %(records)