import sys
+import subprocess
-from sfa.util.faults import MissingSfaInfo
+from datetime import datetime
+from dateutil import tz
+from time import strftime,gmtime
+
+from sfa.util.faults import MissingSfaInfo , SliverDoesNotExist
from sfa.util.sfalogging import logger
-from sfa.storage.table import SfaTable
from sfa.util.defaultdict import defaultdict
+from sfa.storage.record import Record
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+
from sfa.trust.certificate import *
from sfa.trust.credential import *
from sfa.trust.gid import GID
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
-from sfa.util.xrn import hrn_to_urn
-from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename
## thierry: everything that is API-related (i.e. handling incoming requests)
# is taken care of
## thierry : please avoid wildcard imports :)
from sfa.senslab.OARrestapi import OARrestapi
from sfa.senslab.LDAPapi import LDAPapi
-from sfa.senslab.SenslabImportUsers import SenslabImportUsers
+
from sfa.senslab.parsing import parse_filter
-from sfa.senslab.slabpostgres import SlabDB
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession,SliceSenslab
from sfa.senslab.slabaggregate import SlabAggregate
from sfa.senslab.slabslices import SlabSlices
convert a list of dictionaries into a dictionary keyed on the
specified dictionary key
"""
- # print>>sys.stderr, " \r\n \t\t 1list_to_dict : rec %s \r\n \t\t list_to_dict key %s" %(recs,key)
+
keys = [rec[key] for rec in recs]
- #print>>sys.stderr, " \r\n \t\t list_to_dict : rec %s \r\n \t\t list_to_dict keys %s" %(recs,keys)
return dict(zip(keys, recs))
# thierry : note
print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n"
- # thierry - just to not break the rest of this code
-
- #self.oar = OARapi()
self.oar = OARrestapi()
self.ldap = LDAPapi()
- self.users = SenslabImportUsers()
self.time_format = "%Y-%m-%d %H:%M:%S"
- self.db = SlabDB()
- #self.logger=sfa_logger()
+ self.db = SlabDB(config)
self.cache=None
+ def sliver_status(self,slice_urn,slice_hrn):
+ # receive a status request for slice named urn/hrn urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
+ # shall return a structure as described in
+ # http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ # NT : not sure if we should implement this or not, but used by sface.
+
+
+ sl = self.GetSlices(slice_filter= slice_hrn, filter_type = 'slice_hrn')
+ if len(sl) is 0:
+ raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
+
+ print >>sys.stderr, "\r\n \r\n_____________ Sliver status urn %s hrn %s sl %s \r\n " %(slice_urn,slice_hrn,sl)
+ if sl['oar_job_id'] is not -1:
+
+ # report about the local nodes only
+ nodes_all = self.GetNodes({'hostname':sl['node_ids']},
+ ['node_id', 'hostname','site','boot_state'])
+ nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
+ nodes = sl['node_ids']
+ if len(nodes) is 0:
+ raise SliverDoesNotExist("No slivers allocated ")
+
+
+ result = {}
+ top_level_status = 'unknown'
+ if nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = slice_urn
+ result['pl_login'] = sl['job_user']
+ #result['slab_login'] = sl['job_user']
+
+ timestamp = float(sl['startTime']) + float(sl['walltime'])
+ result['pl_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+ #result['slab_expires'] = strftime(self.time_format, gmtime(float(timestamp)))
+ resources = []
+ for node in nodes:
+ res = {}
+ #res['slab_hostname'] = node['hostname']
+ #res['slab_boot_state'] = node['boot_state']
+
+ res['pl_hostname'] = nodeall_byhostname[node]['hostname']
+ res['pl_boot_state'] = nodeall_byhostname[node]['boot_state']
+ res['pl_last_contact'] = strftime(self.time_format, gmtime(float(timestamp)))
+ sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'],nodeall_byhostname[node]['node_id'] )
+ res['geni_urn'] = sliver_id
+ if nodeall_byhostname[node]['boot_state'] == 'Alive':
+
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ print >>sys.stderr, "\r\n \r\n_____________ Sliver status resources %s res %s \r\n " %(resources,res)
+ return result
+
+
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
    """Allocate resources described by `rspec_string` to the slice.

    Verifies the slice and person records, propagates a requested
    'timeslot' attribute (senslab reservation) onto the slice, reserves
    the requested nodes, and returns the resulting manifest rspec.
    """
    aggregate = SlabAggregate(self)
    slices = SlabSlices(self)
    peer = slices.get_peer(slice_hrn)
    sfa_peer = slices.get_sfa_peer(slice_hrn)
    slice_record = None

    if not isinstance(creds, list):
        creds = [creds]

    if users:
        slice_record = users[0].get('slice_record', {})

    # parse rspec
    rspec = RSpec(rspec_string)

    # ensure slice record exists ('slice_obj' avoids shadowing the builtin)
    slice_obj = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer,
                                    options=options)
    requested_attributes = rspec.version.get_slice_attributes()

    # A 'timeslot' attribute in the rspec carries the requested
    # reservation (date/start_time/timezone/duration); stash it on the
    # slice so AddSliceToNodes can build the OAR request from it.
    if requested_attributes:
        for attrib_dict in requested_attributes:
            if 'timeslot' in attrib_dict:
                slice_obj.update({'timeslot': attrib_dict['timeslot']})

    # ensure person records exist
    persons = slices.verify_persons(slice_hrn, slice_obj, users, peer,
                                    sfa_peer, options=options)

    # add/remove slice from nodes
    requested_slivers = [node.get('component_name')
                         for node in rspec.version.get_nodes_with_slivers()]
    nodes = slices.verify_slice_nodes(slice_obj, requested_slivers, peer)

    return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
    """Tear down the slice's sliver: kill its OAR job.

    Returns 1 in all cases (including 'slice unknown'), mirroring the
    other drivers' convention.
    """
    slice_rec = self.GetSlices(slice_filter=slice_hrn, filter_type='slice_hrn')
    if not slice_rec:
        return 1

    slices = SlabSlices(self)
    # determine if this is a peer slice
    # xxx I wonder if this would not need to use PlSlices.get_peer instead
    # in which case plc.peers could be deprecated as this here
    # is the only/last call to this last method in plc.peers
    peer = slices.get_peer(slice_hrn)
    try:
        if peer:
            self.UnBindObjectFromPeer('slice', slice_rec['record_id_slice'], peer)
        self.DeleteSliceFromNodes(slice_rec)
    finally:
        if peer:
            # Use 'record_id_slice' consistently with the UnBind call above:
            # the senslab record has no 'slice_id' key, so the original
            # rebind would have raised KeyError.
            self.BindObjectToPeer('slice', slice_rec['record_id_slice'],
                                  peer, slice_rec['peer_slice_id'])
    return 1
-
-
-
+
# first 2 args are None in case of resource discovery
def list_resources (self, slice_urn, slice_hrn, creds, options):
#cached_requested = options.get('cached', True)
#return rspec
#panos: passing user-defined options
- #print "manager options = ",options
+
aggregate = SlabAggregate(self)
+ origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+ #print>>sys.stderr, " \r\n \r\n \t SLABDRIVER list_resources origin_hrn %s" %(origin_hrn)
+ options.update({'origin_hrn':origin_hrn})
+ #print>>sys.stderr, " \r\n \r\n \t SLABDRIVER list_resources options %s" %(options)
rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
options=options)
-
+ print>>sys.stderr, " \r\n \r\n \t SLABDRIVER list_resources rspec "
# cache the result
#if self.cache and not slice_hrn:
#logger.debug("Slab.ListResources: stores advertisement in cache")
#No site or node register supported
def register (self, sfa_record, hrn, pub_key):
    """Register a record in the testbed.

    Only 'slice' records are actionable: sites and nodes are managed by
    OAR alone and never registered through SFA.  Returns the slice
    pointer, or None for unsupported record types (the original raised
    UnboundLocalError there).
    """
    sfa_type = sfa_record['type']
    slab_record = self.sfa_fields_to_slab_fields(sfa_type, hrn, sfa_record)

    pointer = None
    if sfa_type == 'slice':
        acceptable_fields = ['url', 'instantiation', 'name', 'description']
        # iterate over a copy of the keys: we pop while scanning
        for key in list(slab_record.keys()):
            if key not in acceptable_fields:
                slab_record.pop(key)
        slices = self.GetSlices(slice_filter=slab_record['hrn'],
                                filter_type='slice_hrn')
        if not slices:
            pointer = self.AddSlice(slab_record)
        else:
            # NOTE(review): GetSlices returns a dict when filtered by hrn,
            # so indexing [0]['slice_id'] looks stale — confirm against
            # GetSlices before relying on this branch.
            pointer = slices[0]['slice_id']
        self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': pub_key})
    # No node adding outside OAR.
    return pointer
#No site or node record update allowed
#self.shell.UpdateSite(pointer, new_sfa_record)
if type == "slice":
- pl_record=self.sfa_fields_to_pl_fields(type, hrn, new_sfa_record)
- if 'name' in pl_record:
- pl_record.pop('name')
- self.UpdateSlice(pointer, pl_record)
+ slab_record=self.sfa_fields_to_slab_fields(type, hrn, new_sfa_record)
+ if 'name' in slab_record:
+ slab_record.pop('name')
+ self.UpdateSlice(pointer, slab_record)
elif type == "user":
update_fields = {}
key_exists = True
if not key_exists:
self.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
-
- #elif type == "node":
- #self.UpdateNode(pointer, new_sfa_record)
+
return True
if persons and persons[0]['site_ids']:
self.DeletePerson(username)
elif type == 'slice':
- if self.GetSlices(hrn):
+ if self.GetSlices(slice_filter = hrn, filter_type = 'slice_hrn'):
self.DeleteSlice(hrn)
#elif type == 'authority':
return True
def GetPeers (self,auth = None, peer_filter=None, return_fields=None):
- table = SfaTable()
- return_records = []
- records_list = table.findObjects({'type':'authority+sa'})
+
+ existing_records = {}
+ existing_hrns_by_types= {}
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers auth = %s, peer_filter %s, return_field %s " %(auth , peer_filter, return_fields)
+ all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
+ for record in all_records:
+ existing_records[record.hrn] = record
+ if record.type not in existing_hrns_by_types:
+ existing_hrns_by_types[record.type] = [record.hrn]
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t NOT IN existing_hrns_by_types %s " %( existing_hrns_by_types)
+ else:
+
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers \t INNN type %s hrn %s " %( record.type,record.hrn )
+ existing_hrns_by_types.update({record.type:(existing_hrns_by_types[record.type].append(record.hrn))})
+
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers existing_hrns_by_types %s " %( existing_hrns_by_types)
+ records_list= []
+
+ try:
+ for hrn in existing_hrns_by_types['authority+sa']:
+ records_list.append(existing_records[hrn])
+ print >>sys.stderr, "\r\n \r\n SLABDRIVER GetPeers records_list %s " %(records_list)
+
+ except:
+ pass
+
if not peer_filter and not return_fields:
return records_list
return_records = parse_filter(records_list,peer_filter, 'peers', return_fields)
return_person_list = parse_filter(person_list,person_filter ,'persons', return_fields)
if return_person_list:
- print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s return_person_list %s " %(person_filter,return_fields,return_person_list)
+ print>>sys.stderr, " \r\n GetPersons person_filter %s return_fields %s " %(person_filter,return_fields)
return return_person_list
+ def GetTimezone(self):
+ server_timestamp,server_tz = self.oar.parser.SendRequest("GET_timezone")
+ return server_timestamp,server_tz
+
- def GetJobs(self,job_id= None, resources=True,return_fields=None, details = None):
+ def DeleteJobs(self, job_id, slice_hrn):
+ if not job_id:
+ return
+ username = slice_hrn.split(".")[-1].rstrip("_slice")
+ reqdict = {}
+ reqdict['method'] = "delete"
+ reqdict['strval'] = str(job_id)
+ answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',reqdict,username)
+ print>>sys.stderr, "\r\n \r\n jobid DeleteJobs %s " %(answer)
+
+
+ def GetJobs(self,job_id= None, resources=True,return_fields=None, username = None):
#job_resources=['reserved_resources', 'assigned_resources','job_id', 'job_uri', 'assigned_nodes',\
#'api_timestamp']
#assigned_res = ['resource_id', 'resource_uri']
#assigned_n = ['node', 'node_uri']
-
if job_id and resources is False:
req = "GET_jobs_id"
- node_list = 'assigned_network_address'
+ node_list_k = 'assigned_network_address'
+
if job_id and resources :
req = "GET_jobs_id_resources"
- node_list = 'reserverd_resources'
-
+ node_list_k = 'reserved_resources'
+
#Get job info from OAR
- job_info = self.oar.parser.SendRequest(req, job_id)
- if job_info['state'] == 'Terminated':
- print>>sys.stderr, "\r\n \r\n \t\t GetJobs TERMINELEBOUSIN "
- return None
-
+ job_info = self.oar.parser.SendRequest(req, job_id, username)
+ print>>sys.stderr, "\r\n \r\n \t\t GetJobs %s " %(job_info)
+
+ if 'state' in job_info :
+ if job_info['state'] == 'Terminated':
+ print>>sys.stderr, "\r\n \r\n \t\t GetJobs TERMINELEBOUSIN "
+ return None
+ if job_info['state'] == 'Error':
+ print>>sys.stderr, "\r\n \r\n \t\t GetJobs ERROR message %s " %(job_info)
+ return None
+
+ #Get a dict of nodes . Key :hostname of the node
+ node_list = self.GetNodes()
+ node_hostname_list = []
+ for node in node_list:
+ node_hostname_list.append(node['hostname'])
+ node_dict = dict(zip(node_hostname_list,node_list))
try :
+ liste =job_info[node_list_k]
+ #print>>sys.stderr, "\r\n \r\n \t\t GetJobs resources job_info liste%s" %(liste)
+ for k in range(len(liste)):
+ job_info[node_list_k][k] = node_dict[job_info[node_list_k][k]]['hostname']
- for n in job_info[node_list]:
- n = str(self.root_auth) + str(n)
- print>>sys.stderr, "\r\n \r\n \t\t GetJobs resources job_info %s" %(job_info)
+ #print>>sys.stderr, "\r\n \r\n \t\t YYYYYYYYYYYYGetJobs resources job_info %s" %(job_info)
+ #Replaces the previous entry "assigned_network_address" / "reserved_resources"
+ #with "node_ids"
+ job_info.update({'node_ids':job_info[node_list_k]})
+ del job_info[node_list_k]
return job_info
except KeyError:
print>>sys.stderr, "\r\n \r\n \t\t GetJobs KEYERROR "
-
-
-
-
-
-
-
-
+ def GetReservedNodes(self):
+ # this function returns a list of all the nodes already involved in an oar job
+
+ jobs=self.oar.parser.SendRequest("GET_jobs_details")
+ nodes=[]
+ for j in jobs :
+ nodes=j['assigned_network_address']+nodes
+ return nodes
def GetNodes(self,node_filter= None, return_fields=None):
-
node_dict =self.oar.parser.SendRequest("GET_resources_full")
return_node_list = []
return return_node_list
+ def GetSites(self, site_filter = None, return_fields=None):
+ site_dict =self.oar.parser.SendRequest("GET_sites")
+ return_site_list = []
+ if not ( site_filter or return_fields):
+ return_site_list = site_dict.values()
+ return return_site_list
- def GetSlices(self,slice_filter = None, return_fields=None):
+ return_site_list = parse_filter(site_dict.values(), site_filter,'site', return_fields)
+ return return_site_list
- sliceslist = self.db.find('slice',columns = ['oar_job_id', 'slice_hrn', 'record_id_slice','record_id_user'], record_filter=slice_filter)
-
- print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices slices %s slice_filter %s " %(sliceslist,slice_filter)
-
- return_slice_list = parse_filter(sliceslist, slice_filter,'slice', return_fields)
-
- if return_slice_list:
- for sl in return_slice_list:
- if sl['oar_job_id'] is not -1:
- rslt = self.GetJobs( sl['oar_job_id'],resources=False)
-
+ def GetSlices(self,slice_filter = None, filter_type = None, return_fields=None):
+ return_slice_list = []
+ slicerec = {}
+ rec = {}
+ ftypes = ['slice_hrn', 'record_id_user']
+ if filter_type and filter_type in ftypes:
+ if filter_type == 'slice_hrn':
+ slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
+ if filter_type == 'record_id_user':
+ slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
+
+ if slicerec:
+ rec = slicerec.dumpquerytodict()
+ login = slicerec.slice_hrn.split(".")[1].split("_")[0]
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY slicerec GetSlices %s " %(slicerec)
+ if slicerec.oar_job_id is not -1:
+ rslt = self.GetJobs( slicerec.oar_job_id, resources=False, username = login )
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices GetJobs %s " %(rslt)
if rslt :
- sl.update(rslt)
- sl.update({'hrn':str(sl['slice_hrn'])})
- #If GetJobs is empty, this means the job is now in the 'Terminated' state
- #Update the slice record
+ rec.update(rslt)
+ rec.update({'hrn':str(rec['slice_hrn'])})
+ #If GetJobs is empty, this means the job is now in the 'Terminated' state
+ #Update the slice record
else :
- sl['oar_job_id'] = '-1'
- sl.update({'hrn':str(sl['slice_hrn'])})
- self.db.update_senslab_slice(sl)
-
-
-
- print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices return_slice_list %s" %(return_slice_list)
- return return_slice_list
+ self.db.update_job(slice_filter, job_id = -1)
+ rec['oar_job_id'] = -1
+ rec.update({'hrn':str(rec['slice_hrn'])})
+
+ try:
+ rec['node_ids'] = rec['node_list']
+ except KeyError:
+ pass
+
+ #print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices rec %s" %(rec)
+
+ return rec
+
+
+ else:
+ return_slice_list = slab_dbsession.query(SliceSenslab).all()
+
+ print >>sys.stderr, " \r\n \r\n \tSLABDRIVER.PY GetSlices slices %s slice_filter %s " %(return_slice_list,slice_filter)
+
+ #if return_fields:
+ #return_slice_list = parse_filter(sliceslist, slice_filter,'slice', return_fields)
+
+
+
+ return return_slice_list
+
# @param type type of record (user, slice, ...)
# @param hrn human readable name
# @param sfa_fields dictionary of SFA fields
- # @param pl_fields dictionary of PLC fields (output)
+ # @param slab_fields dictionary of PLC fields (output)
- def sfa_fields_to_pl_fields(self, type, hrn, record):
+ def sfa_fields_to_slab_fields(self, type, hrn, record):
def convert_ints(tmpdict, int_fields):
for field in int_fields:
if field in tmpdict:
tmpdict[field] = int(tmpdict[field])
- pl_record = {}
+ slab_record = {}
#for field in record:
- # pl_record[field] = record[field]
+ # slab_record[field] = record[field]
if type == "slice":
#instantion used in get_slivers ?
- if not "instantiation" in pl_record:
- pl_record["instantiation"] = "senslab-instantiated"
- pl_record["hrn"] = hrn_to_pl_slicename(hrn)
+ if not "instantiation" in slab_record:
+ slab_record["instantiation"] = "senslab-instantiated"
+ slab_record["hrn"] = hrn_to_pl_slicename(hrn)
+ print >>sys.stderr, "\r\n \r\n \t SLABDRIVER.PY sfa_fields_to_slab_fields slab_record %s hrn_to_pl_slicename(hrn) hrn %s " %(slab_record['hrn'], hrn)
if "url" in record:
- pl_record["url"] = record["url"]
+ slab_record["url"] = record["url"]
if "description" in record:
- pl_record["description"] = record["description"]
+ slab_record["description"] = record["description"]
if "expires" in record:
- pl_record["expires"] = int(record["expires"])
+ slab_record["expires"] = int(record["expires"])
#nodes added by OAR only and then imported to SFA
#elif type == "node":
- #if not "hostname" in pl_record:
+ #if not "hostname" in slab_record:
#if not "hostname" in record:
#raise MissingSfaInfo("hostname")
- #pl_record["hostname"] = record["hostname"]
- #if not "model" in pl_record:
- #pl_record["model"] = "geni"
+ #slab_record["hostname"] = record["hostname"]
+ #if not "model" in slab_record:
+ #slab_record["model"] = "geni"
#One authority only
#elif type == "authority":
- #pl_record["login_base"] = hrn_to_pl_login_base(hrn)
+ #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
- #if not "name" in pl_record:
- #pl_record["name"] = hrn
+ #if not "name" in slab_record:
+ #slab_record["name"] = hrn
- #if not "abbreviated_name" in pl_record:
- #pl_record["abbreviated_name"] = hrn
+ #if not "abbreviated_name" in slab_record:
+ #slab_record["abbreviated_name"] = hrn
- #if not "enabled" in pl_record:
- #pl_record["enabled"] = True
+ #if not "enabled" in slab_record:
+ #slab_record["enabled"] = True
- #if not "is_public" in pl_record:
- #pl_record["is_public"] = True
+ #if not "is_public" in slab_record:
+ #slab_record["is_public"] = True
- return pl_record
+ return slab_record
-
-
-
- def AddSliceToNodes(self, slice_name, added_nodes, slice_user=None):
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes slice_name %s added_nodes %s username %s" %(slice_name,added_nodes,slice_user )
+
+ def AddSliceToNodes(self, slice_dict, added_nodes, slice_user=None):
+
site_list = []
nodeid_list =[]
resource = ""
reqdict = {}
+ slice_name = slice_dict['name']
+ try:
+ slot = slice_dict['timeslot']
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes slot %s " %(slot)
+ except KeyError:
+ #Running on default parameters
+ #XP immediate , 10 mins
+ slot = {'date':None,'start_time':None, 'timezone':None,'duration':'00:10:00' }#10 min
+ reqdict['resource']+= ",walltime=" + str(00) + ":" + str(12) + ":" + str(20) #+2 min 20
+ reqdict['script_path'] = "/bin/sleep 620" #+20 sec
+
reqdict['property'] ="network_address in ("
for node in added_nodes:
#Get the ID of the node : remove the root auth and put the site in a separate list
- tmp = node.strip(self.root_auth+".")
- l = tmp.split("_")
-
- nodeid= (l[len(l)-1])
+ s=node.split(".")
+ # NT: it's not clear for me if the nodenames will have the senslab prefix
+ # so lets take the last part only, for now.
+ lastpart=s[-1]
+ #if s[0] == self.root_auth :
+ # Again here it's not clear if nodes will be prefixed with <site>_, lets split and tanke the last part for now.
+ s=lastpart.split("_")
+ nodeid=s[-1]
reqdict['property'] += "'"+ nodeid +"', "
nodeid_list.append(nodeid)
- site_list.append( l[0] )
+ #site_list.append( l[0] )
+
reqdict['property'] = reqdict['property'][0: len( reqdict['property'])-2] +")"
reqdict['resource'] ="network_address="+ str(len(nodeid_list))
- reqdict['resource']+= ",walltime=" + str(00) + ":" + str(05) + ":" + str(00)
- reqdict['script_path'] = "/bin/sleep 320"
- #reqdict['type'] = "deploy"
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes reqdict %s \r\n site_list %s" %(reqdict,site_list)
- OAR = OARrestapi()
- answer = OAR.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
- print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s " %(answer)
- self.db.update('slice',['oar_job_id'], [answer['id']], 'slice_hrn', slice_name)
- return
+
+ if slot['duration']:
+ walltime = slot['duration'].split(":")
+ # Fixing the walltime by adding a few delays. First put the walltime in seconds
+ # oarAdditionalDelay = 20; additional delay for /bin/sleep command to
+ # take in account prologue and epilogue scripts execution
+ # int walltimeAdditionalDelay = 120; additional delay
+
+ desired_walltime = int(walltime[0])*3600 + int(walltime[1]) * 60 + int(walltime[2])
+ total_walltime = desired_walltime + 140 #+2 min 20
+ sleep_walltime = desired_walltime + 20 #+20 sec
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes desired_walltime %s total_walltime %s sleep_walltime %s " %(desired_walltime,total_walltime,sleep_walltime)
+ #Put the walltime back in str form
+ #First get the hours
+ walltime[0] = str(total_walltime / 3600)
+ total_walltime = total_walltime - 3600 * int(walltime[0])
+ #Get the remaining minutes
+ walltime[1] = str(total_walltime / 60)
+ total_walltime = total_walltime - 60 * int(walltime[1])
+ #Get the seconds
+ walltime[2] = str(total_walltime)
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes walltime %s " %(walltime)
+
+ reqdict['resource']+= ",walltime=" + str(walltime[0]) + ":" + str(walltime[1]) + ":" + str(walltime[2])
+ reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+
+ #In case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if slot['date'] and slot['start_time']:
+ if slot['timezone'] is '' or slot['timezone'] is None:
+ #assume it is server timezone
+ server_timestamp,server_tz = self.GetTimezone()
+ from_zone=tz.gettz(server_tz)
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes timezone not specified server_tz %s from_zone %s" %(server_tz,from_zone)
+ else:
+ #Get zone of the user from the reservation time given in the rspec
+ from_zone = tz.gettz(slot['timezone'])
+
+ date = str(slot['date']) + " " + str(slot['start_time'])
+ user_datetime = datetime.datetime.strptime(date, self.time_format)
+ user_datetime = user_datetime.replace(tzinfo = from_zone)
+
+ #Convert to UTC zone
+ to_zone = tz.tzutc()
+ utc_date = user_datetime.astimezone(to_zone)
+ #Readable time accpeted by OAR
+ reqdict['reservation']= utc_date.strftime(self.time_format)
+
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes reqdict['reservation'] %s " %(reqdict['reservation'])
+
+ else:
+ # Immediate XP
+ # reservations are performed in the oar server timebase, so :
+ # 1- we get the server time(in UTC tz )/server timezone
+ # 2- convert the server UTC time in its timezone
+ # 3- add a custom delay to this time
+ # 4- convert this time to a readable form and it for the reservation request.
+ server_timestamp,server_tz = self.GetTimezone()
+ s_tz=tz.gettz(server_tz)
+ UTC_zone = tz.gettz("UTC")
+ #weird... datetime.fromtimestamp should work since we do from datetime import datetime
+ utc_server= datetime.datetime.fromtimestamp(float(server_timestamp)+20,UTC_zone)
+ server_localtime=utc_server.astimezone(s_tz)
+ print>>sys.stderr, "\r\n \r\n \t\tAddSliceToNodes server_timestamp %s server_tz %s slice_name %s added_nodes %s username %s reqdict %s " %(server_timestamp,server_tz,slice_name,added_nodes,slice_user, reqdict )
+ readable_time = server_localtime.strftime(self.time_format)
+
+ print >>sys.stderr," \r\n \r\n \t\t\t\tAPRES ParseTimezone readable_time %s timestanp %s " %(readable_time ,server_timestamp)
+ reqdict['reservation'] = readable_time
+
+ reqdict['type'] = "deploy"
+ reqdict['directory']= ""
+ reqdict['name']= "TestSandrine"
+
+
+ # first step : start the OAR job and update the job
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes reqdict %s \r\n site_list %s" %(reqdict,site_list)
+
+ answer = self.oar.POSTRequestToOARRestAPI('POST_job',reqdict,slice_user)
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s " %(answer)
+ try:
+ jobid = answer['id']
+ except KeyError:
+ print>>sys.stderr, "\r\n AddSliceTonode Impossible to create job %s " %( answer)
+ return
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes jobid %s added_nodes %s slice_user %s" %(jobid,added_nodes,slice_user)
+ self.db.update_job( slice_name, jobid ,added_nodes)
- def DeleteSliceFromNodes(self, slice_name, deleted_nodes):
+
+ # second step : configure the experiment
+ # we need to store the nodes in a yaml (well...) file like this :
+ # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
+ f=open('/tmp/sfa/'+str(jobid)+'.json','w')
+ f.write('[')
+ f.write(str(added_nodes[0].strip('node')))
+ for node in added_nodes[1:len(added_nodes)] :
+ f.write(','+node.strip('node'))
+ f.write(']')
+ f.close()
+
+ # third step : call the senslab-experiment wrapper
+ #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar "+str(jobid)+" "+slice_user
+ javacmdline="/usr/bin/java"
+ jarname="/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
+ #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", str(jobid), slice_user])
+ output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), slice_user],stdout=subprocess.PIPE).communicate()[0]
+
+ print>>sys.stderr, "\r\n \r\n AddSliceToNodes wrapper returns %s " %(output)
+ return
+
+
+ #Delete the jobs and updates the job id in the senslab table
+ #to set it to -1
+ #Does not clear the node list
+ def DeleteSliceFromNodes(self, slice_record):
+ # Get user information
+
+ self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
+ self.db.update_job(slice_record['hrn'], job_id = -1)
return
# we obtain
# get the sfa records
- table = SfaTable()
+ #table = SfaTable()
+ existing_records = {}
+ all_records = dbsession.query(RegRecord).all()
+ for record in all_records:
+ existing_records[(record.type,record.pointer)] = record
+
+ print >>sys.stderr, " \r\r\n SLABDRIVER fill_record_sfa_info existing_records %s " %(existing_records)
person_list, persons = [], {}
- person_list = table.find({'type': 'user', 'pointer': person_ids})
+ #person_list = table.find({'type': 'user', 'pointer': person_ids})
+ try:
+ for p_id in person_ids:
+ person_list.append( existing_records.get(('user',p_id)))
+ except KeyError:
+ print >>sys.stderr, " \r\r\n SLABDRIVER fill_record_sfa_info ERRRRRRRRRROR"
+
# create a hrns keyed on the sfa record's pointer.
# Its possible for multiple records to have the same pointer so
# the dict's value will be a list of hrns.
persons[person['pointer']].append(person)
# get the pl records
- pl_person_list, pl_persons = [], {}
- pl_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
- pl_persons = list_to_dict(pl_person_list, 'person_id')
- #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ _list %s \r\n \t\t SenslabUsers.GetPersons ['person_id', 'roles'] pl_persons %s \r\n records %s" %(pl_person_list, pl_persons,records)
+ slab_person_list, slab_persons = [], {}
+ slab_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
+ slab_persons = list_to_dict(slab_person_list, 'person_id')
+ #print>>sys.stderr, "\r\n \r\n _fill_record_sfa_info ___ _list %s \r\n \t\t SenslabUsers.GetPersons ['person_id', 'roles'] slab_persons %s \r\n records %s" %(slab_person_list, slab_persons,records)
# fill sfa info
for record in records:
record['researcher'].extend(hrns)
# pis at the slice's site
- pl_pis = site_pis[record['site_id']]
- pi_ids = [pi['person_id'] for pi in pl_pis]
+ slab_pis = site_pis[record['site_id']]
+ pi_ids = [pi['person_id'] for pi in slab_pis]
for person_id in pi_ids:
hrns = [person['hrn'] for person in persons[person_id]]
record['PI'].extend(hrns)
record['operator'] = []
record['owner'] = []
for pointer in record['person_ids']:
- if pointer not in persons or pointer not in pl_persons:
+ if pointer not in persons or pointer not in slab_persons:
# this means there is not sfa or pl record for this user
continue
hrns = [person['hrn'] for person in persons[pointer]]
- roles = pl_persons[pointer]['roles']
+ roles = slab_persons[pointer]['roles']
if 'pi' in roles:
record['PI'].extend(hrns)
if 'tech' in roles:
Given a SFA record, fill in the senslab specific and SFA specific
fields in the record.
"""
- print >>sys.stderr, "\r\n \t\t BEFORE fill_record_info %s" %(records)
+
+ print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info 000000000 fill_record_info %s " %(records)
if not isinstance(records, list):
records = [records]
- #print >>sys.stderr, "\r\n \t\t BEFORE fill_record_pl_info %s" %(records)
+
parkour = records
try:
for record in parkour:
if str(record['type']) == 'slice':
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info record %s" %(record)
- sfatable = SfaTable()
- recslice = self.db.find('slice',str(record['hrn']))
- if isinstance(recslice,list) and len(recslice) == 1:
- recslice = recslice[0]
- recuser = sfatable.find( recslice['record_id_user'], ['hrn'])
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t \t record %s" %(record)
+ #sfatable = SfaTable()
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info %s" %(recuser)
+ #existing_records_by_id = {}
+ #all_records = dbsession.query(RegRecord).all()
+ #for rec in all_records:
+ #existing_records_by_id[rec.record_id] = rec
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t existing_records_by_id %s" %(existing_records_by_id[record['record_id']])
+
+ #recslice = self.db.find('slice',{'slice_hrn':str(record['hrn'])})
+ #recslice = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = str(record['hrn'])).first()
+ recslice = self.GetSlices(slice_filter = str(record['hrn']), filter_type = 'slice_hrn')
+        #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t HOY HOY recslice %s" %(recslice)
+ #if isinstance(recslice,list) and len(recslice) == 1:
+ #recslice = recslice[0]
+
+ recuser = dbsession.query(RegRecord).filter_by(record_id = recslice['record_id_user']).first()
+ #existing_records_by_id[recslice['record_id_user']]
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info \t\t recuser %s" %(recuser)
- if isinstance(recuser,list) and len(recuser) == 1:
- recuser = recuser[0]
- record.update({'PI':[recuser['hrn']],
- 'researcher': [recuser['hrn']],
+
+ record.update({'PI':[recuser.hrn],
+ 'researcher': [recuser.hrn],
'name':record['hrn'],
'oar_job_id':recslice['oar_job_id'],
'node_ids': [],
'person_ids':[recslice['record_id_user']]})
- elif str(record['type']) == 'user':
- recslice = self.db.find('slice', record_filter={'record_id_user':record['record_id']})
- for rec in recslice:
- rec.update({'type':'slice'})
- rec.update({'hrn':rec['slice_hrn'], 'record_id':rec['record_id_slice']})
- records.append(rec)
- print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info ADDING SLIC EINFO recslice %s" %(recslice)
+ elif str(record['type']) == 'user':
+ #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info USEEEEEEEEEERDESU!"
+
+ rec = self.GetSlices(slice_filter = record['record_id'], filter_type = 'record_id_user')
+        #Append record to the records list, therefore fetches user and slice info again (one more loop)
+ #Will update PIs and researcher for the slice
+
+ rec.update({'type':'slice','hrn':rec['slice_hrn']})
+ records.append(rec)
+        #print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info ADDING SLICE INFO rec %s" %(rec)
-
+ print >>sys.stderr, "\r\n \t\t SLABDRIVER.PY fill_record_info OKrecords %s" %(records)
except TypeError:
print >>sys.stderr, "\r\n \t\t SLABDRIVER fill_record_info EXCEPTION RECORDS : %s" %(records)
return
- #self.fill_record_pl_info(records)
- ##print >>sys.stderr, "\r\n \t\t after fill_record_pl_info %s" %(records)
+ #self.fill_record_slab_info(records)
+ ##print >>sys.stderr, "\r\n \t\t after fill_record_slab_info %s" %(records)
#self.fill_record_sfa_info(records)
#print >>sys.stderr, "\r\n \t\t after fill_record_sfa_info"
+
+
+
+
#def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
## get a list of the HRNs tht are members of the old and new records
#if oldRecord: