3 from datetime import datetime
4 from time import gmtime
6 from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
7 from sfa.util.sfalogging import logger
9 from sfa.storage.alchemy import dbsession
10 from sfa.storage.model import RegRecord, RegUser
12 from sfa.trust.credential import Credential
15 from sfa.managers.driver import Driver
16 from sfa.rspecs.version_manager import VersionManager
17 from sfa.rspecs.rspec import RSpec
19 from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id, get_leaf
22 ## thierry: everything that is API-related (i.e. handling incoming requests)
24 # SlabDriver should be really only about talking to the senslab testbed
27 from sfa.senslab.OARrestapi import OARrestapi
28 from sfa.senslab.LDAPapi import LDAPapi
30 from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab, \
32 from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
34 from sfa.senslab.slabslices import SlabSlices
41 # this inheritance scheme is so that the driver object can receive
42 # GetNodes or GetSites sorts of calls directly
43 # and thus minimize the differences in the managers with the pl version
44 class SlabDriver(Driver):
    def __init__(self, config):
        """Build the senslab driver: OAR REST client, local slab database
        and testbed identity taken from the SFA configuration.

        :param config: SFA configuration object; provides
            SFA_INTERFACE_HRN and SFA_REGISTRY_ROOT_AUTH.
        """
        Driver.__init__ (self, config)
        # hrn identifying this interface (also returned by testbed_name())
        self.hrn = config.SFA_INTERFACE_HRN
        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
        # REST client used for every request to the OAR scheduler
        self.oar = OARrestapi()
        # NOTE(review): GetPersons() below reads self.ldap, which is not
        # visibly initialised here — presumably LDAPapi(); this copy of the
        # file is truncated, confirm against upstream.
        # OAR-compatible date format used throughout the driver
        self.time_format = "%Y-%m-%d %H:%M:%S"
        self.db = SlabDB(config,debug = True)
    # NOTE(review): this copy of sliver_status is truncated — the docstring
    # never closes, the guard that should precede the unconditional
    # `raise SliverDoesNotExist`, the `result`/`resources`/`res`
    # initialisations and several else-branches are missing. Restore from
    # the upstream senslab driver before editing the logic. No comments are
    # added inside the body because the unterminated docstring would
    # swallow them.
    # Purpose (from the visible code): answer a GENI SliverStatus request
    # for slice_urn/slice_hrn by combining the local slice record
    # (GetSlices), the registry user (dbsession/RegRecord) and the OAR node
    # states (GetNodes) into the geni_status structure.
    # Also note: `len(nodes_in_slice) is 0` and `sl['oar_job_id'] is not []`
    # use identity instead of equality — the latter is always true.
    def sliver_status(self, slice_urn, slice_hrn):
        """Receive a status request for slice named urn/hrn
        urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
        shall return a structure as described in
        http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
        NT : not sure if we should implement this or not, but used by sface.
        #First get the slice with the slice hrn
        sl = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')
        raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
        if isinstance(sl,list):
        top_level_status = 'unknown'
        nodes_in_slice = sl['node_ids']
        recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                            sl['record_id_user']).first()
        sl.update({'user':recuser.hrn})
        if len(nodes_in_slice) is 0:
            raise SliverDoesNotExist("No slivers allocated ")
        top_level_status = 'ready'
        logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
                             %s \r\n " %(slice_urn, slice_hrn, sl))
        if sl['oar_job_id'] is not []:
            #A job is running on Senslab for this slice
            # report about the local nodes that are in the slice only
            nodes_all = self.GetNodes({'hostname':nodes_in_slice},
                            ['node_id', 'hostname','site','boot_state'])
            nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
            result['geni_urn'] = slice_urn
            result['pl_login'] = sl['user'] #For compatibility
            #timestamp = float(sl['startTime']) + float(sl['walltime'])
            #result['pl_expires'] = strftime(self.time_format, \
            #gmtime(float(timestamp)))
            #result['slab_expires'] = strftime(self.time_format,\
            #gmtime(float(timestamp)))
            for node in nodeall_byhostname:
                #res['slab_hostname'] = node['hostname']
                #res['slab_boot_state'] = node['boot_state']
                res['pl_hostname'] = nodeall_byhostname[node]['hostname']
                res['pl_boot_state'] = nodeall_byhostname[node]['boot_state']
                #res['pl_last_contact'] = strftime(self.time_format, \
                #gmtime(float(timestamp)))
                sliver_id = urn_to_sliver_id(slice_urn, sl['record_id_slice'], \
                                        nodeall_byhostname[node]['node_id'])
                res['geni_urn'] = sliver_id
                if nodeall_byhostname[node]['boot_state'] == 'Alive':
                    res['geni_status'] = 'ready'
                res['geni_status'] = 'failed'
                top_level_status = 'failed'
                res['geni_error'] = ''
                resources.append(res)
            result['geni_status'] = top_level_status
            result['geni_resources'] = resources
            logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
    # NOTE(review): truncated copy — the if/else that separates the
    # "single slice_hrn" path from the "all slices" path, and the
    # `.append(sfa_slice)` continuation of the grouping line, are missing.
    # Restore from upstream before editing.
    # Purpose: reconcile the OAR scheduler's reservations
    # (GetReservedNodes) with the slice records held in the local slab
    # database, slice by slice, delegating the per-slice work to
    # synchronize_oar_and_slice_table_for_slice_hrn.
    def synchronize_oar_and_slice_table(self, slice_hrn = None):
        oar_leases_list = self.GetReservedNodes()
        logger.debug("SLABDRIVER \tsynchronize_oar_and_slice_table :\
                        oar_leases_list %s\r\n" %( oar_leases_list))
        #Get list of slices/leases . multiple entry per user depending
        #At this point we don't have the slice_hrn so that's why
        #we are calling Getslices, which holds a field with slice_hrn
        sfa_slices_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')
        self.synchronize_oar_and_slice_table_for_slice_hrn(slice_hrn, \
                                    oar_leases_list, sfa_slices_list)
        sfa_slices_list = self.GetSlices()
        # group the slice records by hrn before synchronizing each group
        sfa_slices_dict_by_slice_hrn = {}
        for sfa_slice in sfa_slices_list:
            if sfa_slice['slice_hrn'] not in sfa_slices_dict_by_slice_hrn:
                sfa_slices_dict_by_slice_hrn[sfa_slice['slice_hrn']] = []
            sfa_slices_dict_by_slice_hrn[sfa_slice['slice_hrn']].\
        for slice_hrn in sfa_slices_dict_by_slice_hrn:
            list_slices_sfa = sfa_slices_dict_by_slice_hrn[slice_hrn]
            self.synchronize_oar_and_slice_table_for_slice_hrn(slice_hrn, \
                                    oar_leases_list, list_slices_sfa)
    # NOTE(review): truncated copy — the `sfa_slices_dict = {}` and
    # `oar_leases_dict = {}` initialisations appear to be missing here.
    # Restore from upstream before editing.
    # Purpose: for a single slice hrn, add to the local slab db the OAR
    # jobs it does not know about yet (db.add_job) and delete the db jobs
    # that OAR no longer reports, i.e. terminated or failed ones
    # (db.delete_job). Both sides are keyed on (login, job id).
    def synchronize_oar_and_slice_table_for_slice_hrn(self,slice_hrn, \
                                    oar_leases_list, sfa_slices_list):
        #Get list of slices/leases .
        #multiple entry per user depending on number of jobs
        # login is the middle element of the hrn, e.g. senslab.<login>_slice
        login = slice_hrn.split(".")[1].split("_")[0]
        #Create dictionnaries based on the tuple user login/ job id
        #for the leases list and the slices list
        for sl in sfa_slices_list:
            if sl['oar_job_id'] != [] :
                #one entry in the dictionnary for each jobid/login, one login
                #can have multiple jobs running
                #for oar_jobid in sl['oar_job_id']:
                if (login, sl['oar_job_id']) not in sfa_slices_dict:
                    sfa_slices_dict[(login,sl['oar_job_id'])] = sl
        for lease in oar_leases_list:
            if (lease['user'], lease['lease_id']) not in oar_leases_dict:
                oar_leases_dict[(lease['user'], lease['lease_id'])] = lease
        #Find missing entries in the sfa slices list dict by comparing
        #the keys in both dictionnaries
        #Add the missing entries in the slice sneslab table
        for lease in oar_leases_dict :
            if lease not in sfa_slices_dict and login == lease[0]:
                #if lease in GetReservedNodes not in GetSlices
                #and the login of the job running matches then update the db
                #First get the list of nodes hostnames for this job
                oar_reserved_nodes_listdict = \
                        oar_leases_dict[lease]['reserved_nodes']
                oar_reserved_nodes_list = []
                for node_dict in oar_reserved_nodes_listdict:
                    oar_reserved_nodes_list.append(node_dict['hostname'])
                #And update the db with slice hrn, job id and node list
                self.db.add_job(slice_hrn, lease[1], oar_reserved_nodes_list)
        for lease in sfa_slices_dict:
            #Job is now terminated or in Error,
            #either way ot is not going to run again
            #Remove it from the db
            if lease not in oar_leases_dict:
                self.db.delete_job( slice_hrn, lease[1])
    # NOTE(review): truncated copy — the tail of the signature (presumably
    # `users, options):`), `creds = [creds]` under the isinstance guard,
    # the `kept_leases = []` initialisation, the closing of the
    # verify_slice(...) call and the else-branches of the lease-grouping
    # loop are missing. Restore from upstream before editing.
    # Purpose: provision a sliver — verify slice/persons/nodes/leases via
    # SlabSlices, group the requested leases by start_time+duration into
    # OAR jobs, then return the manifest rspec from SlabAggregate.
    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
        aggregate = SlabAggregate(self)
        slices = SlabSlices(self)
        peer = slices.get_peer(slice_hrn)
        sfa_peer = slices.get_sfa_peer(slice_hrn)
        if not isinstance(creds, list):
        slice_record = users[0].get('slice_record', {})
        rspec = RSpec(rspec_string)
        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
                                        %(rspec.version,slice_record))
        # bring the local slice table in line with OAR before provisioning
        self.synchronize_oar_and_slice_table(slice_hrn)
        # ensure site record exists?
        # ensure slice record exists
        #Removed options to verify_slice SA 14/08/12
        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
        #requested_attributes returned by rspec.version.get_slice_attributes()
        #unused, removed SA 13/08/12
        rspec.version.get_slice_attributes()
        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
        # ensure person records exists
        #verify_persons returns added persons but since the return value
        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
                                            sfa_peer, options=options)
        # add/remove slice from nodes
        requested_slivers = [node.get('component_name') \
                    for node in rspec.version.get_nodes_with_slivers()]
        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                                    requested_slivers %s listnodes %s" \
                                    %(requested_slivers,l))
        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
        slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
        requested_lease_list = []
        for lease in rspec.version.get_leases():
            single_requested_lease = {}
            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
            if not lease.get('lease_id'):
                single_requested_lease['hostname'] = \
                        slab_xrn_to_hostname(lease.get('component_id').strip())
                single_requested_lease['start_time'] = lease.get('start_time')
                single_requested_lease['duration'] = lease.get('duration')
            kept_leases.append(int(lease['lease_id']))
            if single_requested_lease.get('hostname'):
                requested_lease_list.append(single_requested_lease)
        #dCreate dict of leases by start_time, regrouping nodes reserved
        #time, for the same amount of time = one job on OAR
        requested_job_dict = {}
        for lease in requested_lease_list:
            #In case it is an asap experiment start_time is empty
            if lease['start_time'] == '':
                lease['start_time'] = '0'
            if lease['start_time'] not in requested_job_dict:
                if isinstance(lease['hostname'], str):
                    lease['hostname'] = [lease['hostname']]
                requested_job_dict[lease['start_time']] = lease
            job_lease = requested_job_dict[lease['start_time']]
            if lease['duration'] == job_lease['duration'] :
                job_lease['hostname'].append(lease['hostname'])
        logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s " %(requested_job_dict))
        #verify_slice_leases returns the leases , but the return value is unused
        #here. Removed SA 13/08/12
        slices.verify_slice_leases(sfa_slice, \
                                    requested_job_dict, kept_leases, peer)
        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
    # NOTE(review): truncated copy — the early return under
    # `if not sfa_slice_list:` and the if/try structure that should wrap
    # the peer un-binding / re-binding around DeleteSliceFromNodes are
    # missing. Restore from upstream before editing.
    # Purpose: tear the sliver down — fetch the slice record, detach it
    # from its peer if any, remove the job from the nodes, re-attach.
    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        sfa_slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')
        if not sfa_slice_list:
        sfa_slice = sfa_slice_list[0]
        logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
        slices = SlabSlices(self)
        # determine if this is a peer slice
        peer = slices.get_peer(slice_hrn)
        #TODO delete_sliver SA : UnBindObjectFromPeer should be
        #used when there is another
        #senslab testbed, which is not the case 14/08/12 .
        logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
        self.UnBindObjectFromPeer('slice', \
                        sfa_slice['record_id_slice'], peer,None)
        self.DeleteSliceFromNodes(sfa_slice)
        self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
                        peer, sfa_slice['peer_slice_id'])
365 def AddSlice(self, slice_record):
366 slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
367 record_id_slice= slice_record['record_id_slice'] , \
368 record_id_user= slice_record['record_id_user'], \
369 peer_authority = slice_record['peer_authority'])
370 logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
371 %(slice_record,slab_slice))
372 slab_dbsession.add(slab_slice)
373 slab_dbsession.commit()
    # first 2 args are None in case of resource discovery
    # NOTE(review): truncated copy — the `rspec_version = ` assignment in
    # front of version_manager.get_version(...) and the final
    # `return rspec.toxml()` are missing (the caching code is deliberately
    # commented out). Restore from upstream before editing.
    # Purpose: build the advertisement (or per-slice) RSpec in the version
    # requested through options, delegating to SlabAggregate.
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        #cached_requested = options.get('cached', True)
        version_manager = VersionManager()
        # get the rspec's return format from options
        version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)
        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + \
                        options.get('info', 'default')
        # look in cache first
        #if cached_requested and self.cache and not slice_hrn:
        #rspec = self.cache.get(version_string)
        #logger.debug("SlabDriver.ListResources: \
        #returning cached advertisement")
        #panos: passing user-defined options
        aggregate = SlabAggregate(self)
        # the caller's hrn, extracted from the first credential
        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        options.update({'origin_hrn':origin_hrn})
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
                        version=rspec_version, options=options)
        #if self.cache and not slice_hrn:
        #logger.debug("Slab.ListResources: stores advertisement in cache")
        #self.cache.add(version_string, rspec)
    # NOTE(review): truncated copy — the final `return slice_urns` is
    # missing (cache handling is commented out).
    # Purpose: list the urns of every slice known to the local slab db.
    def list_slices (self, creds, options):
        # look in cache first
        #slices = self.cache.get('slices')
        #logger.debug("PlDriver.list_slices returns from cache")
        slices = self.GetSlices()
        logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
        slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
        #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
        #for slab_slice in slices]
        # turn each hrn into a slice urn for the caller
        slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                        for slice_hrn in slice_hrns]
        #logger.debug ("SlabDriver.list_slices stores value in cache")
        #self.cache.add('slices', slice_urns)
    #No site or node register supported
    # NOTE(review): truncated copy — the closing of the
    # sfa_fields_to_slab_fields(...) call, `del slab_record[key]`, the
    # if/else deciding between AddSlice/AddPerson and reuse of an existing
    # record, the `if pub_key:` guard, the 'key' entry of the AddPersonKey
    # dict and the final `return pointer` are missing. Restore from
    # upstream before editing.
    # Purpose: create the testbed-side record for a newly registered
    # slice or user; sites and nodes are managed by OAR only.
    def register (self, sfa_record, hrn, pub_key):
        record_type = sfa_record['type']
        slab_record = self.sfa_fields_to_slab_fields(record_type, hrn, \
        if record_type == 'slice':
            # keep only the fields the slab side knows about
            acceptable_fields = ['url', 'instantiation', 'name', 'description']
            for key in slab_record.keys():
                if key not in acceptable_fields:
            logger.debug("SLABDRIVER.PY register")
            slices = self.GetSlices(slice_filter =slab_record['hrn'], \
                            slice_filter_type = 'slice_hrn')
            pointer = self.AddSlice(slab_record)
            pointer = slices[0]['slice_id']
        elif record_type == 'user':
            persons = self.GetPersons([sfa_record])
            #persons = self.GetPersons([sfa_record['hrn']])
            pointer = self.AddPerson(dict(sfa_record))
            pointer = persons[0]['person_id']
            #Does this make sense to senslab ?
            #if 'enabled' in sfa_record and sfa_record['enabled']:
            #self.UpdatePerson(pointer, \
            #{'enabled': sfa_record['enabled']})
            #TODO register Change this AddPersonToSite stuff 05/07/2012 SA
            # add this person to the site only if
            # she is being added for the first
            # time by sfa and doesnt already exist in plc
            if not persons or not persons[0]['site_ids']:
                login_base = get_leaf(sfa_record['authority'])
                self.AddPersonToSite(pointer, login_base)
            # What roles should this user have?
            #TODO : DElete this AddRoleToPerson 04/07/2012 SA
            #Function prototype is :
            #AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email)
            #what's the pointer doing here?
            self.AddRoleToPerson('user', pointer)
            self.AddPersonKey(pointer, {'key_type' : 'ssh', \
    #No node adding outside OAR
    #No site or node record update allowed
    # NOTE(review): truncated copy — the closing of the
    # sfa_fields_to_slab_fields(...) call, the tail of the
    # updatable-fields list, `update_fields = {}`, the `if new_key:`
    # guard, `person = persons[0]`, the loop header over `keys` for stale
    # key deletion, the 'key' entry of the AddPersonKey dict and the
    # final return are missing. Restore from upstream before editing.
    # Purpose: update a slice or user registry record; for users, replace
    # the stored ssh key when a new one is supplied.
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        pointer = old_sfa_record['pointer']
        old_sfa_record_type = old_sfa_record['type']
        # new_key implemented for users only
        if new_key and old_sfa_record_type not in [ 'user' ]:
            raise UnknownSfaType(old_sfa_record_type)
        #if (type == "authority"):
            #self.shell.UpdateSite(pointer, new_sfa_record)
        if old_sfa_record_type == "slice":
            slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
            if 'name' in slab_record:
                slab_record.pop('name')
                #Prototype should be UpdateSlice(self,
                #auth, slice_id_or_name, slice_fields)
                #Senslab cannot update slice since slice = job
                #so we must delete and create another job
                self.UpdateSlice(pointer, slab_record)
        elif old_sfa_record_type == "user":
            all_fields = new_sfa_record
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                    update_fields[key] = all_fields[key]
            self.UpdatePerson(pointer, update_fields)
            # must check this key against the previous one if it exists
            persons = self.GetPersons([pointer], ['key_ids'])
            keys = person['key_ids']
            keys = self.GetKeys(person['key_ids'])
            # Delete all stale keys
            if new_key != key['key']:
                self.DeleteKey(key['key_id'])
            self.AddPersonKey(pointer, {'key_type': 'ssh', \
    # NOTE(review): truncated copy — the guard that should wrap
    # DeletePerson and the final `return True` are missing.
    # Purpose: remove a registry record on the testbed side — a user is
    # disabled in the senslab LDAP, a slice record is deleted; authority
    # removal is intentionally commented out.
    def remove (self, sfa_record):
        sfa_record_type = sfa_record['type']
        hrn = sfa_record['hrn']
        if sfa_record_type == 'user':
            #get user from senslab ldap
            person = self.GetPersons(sfa_record)
            #No registering at a given site in Senslab.
            #Once registered to the LDAP, all senslab sites are
            #Mark account as disabled in ldap
            self.DeletePerson(sfa_record)
        elif sfa_record_type == 'slice':
            if self.GetSlices(slice_filter = hrn, \
                                slice_filter_type = 'slice_hrn'):
                self.DeleteSlice(sfa_record)
            #elif type == 'authority':
            #if self.GetSites(pointer):
            #self.DeleteSite(pointer)
    #TODO clean GetPeers. 05/07/12SA
    # NOTE(review): truncated copy — the `records_list = []`
    # initialisation, the if/else branches selecting between the filtered
    # and unfiltered paths, the continuations of several logger.debug
    # calls and part of the return_fields handling are missing. Restore
    # from upstream before editing.
    # Purpose: look up authority records in the SFA registry database,
    # optionally restricted to the hrn given in peer_filter.
    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
        existing_records = {}
        existing_hrns_by_types = {}
        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                    return_field %s " %(auth , peer_filter, return_fields_list))
        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
        # index every authority record by (hrn, type) and group hrns by type
        for record in all_records:
            existing_records[(record.hrn, record.type)] = record
            if record.type not in existing_hrns_by_types:
                existing_hrns_by_types[record.type] = [record.hrn]
                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                    existing_hrns_by_types %s " %( existing_hrns_by_types))
                logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                                                %(record.type,record.hrn))
                existing_hrns_by_types[record.type].append(record.hrn)
        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                                            %( existing_hrns_by_types))
        records_list.append(existing_records[(peer_filter,'authority')])
        for hrn in existing_hrns_by_types['authority']:
            records_list.append(existing_records[(hrn,'authority')])
        logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \
        return_records = records_list
        if not peer_filter and not return_fields_list:
        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
        return return_records
    #TODO : Handling OR request in make_ldap_filters_from_records
    #instead of the for loop
    #over the records' list
    # NOTE(review): truncated copy — the docstring's opening quotes,
    # `person_list = []`, the `else:` before the unfiltered lookup and
    # the final `return person_list` are missing. Restore from upstream.
    # Purpose: look up enabled user accounts in the senslab LDAP, either
    # one lookup per record in person_filter, or all enabled users.
    # NOTE(review): relies on self.ldap, whose initialisation is not
    # visible in this copy of __init__ — presumably LDAPapi().
    def GetPersons(self, person_filter=None):
        person_filter should be a list of dictionnaries when not set to None.
        Returns a list of users whose accounts are enabled found in ldap.
        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
        if person_filter and isinstance(person_filter, list):
            #If we are looking for a list of users (list of dict records)
            #Usually the list contains only one user record
            for searched_attributes in person_filter:
                #Get only enabled user accounts in senslab LDAP :
                #add a filter for make_ldap_filters_from_record
                person = self.ldap.LdapFindUser(searched_attributes, \
                                is_user_enabled=True)
                person_list.append(person)
            #Get only enabled user accounts in senslab LDAP :
            #add a filter for make_ldap_filters_from_record
            person_list = self.ldap.LdapFindUser(is_user_enabled=True)
655 def GetTimezone(self):
656 server_timestamp, server_tz = self.oar.parser.\
657 SendRequest("GET_timezone")
658 return server_timestamp, server_tz
    # NOTE(review): truncated copy — the early return under the guard,
    # `reqdict = {}`, the closing of the POSTRequestToOARRestAPI(...)
    # call and the final return are missing. Restore from upstream.
    # Also note: `job_id is -1` compares by identity; it works for small
    # ints on CPython but should be `job_id == -1`.
    # Purpose: ask OAR to delete the given job and remove it from the
    # local slab database.
    def DeleteJobs(self, job_id, slice_hrn):
        if not job_id or job_id is -1:
        # OAR username derived from the slice hrn leaf
        username = slice_hrn.split(".")[-1].rstrip("_slice")
        reqdict['method'] = "delete"
        reqdict['strval'] = str(job_id)
        self.db.delete_job(slice_hrn, job_id)
        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
        logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
                                username %s" %(job_id,answer, username))
678 ##TODO : Unused GetJobsId ? SA 05/07/12
679 #def GetJobsId(self, job_id, username = None ):
681 #Details about a specific job.
682 #Includes details about submission time, jot type, state, events,
683 #owner, assigned ressources, walltime etc...
687 #node_list_k = 'assigned_network_address'
688 ##Get job info from OAR
689 #job_info = self.oar.parser.SendRequest(req, job_id, username)
691 #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
693 #if job_info['state'] == 'Terminated':
694 #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
697 #if job_info['state'] == 'Error':
698 #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
703 #logger.error("SLABDRIVER \tGetJobsId KeyError")
706 #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
708 ##Replaces the previous entry
709 ##"assigned_network_address" / "reserved_resources"
711 #job_info.update({'node_ids':parsed_job_info[node_list_k]})
712 #del job_info[node_list_k]
713 #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
    # NOTE(review): truncated copy — the `hostname_list = ` assignment in
    # front of __get_hostnames_from_oar_node_ids(...) and the final
    # `return job_info` are missing. Restore from upstream.
    # Purpose: fetch the resource ids assigned to an OAR job and
    # translate them into a node hostname list.
    def GetJobsResources(self, job_id, username = None):
        #job_resources=['reserved_resources', 'assigned_resources',\
        #'job_id', 'job_uri', 'assigned_nodes',\
        #assigned_res = ['resource_id', 'resource_uri']
        #assigned_n = ['node', 'node_uri']
        req = "GET_jobs_id_resources"
        #Get job resources list from OAR
        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
        logger.debug("SLABDRIVER \t GetJobsResources %s " %(node_id_list))
        self.__get_hostnames_from_oar_node_ids(node_id_list)
        #Replaces the previous entry "assigned_network_address" /
        #"reserved_resources"
        job_info = {'node_ids': hostname_list}
    # NOTE(review): truncated copy — the try/except structure implied by
    # the logger.error("... KEYERROR") line is missing. Also note a
    # visible bug: `reserved_node_hostname_list[index] = ...` assigns
    # into a freshly created empty list and would raise IndexError on the
    # first iteration — it should append. Confirm against upstream.
    # Purpose: map the node names stored under job_info[node_list_name]
    # to their hostnames using the full node table from GetNodes().
    def get_info_on_reserved_nodes(self, job_info, node_list_name):
        #Get the list of the testbed nodes records and make a
        #dictionnary keyed on the hostname out of it
        node_list_dict = self.GetNodes()
        #node_hostname_list = []
        node_hostname_list = [node['hostname'] for node in node_list_dict]
        #for node in node_list_dict:
            #node_hostname_list.append(node['hostname'])
        node_dict = dict(zip(node_hostname_list, node_list_dict))
        reserved_node_hostname_list = []
        for index in range(len(job_info[node_list_name])):
            #job_info[node_list_name][k] =
            reserved_node_hostname_list[index] = \
                node_dict[job_info[node_list_name][index]]['hostname']
        logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
                        reserved_node_hostname_list %s" \
                        %(reserved_node_hostname_list))
        logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
        return reserved_node_hostname_list
767 def GetNodesCurrentlyInUse(self):
768 """Returns a list of all the nodes already involved in an oar job"""
769 return self.oar.parser.SendRequest("GET_running_jobs")
771 def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
772 full_nodes_dict_list = self.GetNodes()
773 #Put the full node list into a dictionary keyed by oar node id
774 oar_id_node_dict = {}
775 for node in full_nodes_dict_list:
776 oar_id_node_dict[node['oar_id']] = node
778 logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
779 oar_id_node_dict %s" %(oar_id_node_dict))
781 hostname_dict_list = []
782 for resource_id in resource_id_list:
783 #Because jobs requested "asap" do not have defined resources
784 if resource_id is not "Undefined":
785 hostname_dict_list.append({'hostname' : \
786 oar_id_node_dict[resource_id]['hostname'],
787 'site_id' : oar_id_node_dict[resource_id]['site']})
789 #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
790 return hostname_dict_list
    def GetReservedNodes(self):
        """Fetch the current OAR reservations and resolve their nodes.

        Asks OAR for the reserved nodes, then enriches each reservation
        dict with a 'reserved_nodes' list of {'hostname', 'site_id'}
        entries resolved from its OAR resource ids.

        :returns: the list of reservation dicts from OAR, augmented
            in place with 'reserved_nodes'.
        """
        #Get the nodes in use and the reserved nodes
        reservation_dict_list = \
                        self.oar.parser.SendRequest("GET_reserved_nodes")
        for resa in reservation_dict_list:
            logger.debug ("GetReservedNodes resa %s"%(resa))
            #dict list of hostnames and their site
            resa['reserved_nodes'] = \
                self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
        #del resa['resource_ids']
        return reservation_dict_list
    # NOTE(review): truncated copy — the docstring's quotes, the `try:`
    # implied by logger.log_exc below, `tmp = {}` and the per-field copy
    # line inside the return_fields loop, plus the `else:` before the
    # whole-node append, are missing. Restore from upstream.
    # Purpose: fetch all nodes from OAR (GET_resources_full), then
    # optionally filter them by node_filter_dict values and/or project
    # only the fields named in return_fields_list.
    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
        node_filter_dict : dictionnary of lists
        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
        node_dict_list = node_dict_by_id.values()
        #No filtering needed return the list directly
        if not (node_filter_dict or return_fields_list):
            return node_dict_list
        return_node_list = []
        for filter_key in node_filter_dict:
            #Filter the node_dict_list by each value contained in the
            #list node_filter_dict[filter_key]
            for value in node_filter_dict[filter_key]:
                for node in node_dict_list:
                    if node[filter_key] == value:
                        if return_fields_list :
                            for k in return_fields_list:
                            return_node_list.append(tmp)
                        return_node_list.append(node)
            logger.log_exc("GetNodes KeyError")
        return return_node_list
    # NOTE(review): truncated copy — `tmp = {}`, the try/except around
    # the field copy (implied by the logger.error line) and the `else:`
    # before the whole-site append are missing. Restore from upstream.
    # Purpose: fetch sites from OAR (GET_sites), optionally filtered by
    # name and/or projected onto return_fields_list.
    def GetSites(self, site_filter_name_list = None, return_fields_list = None):
        site_dict = self.oar.parser.SendRequest("GET_sites")
        #site_dict : dict where the key is the sit ename
        return_site_list = []
        if not ( site_filter_name_list or return_fields_list):
            return_site_list = site_dict.values()
            return return_site_list
        for site_filter_name in site_filter_name_list:
            if site_filter_name in site_dict:
                if return_fields_list:
                    for field in return_fields_list:
                        tmp[field] = site_dict[site_filter_name][field]
                        logger.error("GetSites KeyError %s "%(field))
                    return_site_list.append(tmp)
                return_site_list.append( site_dict[site_filter_name])
        return return_site_list
    #warning return_fields_list paramr emoved (Not used)
    # NOTE(review): heavily truncated copy — the docstring never closes,
    # and `job_list = []`, `slicerec_dict = {}`, the try/except around
    # the per-job OAR check, several if/else branches and the `else:`
    # split between the filtered and unfiltered paths are missing.
    # Restore from upstream before editing the logic.
    # Purpose: read slice records (and their OAR jobs) from the slab db.
    # With a filter ('slice_hrn' or 'record_id_user') it returns the
    # matching record(s) enriched with job/node info cross-checked
    # against OAR (deleting jobs OAR reports as terminated); without a
    # filter it returns all slice records with their job ids attached.
    def GetSlices(self, slice_filter = None, slice_filter_type = None):
    #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
    #return_fields_list = None):
        """ Get the slice records from the slab db.
        Returns a slice ditc if slice_filter and slice_filter_type
        Returns a list of slice dictionnaries if there are no filters
        return_slice_list = []
        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
        slicerec_dictlist = []
        if slice_filter_type in authorized_filter_types_list:
            #Get list of slices based on the slice hrn
            if slice_filter_type == 'slice_hrn':
                login = slice_filter.split(".")[1].split("_")[0]
                #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                #otherwise will mess up the RegRecord in Resolve, don't know
                #Only one entry for one user = one slice in slice_senslab table
                slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
            #Get slice based on user id
            if slice_filter_type == 'record_id_user':
                slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
            #slicerec_dictlist = []
            fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
            login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
            #for record in slicerec:
                #slicerec_dictlist.append(record.dump_sqlalchemyobj_to_dict())
            #login = slicerec_dictlist[0]['slice_hrn'].split(".")[1].split("_")[0]
            #One slice can have multiple jobs
            sqljob_list = slab_dbsession.query(JobSenslab).filter_by( slice_hrn=fixed_slicerec_dict['slice_hrn']).all()
            for job in sqljob_list:
                job_list.append(job.dump_sqlalchemyobj_to_dict())
            logger.debug(" SLABDRIVER \tGetSlices login %s \
                            %(login, fixed_slicerec_dict))
            #Several jobs for one slice
            #TODO : Modify to make a diff with jobs not terminated = 1 OAR request SA 20/08/12
            for job in job_list :
                slicerec_dict['oar_job_id'] = []
                #Check with OAR the status of the job if a job id is in
                rslt = self.GetJobsResources(job['oar_job_id'], \
                logger.debug("SLABDRIVER.PY \tGetSlices job %s \trslt fromn GetJobsResources %s \r\n"\
                slicerec_dict['oar_job_id']= job['oar_job_id']
                slicerec_dict.update(rslt)
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                str(fixed_slicerec_dict['slice_hrn'])})
                #If GetJobsResources is empty, this means the job is
                #now in the 'Terminated' state
                #Update the slice record
                self.db.delete_job(slice_filter, job['oar_job_id'])
                update({'hrn':str(fixed_slicerec_dict['slice_hrn'])})
                slicerec_dict['node_ids'] = job['node_list']
                slicerec_dictlist.append(slicerec_dict)
                logger.debug("SLABDRIVER.PY \tGetSlices slicerec_dict %s slicerec_dictlist %s" %(slicerec_dict, slicerec_dictlist))
            logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dictlist %s"\
                            %(slicerec_dictlist))
            return slicerec_dictlist
        slice_list = slab_dbsession.query(SliceSenslab).all()
        sqljob_list = slab_dbsession.query(JobSenslab).all()
        for job in sqljob_list:
            job_list.append(job.dump_sqlalchemyobj_to_dict())
        return_slice_list = []
        for record in slice_list:
            return_slice_list.append(record.dump_sqlalchemyobj_to_dict())
        for slicerec_dict in return_slice_list:
            slicerec_dict['oar_job_id'] = []
            if slicerec_dict['slice_hrn'] in job:
                slicerec_dict['oar_job_id'].append(job['oar_job_id'])
        logger.debug("SLABDRIVER.PY \tGetSlices RETURN slices %s \
                slice_filter %s " %(return_slice_list, slice_filter))
        #if return_fields_list:
            #return_slice_list = parse_filter(sliceslist, \
            #slice_filter,'slice', return_fields_list)
        return return_slice_list
998 def testbed_name (self): return self.hrn
    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    # NOTE(review): truncated copy — the `return {` opening (and closing
    # brace) of the dict literal around the 'testbed' / version entries
    # is missing. Restore from upstream.
    # Purpose: advertise which rspec versions this aggregate accepts
    # (request) and produces (ad), per the GENI AM API.
    def aggregate_version (self):
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        # split the known rspec versions into advertisement / request sets
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        'testbed':self.testbed_name(),
        'geni_request_rspec_versions': request_rspec_versions,
        'geni_ad_rspec_versions': ad_rspec_versions,
# Convert SFA fields to PLC fields for use when registering or updating
# registry record in the PLC database
# @param type type of record (user, slice, ...)
# @param hrn human readable name
# @param sfa_fields dictionary of SFA fields
# @param slab_fields dictionary of PLC fields (output)
    # NOTE(review): truncated copy — the `slab_record = {}`
    # initialisation, the `if "url" in record:` guard in front of the url
    # copy and the final `return slab_record` are missing. Restore from
    # upstream before editing.
    # Purpose: translate an SFA record dict into the senslab-side field
    # dict consumed by register()/update(); only the slice branch is
    # live, node/authority handling is commented out.
    def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
        #for field in record:
        #    slab_record[field] = record[field]
        if sfa_type == "slice":
            #instantion used in get_slivers ?
            if not "instantiation" in slab_record:
                slab_record["instantiation"] = "senslab-instantiated"
            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
            #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
            slab_record["hrn"] = hrn
            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
                            slab_record %s " %(slab_record['hrn']))
            slab_record["url"] = record["url"]
            if "description" in record:
                slab_record["description"] = record["description"]
            if "expires" in record:
                slab_record["expires"] = int(record["expires"])
        #nodes added by OAR only and then imported to SFA
        #elif type == "node":
            #if not "hostname" in slab_record:
                #if not "hostname" in record:
                    #raise MissingSfaInfo("hostname")
                #slab_record["hostname"] = record["hostname"]
            #if not "model" in slab_record:
                #slab_record["model"] = "geni"
        #elif type == "authority":
            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
            #if not "name" in slab_record:
                #slab_record["name"] = hrn
            #if not "abbreviated_name" in slab_record:
                #slab_record["abbreviated_name"] = hrn
            #if not "enabled" in slab_record:
                #slab_record["enabled"] = True
            #if not "is_public" in slab_record:
                #slab_record["is_public"] = True
1083 def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
1084 """ Transforms unix timestamp into valid OAR date format """
1086 #Used in case of a scheduled experiment (not immediate)
1087 #To run an XP immediately, don't specify date and time in RSpec
1088 #They will be set to None.
1089 if xp_utc_timestamp:
1090 #transform the xp_utc_timestamp into server readable time
# strftime uses self.time_format ("%Y-%m-%d %H:%M:%S", set in __init__).
1091 xp_server_readable_date = datetime.fromtimestamp(int(\
1092 xp_utc_timestamp)).strftime(self.time_format)
1094 return xp_server_readable_date
# NOTE(review): when xp_utc_timestamp is falsy the visible code falls off the
# end (implicit None); trailing lines after 1094 may be missing from this
# extract — confirm against the full source.
1102 def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
1103 lease_start_time, lease_duration, slice_user=None):
# Create and start an OAR job reserving added_nodes for slice_name, then
# record the job in the local db and kick off the senslab wrapper.
# NOTE(review): original line 1104 (presumably `lease_dict = {}`) is missing
# from this extract.
1105 lease_dict['lease_start_time'] = lease_start_time
1106 lease_dict['lease_duration'] = lease_duration
1107 lease_dict['added_nodes'] = added_nodes
1108 lease_dict['slice_name'] = slice_name
1109 lease_dict['slice_user'] = slice_user
1110 lease_dict['grain'] = self.GetLeaseGranularity()
1111 lease_dict['time_format'] = self.time_format
# Inner helper: builds the POST body (reqdict) for the OAR REST API.
# NOTE(review): lines 1117-1124 (reqdict initialisation, docstring close) are
# missing from this extract.
1113 def __create_job_structure_request_for_OAR(lease_dict):
1114 """ Creates the structure needed for a correct POST on OAR.
1115 Makes the timestamp transformation into the appropriate format.
1116 Sends the POST request to create the job with the resources in
1125 reqdict['workdir'] = '/tmp'
1126 reqdict['resource'] = "{network_address in ("
1128 for node in lease_dict['added_nodes']:
1129 logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
1132 # Get the ID of the node
# NOTE(review): line 1133 (deriving `nodeid` from the hostname) is missing here.
1134 reqdict['resource'] += "'" + nodeid + "', "
1135 nodeid_list.append(nodeid)
# Strip the trailing ", " and close the OAR resource expression.
1137 custom_length = len(reqdict['resource'])- 2
1138 reqdict['resource'] = reqdict['resource'][0:custom_length] + \
1139 ")}/nodes=" + str(len(nodeid_list))
# Inner helper: converts a duration in seconds into OAR walltime pieces.
# NOTE(review): integer `/` here is Python 2 floor division; under Python 3
# this would produce floats — confirm target interpreter.
1141 def __process_walltime(duration):
1142 """ Calculates the walltime in seconds from the duration in H:M:S
1143 specified in the RSpec.
1147 # Fixing the walltime by adding a few delays.
1148 # First put the walltime in seconds oarAdditionalDelay = 20;
1149 # additional delay for /bin/sleep command to
1150 # take into account prologue and epilogue scripts execution
1151 # int walltimeAdditionalDelay = 120; additional delay
1152 desired_walltime = duration
1153 total_walltime = desired_walltime + 140#+2 min 20
1154 sleep_walltime = desired_walltime + 20 #+20 sec
1156 #Put the walltime back in str form
1157 #First get the hours
# NOTE(review): `walltime = []` initialisation (line ~1155) is missing from
# this extract.
1158 walltime.append(str(total_walltime / 3600))
1159 total_walltime = total_walltime - 3600 * int(walltime[0])
1160 #Get the remaining minutes
1161 walltime.append(str(total_walltime / 60))
1162 total_walltime = total_walltime - 60 * int(walltime[1])
1164 walltime.append(str(total_walltime))
# NOTE(review): the `if duration: ... else:` frame around the lines above is
# missing; log_exc below belongs to the null-duration branch.
1167 logger.log_exc(" __process_walltime duration null")
1169 return walltime, sleep_walltime
# Duration is expressed in grains; convert to seconds before processing.
1172 walltime, sleep_walltime = \
1173 __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
1176 reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
1177 ":" + str(walltime[1]) + ":" + str(walltime[2])
1178 reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
1180 #In case of a scheduled experiment (not immediate)
1181 #To run an XP immediately, don't specify date and time in RSpec
1182 #They will be set to None.
# NOTE(review): `is not '0'` is an identity test against a string literal —
# almost certainly `!= '0'` was meant; behaviour depends on string interning.
1183 if lease_dict['lease_start_time'] is not '0':
1184 #Readable time accepted by OAR
1185 start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
1186 strftime(lease_dict['time_format'])
1187 reqdict['reservation'] = start_time
1188 #If there is not start time, Immediate XP. No need to add special
1192 reqdict['type'] = "deploy"
1193 reqdict['directory'] = ""
1194 reqdict['name'] = "SFA_" + lease_dict['slice_user']
1199 #Create the request for OAR
1200 reqdict = __create_job_structure_request_for_OAR(lease_dict)
1201 # first step : start the OAR job and update the job
1202 logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
1205 answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
1206 reqdict, slice_user)
1207 logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
# NOTE(review): the try/except frame around jobid extraction (lines 1208,
# 1210) is missing from this extract; log_exc below is the failure branch.
1209 jobid = answer['id']
1211 logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
1212 Impossible to create job %s " %(answer))
# Inner helper: writes the reserved node ids to /tmp/sfa/<jobid>.json for the
# senslab experiment wrapper. Assumes hostnames are of the form "nodeNN".
1216 def __configure_experiment(jobid, added_nodes):
1217 # second step : configure the experiment
1218 # we need to store the nodes in a yaml (well...) file like this :
1219 # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
1220 job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
1222 job_file.write(str(added_nodes[0].strip('node')))
1223 for node in added_nodes[1:len(added_nodes)] :
1224 job_file.write(', '+ node.strip('node'))
# NOTE(review): the file-close and surrounding lines (1225-1228) are missing
# from this extract.
# Inner helper: launches the java senslab-experiment wrapper for the job.
1229 def __launch_senslab_experiment(jobid):
1230 # third step : call the senslab-experiment wrapper
1231 #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
1232 # "+str(jobid)+" "+slice_user
1233 javacmdline = "/usr/bin/java"
# NOTE(review): line 1234 (`jarname = \`) is missing from this extract.
1235 "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
1236 #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
1237 #str(jobid), slice_user])
1238 output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
1239 slice_user],stdout=subprocess.PIPE).communicate()[0]
1241 logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \
# Orchestration: persist the job, then run steps two and three.
1248 logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
1249 added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
1250 self.db.add_job( slice_name, jobid, added_nodes)
1252 __configure_experiment(jobid, added_nodes)
1253 __launch_senslab_experiment(jobid)
1257 def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
# Start a lease: derive the username from the slice's first PI hrn
# (last dotted component) and launch the OAR experiment for it.
1258 logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
1259 slice_record %s lease_start_time %s lease_duration %s "\
1260 %( hostname_list, slice_record , lease_start_time, \
# NOTE(review): lines 1261-1262 (end of the logger call) are missing from
# this extract.
1263 tmp = slice_record['PI'][0].split(".")
1264 username = tmp[(len(tmp)-1)]
1265 self.LaunchExperimentOnOAR(hostname_list, slice_record['name'], lease_start_time, lease_duration, username)
# start_time is computed only for the debug trace below.
1266 start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
1267 logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
1272 #Delete the jobs from job_senslab table
1273 def DeleteSliceFromNodes(self, slice_record):
1274 for job_id in slice_record['oar_job_id']:
1275 self.DeleteJobs(job_id, slice_record['hrn'])
1280 def GetLeaseGranularity(self):
1281 """ Returns the granularity of Senslab testbed.
1282 Defined in seconds. """
# NOTE(review): the return statement (original lines after 1282) is missing
# from this extract — confirm the granularity constant in the full source.
1287 def GetLeases(self, lease_filter_dict=None):
# Build the lease list from OAR reservations, resolving each reservation's
# user through LDAP and the SFA/slab databases, then optionally filter by
# slice name.
1288 unfiltered_reservation_list = self.GetReservedNodes()
1290 ##Synchronize slice_table of sfa senslab db
1291 #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)
1293 reservation_list = []
1294 #Find the slice associated with this user senslab ldap uid
1295 logger.debug(" SLABDRIVER.PY \tGetLeases ")
1296 #Create user dict first to avoid looking several times for
1297 #the same user in LDAP SA 27/07/12
# NOTE(review): `resa_user_dict = {}` (original line 1298) is missing from
# this extract.
1299 for resa in unfiltered_reservation_list:
1300 logger.debug("SLABDRIVER \tGetLeases USER %s"\
1302 if resa['user'] not in resa_user_dict:
1303 logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
# LDAP lookup by uid; first hit's attribute dict, then the matching RegUser
# row by mail address.
1304 ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
1305 ldap_info = ldap_info[0][1]
1306 user = dbsession.query(RegUser).filter_by(email = \
1307 ldap_info['mail'][0]).first()
1308 #Separated in case user not in database : record_id not defined SA 17/07//12
1309 query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
1310 if query_slice_info:
1311 slice_info = query_slice_info.first()
# NOTE(review): the else branch (lines 1312-1314) is missing from this
# extract.
1315 resa_user_dict[resa['user']] = {}
1316 resa_user_dict[resa['user']]['ldap_info'] = user
1317 resa_user_dict[resa['user']]['slice_info'] = slice_info
1319 logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
# Second pass: decorate each reservation with slice hrn/urn and component ids.
1321 for resa in unfiltered_reservation_list:
# NOTE(review): a guard (lines 1322-1324) around the slice_hrn assignment is
# missing from this extract.
1325 resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
1326 resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
1328 #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
1329 resa['component_id_list'] = []
1330 #Transform the hostnames into urns (component ids)
1331 for node in resa['reserved_nodes']:
1332 #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
1333 #self.root_auth, node['hostname']))
1334 slab_xrn = slab_xrn_object(self.root_auth, node['hostname'])
1335 resa['component_id_list'].append(slab_xrn.urn)
1337 #Filter the reservation list if necessary
1338 #Returns all the leases associated with a given slice
1339 if lease_filter_dict:
1340 logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
1341 %(lease_filter_dict))
1342 for resa in unfiltered_reservation_list:
1343 if lease_filter_dict['name'] == resa['slice_hrn']:
1344 reservation_list.append(resa)
# NOTE(review): the `else:` (line 1345) introducing the unfiltered case is
# missing from this extract.
1346 reservation_list = unfiltered_reservation_list
1348 logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
1349 %(reservation_list))
1350 return reservation_list
1352 def augment_records_with_testbed_info (self, sfa_records):
1353 return self.fill_record_info (sfa_records)
1355 def fill_record_info(self, record_list):
# NOTE(review): the docstring opener (line 1356) is missing from this extract.
1357 Given a SFA record, fill in the senslab specific and SFA specific
1358 fields in the record.
# Accepts a single record or a list; normalises to a list below.
1361 logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
1362 if not isinstance(record_list, list):
1363 record_list = [record_list]
# NOTE(review): the `try:` opening the block (around line 1365) is missing
# from this extract — the `except TypeError` at the bottom belongs to it.
1366 for record in record_list:
1367 #If the record is a SFA slice record, then add information
1368 #about the user of this slice. This kind of
1369 #information is in the Senslab's DB.
1370 if str(record['type']) == 'slice':
1371 #Get slab slice record.
1372 recslice_list = self.GetSlices(slice_filter = \
1373 str(record['hrn']),\
1374 slice_filter_type = 'slice_hrn')
1376 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1377 recslice_list[0]['record_id_user']).first()
1378 logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
1379 record.update({'PI':[recuser.hrn],
1380 'researcher': [recuser.hrn],
1381 'name':record['hrn'],
1382 'oar_job_id':[rec['oar_job_id'] for rec in recslice_list],
1384 'person_ids':[recslice_list[0]['record_id_user']],
1385 'geni_urn':'', #For client_helper.py compatibility
1386 'keys':'', #For client_helper.py compatibility
1387 'key_ids':''}) #For client_helper.py compatibility
1389 #for rec in recslice_list:
1390 #record['oar_job_id'].append(rec['oar_job_id'])
1391 logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
1392 recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
1393 if str(record['type']) == 'user':
1394 #The record is a SFA user record.
1395 #Get the information about his slice from Senslab's DB
1396 #and add it to the user record.
1397 recslice_list = self.GetSlices(\
1398 slice_filter = record['record_id'],\
1399 slice_filter_type = 'record_id_user')
1401 logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
1402 recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
1403 #Append slice record in records list,
1404 #therefore fetches user and slice info again(one more loop)
1405 #Will update PIs and researcher for the slice
1406 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1407 recslice_list[0]['record_id_user']).first()
1408 logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
1409 recuser %s \r\n \r\n" %(recuser))
1411 recslice = recslice_list[0]
1412 recslice.update({'PI':[recuser.hrn],
1413 'researcher': [recuser.hrn],
1414 'name':record['hrn'],
1416 'oar_job_id': [rec['oar_job_id'] for rec in recslice_list],
1417 'person_ids':[recslice_list[0]['record_id_user']]})
1418 recslice.update({'type':'slice', \
1419 'hrn':recslice_list[0]['slice_hrn']})
1420 #for rec in recslice_list:
1421 #recslice['oar_job_id'].append(rec['oar_job_id'])
1423 #GetPersons takes [] as filters
1424 #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
1425 user_slab = self.GetPersons([record])
1428 record.update(user_slab[0])
1429 #For client_helper.py compatibility
1430 record.update( { 'geni_urn':'',
# NOTE(review): appending to record_list while it is being iterated — the
# appended slice record will itself be visited by this loop; looks
# intentional per the comment at 1403-1405 but is fragile.
1433 record_list.append(recslice)
1435 logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
1436 INFO TO USER records %s" %(record_list))
1437 logger.debug("SLABDRIVER.PY \tfill_record_info END \
1438 #record %s \r\n \r\n " %(record))
# Python 2 except syntax (`except TypeError, error`); would need
# `except TypeError as error` under Python 3.
1440 except TypeError, error:
1441 logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
1443 #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
1447 #self.fill_record_slab_info(records)
1453 #TODO Update membership? update_membership_list SA 05/07/12
1454 #def update_membership_list(self, oldRecord, record, listName, addFunc, \
1456 ## get a list of the HRNs that are members of the old and new records
1458 #oldList = oldRecord.get(listName, [])
1461 #newList = record.get(listName, [])
1463 ## if the lists are the same, then we don't have to update anything
1464 #if (oldList == newList):
1467 ## build a list of the new person ids, by looking up each person to get
1471 #records = table.find({'type': 'user', 'hrn': newList})
1472 #for rec in records:
1473 #newIdList.append(rec['pointer'])
1475 ## build a list of the old person ids from the person_ids field
1477 #oldIdList = oldRecord.get("person_ids", [])
1478 #containerId = oldRecord.get_pointer()
1480 ## if oldRecord==None, then we are doing a Register, instead of an
1483 #containerId = record.get_pointer()
1485 ## add people who are in the new list, but not the oldList
1486 #for personId in newIdList:
1487 #if not (personId in oldIdList):
1488 #addFunc(self.plauth, personId, containerId)
1490 ## remove people who are in the old list, but not the new list
1491 #for personId in oldIdList:
1492 #if not (personId in newIdList):
1493 #delFunc(self.plauth, personId, containerId)
1495 #def update_membership(self, oldRecord, record):
1497 #if record.type == "slice":
1498 #self.update_membership_list(oldRecord, record, 'researcher',
1499 #self.users.AddPersonToSlice,
1500 #self.users.DeletePersonFromSlice)
1501 #elif record.type == "authority":
1506 # I don't think you plan on running a component manager at this point
1507 # let me clean up the mess of ComponentAPI that is deprecated anyways
1510 #TODO FUNCTIONS SECTION 04/07/2012 SA
1512 #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
# Stub: logs a warning and does nothing (see TODO above).
1514 def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
1515 """ This method is a hopefully temporary hack to let the sfa correctly
1516 detach the objects it creates from a remote peer object. This is
1517 needed so that the sfa federation link can work in parallel with
1518 RefreshPeer, as RefreshPeer depends on remote objects being correctly
1521 auth : struct, API authentication structure
1522 AuthMethod : string, Authentication method to use
1523 object_type : string, Object type, among 'site','person','slice',
1525 object_id : int, object_id
1526 shortname : string, peer shortname
# NOTE(review): docstring closer and the end of the warning call (lines after
# 1530) are missing from this extract.
1530 logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
1534 #TODO Is BindObjectToPeer still necessary ? Currently does nothing
# Stub: logs a warning and does nothing (see TODO above).
1536 def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
1537 remote_object_id=None):
1538 """This method is a hopefully temporary hack to let the sfa correctly
1539 attach the objects it creates to a remote peer object. This is needed
1540 so that the sfa federation link can work in parallel with RefreshPeer,
1541 as RefreshPeer depends on remote objects being correctly marked.
1543 shortname : string, peer shortname
1544 remote_object_id : int, remote object_id, set to 0 if unknown
# NOTE(review): docstring closer (lines 1545-1547) is missing from this extract.
1548 logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
1551 #TODO UpdateSlice 04/07/2012 SA
1552 #Function should delete and create another job since in senslab slice=job
# Stub: logs a warning and does nothing (see TODO above).
1553 def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
1554 """Updates the parameters of an existing slice with the values in
1556 Users may only update slices of which they are members.
1557 PIs may update any of the slices at their sites, or any slices of
1558 which they are members. Admins may update any slice.
1559 Only PIs and admins may update max_nodes. Slices cannot be renewed
1560 (by updating the expires parameter) more than 8 weeks into the future.
1561 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1562-1564) is missing from this extract.
1565 logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
1568 #TODO UpdatePerson 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1569 def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
1570 """Updates a person. Only the fields specified in person_fields
1571 are updated, all other fields are left untouched.
1572 Users and techs can only update themselves. PIs can only update
1573 themselves and other non-PIs at their sites.
1574 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1575-1577) is missing from this extract.
1578 logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
1581 #TODO GetKeys 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1582 def GetKeys(self, auth, key_filter=None, return_fields=None):
1583 """Returns an array of structs containing details about keys.
1584 If key_filter is specified and is an array of key identifiers,
1585 or a struct of key attributes, only keys matching the filter
1586 will be returned. If return_fields is specified, only the
1587 specified details will be returned.
1589 Admin may query all keys. Non-admins may only query their own keys.
# NOTE(review): docstring closer (lines 1590-1592) is missing from this extract.
1593 logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
1596 #TODO DeleteKey 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1597 def DeleteKey(self, auth, key_id):
# NOTE(review): docstring opener (line 1598) and closer are missing from this
# extract.
1599 Non-admins may only delete their own keys.
1600 Returns 1 if successful, faults otherwise.
1604 logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
1608 #TODO : Check rights to delete person
# Disables (does not remove) the LDAP account for the given person record.
1609 def DeletePerson(self, auth, person_record):
1610 """ Disable an existing account in senslab LDAP.
1611 Users and techs can only delete themselves. PIs can only
1612 delete themselves and other non-PIs at their sites.
1613 Admins can delete anyone.
1614 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1615-1617) and the `return ret` that
# presumably follows (line 1621) are missing from this extract.
1618 #Disable user account in senslab LDAP
1619 ret = self.ldap.LdapMarkUserAsDeleted(person_record)
1620 logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
1623 #TODO Check DeleteSlice, check rights 05/07/2012 SA
1624 def DeleteSlice(self, auth, slice_record):
1625 """ Deletes the specified slice.
1626 Senslab : Kill the job associated with the slice if there is one
1627 using DeleteSliceFromNodes.
1628 Updates the slice record in slab db to remove the slice nodes.
1630 Users may only delete slices of which they are members. PIs may
1631 delete any of the slices at their sites, or any slices of which
1632 they are members. Admins may delete any slice.
1633 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1634-1636) and any return/db-update
# after 1638 are missing from this extract.
1637 self.DeleteSliceFromNodes(slice_record)
1638 logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
1641 #TODO AddPerson 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1642 def AddPerson(self, auth, person_fields=None):
1643 """Adds a new account. Any fields specified in person_fields are used,
1644 otherwise defaults are used.
1645 Accounts are disabled by default. To enable an account,
1647 Returns the new person_id (> 0) if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1648-1650) is missing from this extract.
1651 logger.warning("SLABDRIVER AddPerson EMPTY - DO NOTHING \r\n ")
1654 #TODO AddPersonToSite 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1655 def AddPersonToSite (self, auth, person_id_or_email, \
1656 site_id_or_login_base=None):
1657 """ Adds the specified person to the specified site. If the person is
1658 already a member of the site, no errors are returned. Does not change
1659 the person's primary site.
1660 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1661-1663) is missing from this extract.
1664 logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
1667 #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1668 def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
1669 """Grants the specified role to the person.
1670 PIs can only grant the tech and user roles to users and techs at their
1671 sites. Admins can grant any role to any user.
1672 Returns 1 if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1673-1675) is missing from this extract.
1676 logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
1679 #TODO AddPersonKey 04/07/2012 SA
# Stub: logs a warning and does nothing (see TODO above).
1680 def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
1681 """Adds a new key to the specified account.
1682 Non-admins can only modify their own keys.
1683 Returns the new key_id (> 0) if successful, faults otherwise.
# NOTE(review): docstring closer (lines 1684-1686) is missing from this extract.
1687 logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
# Delete each lease's OAR job under the given slice hrn, then log.
1690 def DeleteLeases(self, leases_id_list, slice_hrn ):
1691 for job_id in leases_id_list:
1692 self.DeleteJobs(job_id, slice_hrn)
1694 logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
1695 \r\n " %(leases_id_list, slice_hrn))