3 from datetime import datetime
5 from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
6 from sfa.util.sfalogging import logger
8 from sfa.storage.alchemy import dbsession
9 from sfa.storage.model import RegRecord, RegUser
11 from sfa.trust.credential import Credential
14 from sfa.managers.driver import Driver
15 from sfa.rspecs.version_manager import VersionManager
16 from sfa.rspecs.rspec import RSpec
18 from sfa.util.xrn import hrn_to_urn
## thierry: everything that is API-related (i.e. handling incoming requests)
# SlabDriver should really only be about talking to the senslab testbed
26 from sfa.senslab.OARrestapi import OARrestapi
27 from sfa.senslab.LDAPapi import LDAPapi
29 from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
31 from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
33 from sfa.senslab.slabslices import SlabSlices
40 # this inheritance scheme is so that the driver object can receive
41 # GetNodes or GetSites sorts of calls directly
42 # and thus minimize the differences in the managers with the pl version
class SlabDriver(Driver):
    """ Senslab Driver class inherited from Driver generic class.
    Contains methods compliant with the SFA standard and the testbed
    infrastructure (calls to LDAP and OAR).
    """
    def __init__(self, config):
        """Set up the testbed connections.

        :param config: SFA configuration object, provides the interface
            HRN and the registry root authority name.
        """
        Driver.__init__ (self, config)
        self.hrn = config.SFA_INTERFACE_HRN
        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
        # REST client towards the OAR batch scheduler (nodes/jobs/leases)
        self.oar = OARrestapi()
        # NOTE(review): GetPersons() reads self.ldap (an LDAPapi instance);
        # its initialisation is not visible in this excerpt -- confirm it
        # is set here in the full file.
        # Date format accepted by OAR for scheduled reservations.
        self.time_format = "%Y-%m-%d %H:%M:%S"
        self.db = SlabDB(config, debug = True)
    def sliver_status(self, slice_urn, slice_hrn):
        """Receive a status request for slice named urn/hrn
        urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
        shall return a structure as described in
        http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
        NT : not sure if we should implement this or not, but used by sface.
        """
        # NOTE(review): several lines of this method are not visible in this
        # excerpt; loop headers and initialisations flagged below must be
        # confirmed against the full file.

        #First get the slice with the slice hrn
        slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')

        # NOTE(review): "len(...) is 0" is an identity test that only works
        # because CPython caches small ints -- should be "if not slice_list:".
        if len(slice_list) is 0:
            raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))

        #Slice has the same slice hrn for each slice in the slice/lease list
        #So fetch the info on the user once
        one_slice = slice_list[0]
        recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                one_slice['record_id_user']).first()

        #Make a list of all the nodes hostnames in use for this slice
        # NOTE(review): the enclosing "for sl in slice_list:" header and the
        # "slice_nodes_list = []" initialisation are not visible here.
            for node in sl['node_ids']:
                slice_nodes_list.append(node['hostname'])

        #Get all the corresponding nodes details
        nodes_all = self.GetNodes({'hostname':slice_nodes_list},
                        ['node_id', 'hostname','site','boot_state'])
        nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])

        top_level_status = 'empty'
        # NOTE(review): dict.fromkeys returns a *new* dict -- this call does
        # not initialise "result" (whose creation is not visible here); the
        # result is discarded.
        result.fromkeys(['geni_urn','pl_login','geni_status','geni_resources'],None)
        result['pl_login'] = recuser.hrn
        logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
 %s \r\n " %(slice_urn, slice_hrn, sl))
        nodes_in_slice = sl['node_ids']
        # Default report for a slice without nodes.
        result['geni_status'] = top_level_status
        result['geni_resources'] = []

        top_level_status = 'ready'
        #A job is running on Senslab for this slice
        # report about the local nodes that are in the slice only
        result['geni_urn'] = slice_urn
        #timestamp = float(sl['startTime']) + float(sl['walltime'])
        #result['pl_expires'] = strftime(self.time_format, \
                                                #gmtime(float(timestamp)))
        #result['slab_expires'] = strftime(self.time_format,\
                                                #gmtime(float(timestamp)))

        # NOTE(review): "resources = []" and the per-node "res = {}"
        # initialisations are not visible in this excerpt.
        for node in sl['node_ids']:
            #res['slab_hostname'] = node['hostname']
            #res['slab_boot_state'] = node['boot_state']
            res['pl_hostname'] = node['hostname']
            res['pl_boot_state'] = nodeall_byhostname[node['hostname']]['boot_state']
            #res['pl_last_contact'] = strftime(self.time_format, \
                                                #gmtime(float(timestamp)))
            # Build a per-node sliver urn out of the slice urn and node id.
            sliver_id = Xrn(slice_urn, type='slice', \
                    id=nodeall_byhostname[node['hostname']]['node_id'], \
                    authority=self.hrn).urn
            res['geni_urn'] = sliver_id
            if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
                res['geni_status'] = 'ready'
            # NOTE(review): the "else:" branch header is not visible here;
            # a dead node marks the whole slice as failed.
                res['geni_status'] = 'failed'
                top_level_status = 'failed'
            res['geni_error'] = ''
            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
        # NOTE(review): the end of this logger call and the final
        # "return result" are not visible in this excerpt.
    # NOTE(review): the signature continuation line (presumably
    # "users, options):") is not visible in this excerpt, nor are several
    # body lines flagged below -- confirm against the full file.
    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
        # Allocate the requested nodes/leases to the slice: verify the
        # slice, its users, its nodes, then group the requested leases
        # into OAR jobs and hand them to SlabSlices.
        aggregate = SlabAggregate(self)
        slices = SlabSlices(self)
        peer = slices.get_peer(slice_hrn)
        sfa_peer = slices.get_sfa_peer(slice_hrn)

        if not isinstance(creds, list):
            # NOTE(review): the normalisation body ("creds = [creds]"?) is
            # not visible here.

        # NOTE(review): the guard around users[0] is not visible here.
        slice_record = users[0].get('slice_record', {})

        rspec = RSpec(rspec_string)
        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
                                    %(rspec.version,slice_record))

        # ensure site record exists?
        # ensure slice record exists
        #Removed options to verify_slice SA 14/08/12
        # NOTE(review): the continuation of this call is not visible here.
        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \

        #requested_attributes returned by rspec.version.get_slice_attributes()
        #unused, removed SA 13/08/12
        rspec.version.get_slice_attributes()

        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))

        # ensure person records exists
        #verify_persons returns added persons but since the return value
        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
                                                    sfa_peer, options=options)

        # add/remove slice from nodes
        requested_slivers = [node.get('component_name') \
                            for node in rspec.version.get_nodes_with_slivers()]
        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                                    requested_slivers %s  listnodes %s" \
                                    %(requested_slivers,l))
        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
        slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)

        requested_lease_list = []
        # NOTE(review): the "kept_leases = []" initialisation is not
        # visible in this excerpt.
        logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
        for lease in rspec.version.get_leases():
            single_requested_lease = {}
            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
            if not lease.get('lease_id'):
                single_requested_lease['hostname'] = \
                        slab_xrn_to_hostname(lease.get('component_id').strip())
                single_requested_lease['start_time'] = lease.get('start_time')
                single_requested_lease['duration'] = lease.get('duration')
            # NOTE(review): "else:" branch header not visible here --
            # leases that already have an id are kept as-is.
                kept_leases.append(int(lease['lease_id']))
            if single_requested_lease.get('hostname'):
                requested_lease_list.append(single_requested_lease)

        logger.debug("SLABDRIVER.PY \tcreate_sliver APRESLEASE" )
        #dCreate dict of leases by start_time, regrouping nodes reserved
        #time, for the same amount of time = one job on OAR
        requested_job_dict = {}
        for lease in requested_lease_list:
            #In case it is an asap experiment start_time is empty
            if lease['start_time'] == '':
                lease['start_time'] = '0'

            if lease['start_time'] not in requested_job_dict:
                if isinstance(lease['hostname'], str):
                    lease['hostname'] =  [lease['hostname']]
                requested_job_dict[lease['start_time']] = lease
            # NOTE(review): "else:" branch header not visible here --
            # merge same-start-time, same-duration leases into one job.
                job_lease = requested_job_dict[lease['start_time']]
                if lease['duration'] == job_lease['duration'] :
                    job_lease['hostname'].append(lease['hostname'])

        logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s " %(requested_job_dict))
        #verify_slice_leases returns the leases , but the return value is unused
        #here. Removed SA 13/08/12
        slices.verify_slice_leases(sfa_slice, \
                                    requested_job_dict, kept_leases, peer)

        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        """Remove the slice's jobs from the testbed nodes.

        :returns: (not visible in this excerpt -- presumably a status code)
        """
        sfa_slice_list  = self.GetSlices(slice_filter = slice_hrn, \
                                            slice_filter_type = 'slice_hrn')

        if not sfa_slice_list:
            # NOTE(review): the early-exit return value is not visible in
            # this excerpt.

        #Delete all in the slice
        for sfa_slice in sfa_slice_list:
            logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
            slices = SlabSlices(self)
            # determine if this is a peer slice
            peer = slices.get_peer(slice_hrn)
            #TODO delete_sliver SA : UnBindObjectFromPeer should be
            #used when there is another
            #senslab testbed, which is not the case 14/08/12 .
            logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
            # NOTE(review): the "if peer:" guards around the bind/unbind
            # calls are not visible in this excerpt.
                self.UnBindObjectFromPeer('slice', \
                                        sfa_slice['record_id_slice'], peer,None)
            self.DeleteSliceFromNodes(sfa_slice)
                self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
                                            peer, sfa_slice['peer_slice_id'])
297 def AddSlice(self, slice_record):
298 slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
299 record_id_slice= slice_record['record_id_slice'] , \
300 record_id_user= slice_record['record_id_user'], \
301 peer_authority = slice_record['peer_authority'])
302 logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
303 %(slice_record,slab_slice))
304 slab_dbsession.add(slab_slice)
305 slab_dbsession.commit()
    # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        """Return an advertisement (or slice) RSpec in the version asked
        for in options, built by the SlabAggregate.
        """
        #cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        # NOTE(review): the "rspec_version = " assignment line is not
        # visible in this excerpt.
            version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + \
                                        options.get('info', 'default')

        # look in cache first
        #if cached_requested and self.cache and not slice_hrn:
            #rspec = self.cache.get(version_string)
                #logger.debug("SlabDriver.ListResources: \
                            #returning cached advertisement")

        #panos: passing user-defined options
        aggregate = SlabAggregate(self)
        # HRN of the caller, extracted from the first credential.
        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        options.update({'origin_hrn':origin_hrn})
        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, \
                                        version=rspec_version, options=options)

        #if self.cache and not slice_hrn:
            #logger.debug("Slab.ListResources: stores advertisement in cache")
            #self.cache.add(version_string, rspec)
        # NOTE(review): the final "return rspec" is not visible in this
        # excerpt.
    def list_slices (self, creds, options):
        """List the urns of all the slices known to the slab db."""
        # look in cache first
            #slices = self.cache.get('slices')
                #logger.debug("PlDriver.list_slices returns from cache")

        # Get the slice records from the slab db and turn their hrns into
        # urns.
        slices = self.GetSlices()
        logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
        slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
        #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
                                                    #for slab_slice in slices]
        slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                                                for slice_hrn in slice_hrns]

        #logger.debug ("SlabDriver.list_slices stores value in cache")
        #self.cache.add('slices', slice_urns)
        # NOTE(review): the final "return slice_urns" is not visible in
        # this excerpt.
    def register (self, sfa_record, hrn, pub_key):
        """
        Adding new user, slice, node or site should not be handled
        Adding users = LDAP Senslab
        Adding slice = Import from LDAP users
        """
        # NOTE(review): record creation happens out of band (LDAP/OAR
        # import); the method body past the docstring is not visible in
        # this excerpt.
    #No site or node record update allowed
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        """Update a slice or user record (sites and nodes are read-only).

        :raises UnknownSfaType: when a new key is supplied for a record
            that is not a user record.
        """
        pointer = old_sfa_record['pointer']
        old_sfa_record_type = old_sfa_record['type']

        # new_key implemented for users only
        if new_key and old_sfa_record_type not in [ 'user' ]:
            raise UnknownSfaType(old_sfa_record_type)

        #if (type == "authority"):
            #self.shell.UpdateSite(pointer, new_sfa_record)

        if old_sfa_record_type == "slice":
            # NOTE(review): the continuation of this call (presumably
            # "hrn, new_sfa_record)") is not visible in this excerpt.
            slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
            if 'name' in slab_record:
                slab_record.pop('name')
                #Prototype should be UpdateSlice(self,
                #auth, slice_id_or_name, slice_fields)
                #Senslab cannot update slice since slice = job
                #so we must delete and create another job
                self.UpdateSlice(pointer, slab_record)

        elif old_sfa_record_type == "user":
            # Copy only the allowed person fields into the update.
            # NOTE(review): the "update_fields = {}" initialisation is not
            # visible in this excerpt.
            all_fields = new_sfa_record
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                    update_fields[key] = all_fields[key]
            self.UpdatePerson(pointer, update_fields)

            # NOTE(review): the "if new_key:" guard is not visible here.
                # must check this key against the previous one if it exists
                persons = self.GetPersons([pointer], ['key_ids'])
                keys = person['key_ids']
                keys = self.GetKeys(person['key_ids'])

                # Delete all stale keys
                    if new_key != key['key']:
                        self.DeleteKey(key['key_id'])
                    self.AddPersonKey(pointer, {'key_type': 'ssh', \
        # NOTE(review): the final return statement is not visible in this
        # excerpt.
    def remove (self, sfa_record):
        """Remove a user (disable in LDAP) or a slice (delete from db).

        :param sfa_record: record dict, read for its 'type' and 'hrn'.
        """
        sfa_record_type = sfa_record['type']
        hrn = sfa_record['hrn']
        if sfa_record_type == 'user':

            #get user from senslab ldap
            person = self.GetPersons(sfa_record)
            #No registering at a given site in Senslab.
            #Once registered to the LDAP, all senslab sites are
            # NOTE(review): the "if person:" guard is not visible in this
            # excerpt.
            #Mark account as disabled in ldap
            self.DeletePerson(sfa_record)
        elif sfa_record_type == 'slice':
            if self.GetSlices(slice_filter = hrn, \
                                    slice_filter_type = 'slice_hrn'):
                self.DeleteSlice(sfa_record)

        #elif type == 'authority':
            #if self.GetSites(pointer):
                #self.DeleteSite(pointer)
        # NOTE(review): the final return statement is not visible in this
        # excerpt.
    #TODO clean GetPeers. 05/07/12SA
    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
        """List the authority records known to the registry db.

        :param peer_filter: optional hrn restricting the result to one
            authority.
        :returns: list of RegRecord authority records.
        """
        existing_records = {}
        existing_hrns_by_types = {}
        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                    return_field %s " %(auth , peer_filter, return_fields_list))
        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()

        # Index the records by (hrn, type) and group the hrns by type.
        for record in all_records:
            existing_records[(record.hrn, record.type)] = record
            if record.type not in existing_hrns_by_types:
                existing_hrns_by_types[record.type] = [record.hrn]
                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                    existing_hrns_by_types %s " %( existing_hrns_by_types))
            # NOTE(review): the "else:" branch header is not visible in
            # this excerpt.
                logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                                                %(record.type,record.hrn))
                existing_hrns_by_types[record.type].append(record.hrn)

        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                                            %( existing_hrns_by_types))
        # NOTE(review): the "records_list = []" initialisation and the
        # "if peer_filter:" header are not visible in this excerpt.
            records_list.append(existing_records[(peer_filter,'authority')])
            for hrn in existing_hrns_by_types['authority']:
                records_list.append(existing_records[(hrn,'authority')])
        logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \
        return_records = records_list
        if not peer_filter and not return_fields_list:
            # NOTE(review): the early return is not visible in this excerpt.
        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
        return return_records
    #TODO : Handling OR request in make_ldap_filters_from_records
    #instead of the for loop
    #over the records' list
    def GetPersons(self, person_filter=None):
        """
        person_filter should be a list of dictionnaries when not set to None.
        Returns a list of users whose accounts are enabled found in ldap.
        """
        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
        # NOTE(review): the continuation of the logger call and the
        # "person_list = []" initialisation are not visible in this excerpt.

        if person_filter and isinstance(person_filter, list):
            #If we are looking for a list of users (list of dict records)
            #Usually the list contains only one user record
            for searched_attributes in person_filter:

                #Get only enabled user accounts in senslab LDAP :
                #add a filter for make_ldap_filters_from_record
                person = self.ldap.LdapFindUser(searched_attributes, \
                                is_user_enabled=True)
                person_list.append(person)

        # NOTE(review): the "else:" branch header is not visible here --
        # without a filter, fetch every enabled account.
            #Get only enabled user accounts in senslab LDAP :
            #add a filter for make_ldap_filters_from_record
            person_list  = self.ldap.LdapFindUser(is_user_enabled=True)
        # NOTE(review): the final "return person_list" is not visible in
        # this excerpt.
543 def GetTimezone(self):
544 server_timestamp, server_tz = self.oar.parser.\
545 SendRequest("GET_timezone")
546 return server_timestamp, server_tz
    def DeleteJobs(self, job_id, slice_hrn):
        """Ask OAR to delete job job_id on behalf of the user owning the
        slice slice_hrn.
        """
        # NOTE(review): "job_id is -1" is an identity test that only works
        # because CPython caches small ints -- should be "job_id == -1".
        if not job_id or job_id is -1:
            # NOTE(review): the early-exit body is not visible in this
            # excerpt.
        # OAR login = last hrn component minus the "_slice" suffix.
        # NOTE(review): rstrip strips a *character set*, not a suffix -- a
        # login ending in 'e', 'c', 'i', 'l' or 's' would be over-stripped;
        # confirm intent.
        username  = slice_hrn.split(".")[-1].rstrip("_slice")
        # NOTE(review): the "reqdict = {}" initialisation is not visible in
        # this excerpt.
        reqdict['method'] = "delete"
        reqdict['strval'] = str(job_id)

        # NOTE(review): the continuation of this call (reqdict, username)
        # is not visible in this excerpt.
        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
        logger.debug("SLABDRIVER \tDeleteJobs jobid  %s \r\n answer %s \
                                username %s" %(job_id,answer, username))
566 ##TODO : Unused GetJobsId ? SA 05/07/12
567 #def GetJobsId(self, job_id, username = None ):
569 #Details about a specific job.
570 #Includes details about submission time, jot type, state, events,
571 #owner, assigned ressources, walltime etc...
575 #node_list_k = 'assigned_network_address'
576 ##Get job info from OAR
577 #job_info = self.oar.parser.SendRequest(req, job_id, username)
579 #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
581 #if job_info['state'] == 'Terminated':
582 #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
585 #if job_info['state'] == 'Error':
586 #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
591 #logger.error("SLABDRIVER \tGetJobsId KeyError")
594 #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
596 ##Replaces the previous entry
597 ##"assigned_network_address" / "reserved_resources"
599 #job_info.update({'node_ids':parsed_job_info[node_list_k]})
600 #del job_info[node_list_k]
601 #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
    def GetJobsResources(self, job_id, username = None):
        """Return the hostnames of the nodes assigned to OAR job job_id.

        :returns: dict with a single 'node_ids' key holding the hostname
            list (dict shape kept for compatibility with callers).
        """
        #job_resources=['reserved_resources', 'assigned_resources',\
                        #'job_id', 'job_uri', 'assigned_nodes',\
        #assigned_res = ['resource_id', 'resource_uri']
        #assigned_n = ['node', 'node_uri']

        req = "GET_jobs_id_resources"

        #Get job resources list from OAR
        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
        logger.debug("SLABDRIVER \t GetJobsResources  %s " %(node_id_list))

        # NOTE(review): the "hostname_list = " assignment target of this
        # call is not visible in this excerpt.
                    self.__get_hostnames_from_oar_node_ids(node_id_list)

        #Replaces the previous entry "assigned_network_address" /
        #"reserved_resources"
        job_info = {'node_ids': hostname_list}
        # NOTE(review): the final "return job_info" is not visible in this
        # excerpt.
    def get_info_on_reserved_nodes(self, job_info, node_list_name):
        """Map the node names stored under job_info[node_list_name] to the
        hostnames of the corresponding testbed node records.
        """
        #Get the list of the testbed nodes records and make a
        #dictionnary keyed on the hostname out of it
        node_list_dict = self.GetNodes()
        #node_hostname_list = []
        node_hostname_list = [node['hostname'] for node in node_list_dict]
        #for node in node_list_dict:
            #node_hostname_list.append(node['hostname'])
        node_dict = dict(zip(node_hostname_list, node_list_dict))
        # NOTE(review): a "try:" header appears to be missing from this
        # excerpt (see the KeyError log below).
        reserved_node_hostname_list = []
        for index in range(len(job_info[node_list_name])):
            #job_info[node_list_name][k] =
            # NOTE(review): assigning by index into an *empty* list raises
            # IndexError -- this should be .append(); confirm against the
            # full file.
            reserved_node_hostname_list[index] = \
                    node_dict[job_info[node_list_name][index]]['hostname']

        logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
                        reserved_node_hostname_list %s" \
                        %(reserved_node_hostname_list))
        # NOTE(review): the "except KeyError:" header is not visible in
        # this excerpt.
            logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )

        return reserved_node_hostname_list
655 def GetNodesCurrentlyInUse(self):
656 """Returns a list of all the nodes already involved in an oar job"""
657 return self.oar.parser.SendRequest("GET_running_jobs")
659 def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
660 full_nodes_dict_list = self.GetNodes()
661 #Put the full node list into a dictionary keyed by oar node id
662 oar_id_node_dict = {}
663 for node in full_nodes_dict_list:
664 oar_id_node_dict[node['oar_id']] = node
666 #logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
667 #oar_id_node_dict %s" %(oar_id_node_dict))
669 hostname_dict_list = []
670 for resource_id in resource_id_list:
671 #Because jobs requested "asap" do not have defined resources
672 if resource_id is not "Undefined":
673 hostname_dict_list.append(\
674 oar_id_node_dict[resource_id]['hostname'])
676 #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
677 return hostname_dict_list
    def GetReservedNodes(self,username = None):
        """List the leases (running and scheduled OAR jobs), each
        annotated with the hostnames of its reserved nodes.

        :param username: optional OAR login restricting the request to one
            user's jobs.
        :returns: the reservation dicts from OAR, each with an added
            'reserved_nodes' hostname list.
        """
        #Get the nodes in use and the reserved nodes
        reservation_dict_list = \
                        self.oar.parser.SendRequest("GET_reserved_nodes", username = username)

        for resa in reservation_dict_list:
            logger.debug ("GetReservedNodes resa %s"%(resa))
            #dict list of hostnames and their site
            resa['reserved_nodes'] = \
                self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])

        #del resa['resource_ids']
        return reservation_dict_list
    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
        """
        node_filter_dict : dictionnary of lists
        """
        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
        node_dict_list = node_dict_by_id.values()
        logger.debug (" SLABDRIVER GetNodes  node_filter_dict %s return_fields_list %s "%(node_filter_dict,return_fields_list))
        #No filtering needed return the list directly
        if not (node_filter_dict or return_fields_list):
            return node_dict_list

        return_node_list = []
        for filter_key in node_filter_dict:
            # NOTE(review): a "try:" header appears to be missing from this
            # excerpt (see the log_exc below).
            #Filter the node_dict_list by each value contained in the
            #list node_filter_dict[filter_key]
            for value in node_filter_dict[filter_key]:
                for node in node_dict_list:
                    if node[filter_key] == value:
                        if return_fields_list :
                            # NOTE(review): the "tmp = {}" initialisation
                            # and the "tmp[k] = node[k]" copy are not
                            # visible in this excerpt.
                            for k in return_fields_list:
                            return_node_list.append(tmp)
                            # NOTE(review): the "else:" branch header is
                            # not visible in this excerpt.
                            return_node_list.append(node)
        # NOTE(review): the "except KeyError:" header is not visible here.
            logger.log_exc("GetNodes KeyError")

        return return_node_list
    def GetSites(self, site_filter_name_list = None, return_fields_list = None):
        """Return the site records known to OAR, optionally restricted to
        the sites named in site_filter_name_list and projected onto the
        fields of return_fields_list.
        """
        site_dict = self.oar.parser.SendRequest("GET_sites")
        #site_dict : dict where the key is the sit ename
        return_site_list = []
        if not ( site_filter_name_list or return_fields_list):
            return_site_list = site_dict.values()
            return return_site_list

        for site_filter_name in site_filter_name_list:
            if site_filter_name in site_dict:
                if return_fields_list:
                    for field in return_fields_list:
                        # NOTE(review): the "tmp = {}" initialisation and a
                        # "try:" header are not visible in this excerpt.
                            tmp[field] = site_dict[site_filter_name][field]
                        # NOTE(review): the "except KeyError:" header is
                        # not visible in this excerpt.
                            logger.error("GetSites KeyError %s "%(field))
                    return_site_list.append(tmp)
                # NOTE(review): the "else:" branch header is not visible.
                    return_site_list.append( site_dict[site_filter_name])

        return return_site_list
    def GetSlices(self, slice_filter = None, slice_filter_type = None):
    #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
                                            #return_fields_list = None):
        """ Get the slice records from the slab db.
        Returns a slice ditc if slice_filter and slice_filter_type
        Returns a list of slice dictionnaries if there are no filters
        """
        # NOTE(review): several lines of this method are not visible in
        # this excerpt; the flagged headers/initialisations must be
        # confirmed against the full file.
        return_slice_list = []
        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
        slicerec_dictlist = []

        if slice_filter_type in authorized_filter_types_list:

            def __get_slice_records(slice_filter = None, slice_filter_type = None):
                """Fetch one SliceSenslab row by hrn or by user record id;
                return the owner's login and the row dumped as a dict."""
                #Get list of slices based on the slice hrn
                if slice_filter_type == 'slice_hrn':
                    # login = second hrn component without the job suffix
                    login = slice_filter.split(".")[1].split("_")[0]

                    #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                    #otherwise will mess up the RegRecord in Resolve, don't know
                    #Only one entry for one user  = one slice in slice_senslab table
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
                #Get slice based on user id
                if slice_filter_type == 'record_id_user':
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()

                # NOTE(review): the None-check on slicerec is not visible
                # in this excerpt.
                fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
                login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                return login, fixed_slicerec_dict

            login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
            # NOTE(review): the middle of this format string is not
            # visible in this excerpt.
            logger.debug(" SLABDRIVER \tGetSlices login %s \
                            %(login, fixed_slicerec_dict))

            #One slice can have multiple jobs
            leases_list = self.GetReservedNodes(username = login)
            #If no job is running or no job scheduled
            if leases_list == [] :
                return [fixed_slicerec_dict]

            #Several jobs for one slice
            for lease in leases_list :
                # NOTE(review): the "slicerec_dict = {}" initialisation is
                # not visible in this excerpt.
                #Check with OAR the status of the job if a job id is in
                slicerec_dict['oar_job_id'] = lease['lease_id']

                #for reserved_node in lease['reserved_nodes']:
                    #reserved_list.append(reserved_node['hostname'])
                reserved_list = lease['reserved_nodes']
                #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})

                slicerec_dictlist.append(slicerec_dict)
                logger.debug("SLABDRIVER.PY  \tGetSlices  slicerec_dict %s slicerec_dictlist %s lease['reserved_nodes'] %s" %(slicerec_dict, slicerec_dictlist,lease['reserved_nodes'] ))

            logger.debug("SLABDRIVER.PY  \tGetSlices  RETURN slicerec_dictlist  %s"\
                                                        %(slicerec_dictlist))

            return slicerec_dictlist

        # NOTE(review): the "else:" branch header for the unfiltered case
        # is not visible in this excerpt.
            slice_list = slab_dbsession.query(SliceSenslab).all()
            leases_list = self.GetReservedNodes()

            slicerec_dictlist = []
            return_slice_list = []
            for record in slice_list:
                return_slice_list.append(record.dump_sqlalchemyobj_to_dict())

            for fixed_slicerec_dict in return_slice_list:
                # NOTE(review): the "slicerec_dict = {}" and
                # "reserved_list = []" initialisations are not visible in
                # this excerpt.
                owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                for lease in leases_list:
                    if owner == lease['user']:
                        slicerec_dict['oar_job_id'] = lease['lease_id']

                        for reserved_node in lease['reserved_nodes']:
                            reserved_list.append(reserved_node['hostname'])
                        #slicerec_dict.update({'node_ids':{'hostname':reserved_list}})
                        #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                        slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                        slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                        slicerec_dict.update(fixed_slicerec_dict)
                        slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})
                        slicerec_dictlist.append(slicerec_dict)

            logger.debug("SLABDRIVER.PY  \tGetSlices RETURN slices %s \
                        slice_filter %s " %(return_slice_list, slice_filter))

        #if return_fields_list:
            #return_slice_list  = parse_filter(sliceslist, \
                                #slice_filter,'slice', return_fields_list)

        return slicerec_dictlist
894 def testbed_name (self): return self.hrn
    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version (self):
        """Advertise which RSpec versions this aggregate accepts
        ('request') and produces ('ad')."""
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        # NOTE(review): the "return {" opener of the result dict (and its
        # closing brace) are not visible in this excerpt.
            'testbed':self.testbed_name(),
            'geni_request_rspec_versions': request_rspec_versions,
            'geni_ad_rspec_versions': ad_rspec_versions,
# Convert SFA fields to PLC fields for use when registering or updating
919 # registry record in the PLC database
921 # @param type type of record (user, slice, ...)
922 # @param hrn human readable name
923 # @param sfa_fields dictionary of SFA fields
924 # @param slab_fields dictionary of PLC fields (output)
    def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
        """Translate an SFA record dict into the senslab field naming.

        Only slice records are translated; node/authority handling is
        commented out below.
        """
        # NOTE(review): the "slab_record = {}" initialisation is not
        # visible in this excerpt.
        #for field in record:
        #    slab_record[field] = record[field]

        if sfa_type == "slice":
            #instantion used in get_slivers ?
            if not "instantiation" in slab_record:
                slab_record["instantiation"] = "senslab-instantiated"
            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
            #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
            slab_record["hrn"] = hrn
            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
                        slab_record %s  " %(slab_record['hrn']))
            # NOTE(review): the 'if "url" in record:' guard is not visible
            # in this excerpt.
                slab_record["url"] = record["url"]
            if "description" in record:
                slab_record["description"] = record["description"]
            if "expires" in record:
                slab_record["expires"] = int(record["expires"])

        #nodes added by OAR only and then imported to SFA
        #elif type == "node":
            #if not "hostname" in slab_record:
                #if not "hostname" in record:
                    #raise MissingSfaInfo("hostname")
                #slab_record["hostname"] = record["hostname"]
            #if not "model" in slab_record:
                #slab_record["model"] = "geni"

        #elif type == "authority":
            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
            #if not "name" in slab_record:
                #slab_record["name"] = hrn
            #if not "abbreviated_name" in slab_record:
                #slab_record["abbreviated_name"] = hrn
            #if not "enabled" in slab_record:
                #slab_record["enabled"] = True
            #if not "is_public" in slab_record:
                #slab_record["is_public"] = True
        # NOTE(review): the final "return slab_record" is not visible in
        # this excerpt.
979 def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
980 """ Transforms unix timestamp into valid OAR date format """
982 #Used in case of a scheduled experiment (not immediate)
983 #To run an XP immediately, don't specify date and time in RSpec
984 #They will be set to None.
986 #transform the xp_utc_timestamp into server readable time
987 xp_server_readable_date = datetime.fromtimestamp(int(\
988 xp_utc_timestamp)).strftime(self.time_format)
990 return xp_server_readable_date
    def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
                        lease_start_time, lease_duration, slice_user=None):
        """Post a job request to OAR reserving added_nodes for the slice,
        then configure and launch the senslab experiment wrapper for the
        resulting job.

        :param added_nodes: hostnames to reserve.
        :param slice_name: hrn of the slice the job belongs to.
        :param lease_start_time: unix timestamp, '0' for an immediate job.
        :param lease_duration: duration in grain units (see
            GetLeaseGranularity).
        :param slice_user: login of the user owning the slice.
        """
        # NOTE(review): several lines of this method are not visible in
        # this excerpt; the flagged initialisations/headers must be
        # confirmed against the full file (including "lease_dict = {}").
        lease_dict['lease_start_time'] = lease_start_time
        lease_dict['lease_duration'] = lease_duration
        lease_dict['added_nodes'] = added_nodes
        lease_dict['slice_name'] = slice_name
        lease_dict['slice_user'] = slice_user
        lease_dict['grain'] = self.GetLeaseGranularity()
        lease_dict['time_format'] = self.time_format

        def __create_job_structure_request_for_OAR(lease_dict):
            """ Creates the structure needed for a correct POST on OAR.
            Makes the timestamp transformation into the appropriate format.
            Sends the POST request to create the job with the resources in
            """
            # NOTE(review): the "reqdict = {}" and "nodeid_list = []"
            # initialisations are not visible in this excerpt.
            reqdict['workdir'] = '/tmp'
            reqdict['resource'] = "{network_address in ("

            for node in lease_dict['added_nodes']:
                # NOTE(review): the continuation of this logger call is not
                # visible in this excerpt.
                logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
                # Get the ID of the node
                # NOTE(review): the "nodeid = ..." assignment is not
                # visible in this excerpt.
                reqdict['resource'] += "'" + nodeid + "', "
                nodeid_list.append(nodeid)

            # Drop the trailing ", " and close the resource expression.
            custom_length = len(reqdict['resource'])- 2
            reqdict['resource'] = reqdict['resource'][0:custom_length] + \
                                        ")}/nodes=" + str(len(nodeid_list))

            def __process_walltime(duration):
                """ Calculates the walltime in seconds from the duration in H:M:S
                specified in the RSpec.
                """
                # NOTE(review): the "walltime = []" initialisation and the
                # "if duration:" guard are not visible in this excerpt.
                # Fixing the walltime by adding a few delays.
                # First put the walltime in seconds oarAdditionalDelay = 20;
                # additional delay for /bin/sleep command to
                # take in account prologue and epilogue scripts execution
                # int walltimeAdditionalDelay = 120 ; additional delay
                desired_walltime = duration
                total_walltime = desired_walltime + 140#+2 min 20
                sleep_walltime = desired_walltime + 20 #+20 sec
                #Put the walltime back in str form
                #First get the hours
                walltime.append(str(total_walltime / 3600))
                total_walltime = total_walltime - 3600 * int(walltime[0])
                #Get the remaining minutes
                walltime.append(str(total_walltime / 60))
                total_walltime = total_walltime - 60 * int(walltime[1])
                walltime.append(str(total_walltime))
                # NOTE(review): the "else:" branch header is not visible
                # in this excerpt.
                logger.log_exc(" __process_walltime duration null")

                return walltime, sleep_walltime

            walltime, sleep_walltime = \
                __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])

            reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
                        ":" + str(walltime[1]) + ":" + str(walltime[2])
            reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)

            #In case of a scheduled experiment (not immediate)
            #To run an XP immediately, don't specify date and time in RSpec
            #They will be set to None.
            # NOTE(review): "is not '0'" is an identity test against a
            # string literal; should be "!= '0'".
            if lease_dict['lease_start_time'] is not '0':
                #Readable time accepted by OAR
                start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
                                        strftime(lease_dict['time_format'])
                reqdict['reservation'] = start_time
            #If there is not start time, Immediate XP. No need to add special
            reqdict['type'] = "deploy"
            reqdict['directory'] = ""
            reqdict['name'] = "SFA_" + lease_dict['slice_user']
            # NOTE(review): the final "return reqdict" is not visible in
            # this excerpt.

        #Create the request for OAR
        reqdict = __create_job_structure_request_for_OAR(lease_dict)
        # first step : start the OAR job and update the job
        # NOTE(review): the continuation of this logger call is not
        # visible in this excerpt.
        logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\

        answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
                                                reqdict, slice_user)
        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid   %s " %(answer))
        # NOTE(review): a "try:/except KeyError:" wrapper around this
        # access is not visible in this excerpt.
        jobid = answer['id']
        logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
                                Impossible to create job  %s "  %(answer))

        def __configure_experiment(jobid, added_nodes):
            # second step : configure the experiment
            # we need to store the nodes in a yaml (well...) file like this :
            # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
            job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
            job_file.write(str(added_nodes[0].strip('node')))
            for node in added_nodes[1:len(added_nodes)] :
                job_file.write(', '+ node.strip('node'))
            # NOTE(review): the closing bracket write and job_file.close()
            # are not visible in this excerpt -- the file handle may leak.

        def __launch_senslab_experiment(jobid):
            # third step : call the senslab-experiment wrapper
            #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
            # "+str(jobid)+" "+slice_user
            javacmdline = "/usr/bin/java"
            # NOTE(review): the "jarname = \" assignment line is not
            # visible in this excerpt -- without it this string literal is
            # a no-op expression.
                "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
            #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
                                                    #str(jobid), slice_user])
            # NOTE(review): "subprocess" is not among the visible imports
            # of this excerpt -- confirm it is imported in the full file.
            output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
                        slice_user],stdout=subprocess.PIPE).communicate()[0]
            # NOTE(review): the continuation of this logger call is not
            # visible in this excerpt.
            logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \

        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
                added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
        __configure_experiment(jobid, added_nodes)
        __launch_senslab_experiment(jobid)
def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
    """Create a lease (an OAR job) for the given slice on the given nodes.

    The OAR login is derived from the leaf of the first researcher hrn
    found in the slice record, then the job is started via
    LaunchExperimentOnOAR.

    :param hostname_list: list of node hostnames to reserve
    :param slice_record: slice record dict; reads 'reg-researchers' and
        'slice_hrn'
    :param lease_start_time: lease start as a unix timestamp
    :param lease_duration: lease duration — presumably in units of the
        testbed grain; TODO confirm against LaunchExperimentOnOAR
    """
    # NOTE(review): the argument tuple of this debug call is truncated in
    # this copy of the source (the 'lease_duration))' part is missing).
    logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
        slice_record %s lease_start_time %s lease_duration %s "\
        %( hostname_list, slice_record , lease_start_time, \
    #OAR identifies users by login: take the leaf of the researcher hrn
    tmp = slice_record['reg-researchers'][0].split(".")
    username = tmp[(len(tmp)-1)]
    self.LaunchExperimentOnOAR(hostname_list, slice_record['slice_hrn'], lease_start_time, lease_duration, username)
    #Human-readable start time, only used for the debug trace below
    start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
    logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
#Delete the jobs from job_senslab table
def DeleteSliceFromNodes(self, slice_record):
    """Free the slice's nodes by deleting its OAR job.

    :param slice_record: slice record dict; reads 'oar_job_id' and 'hrn'
    """
    self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
def GetLeaseGranularity(self):
    """ Returns the granularity of Senslab testbed.
    Defined in seconds. """
    # NOTE(review): the body (the granularity constant and its return
    # statement) is missing from this copy of the source — check the
    # full file before relying on this method.
def GetLeases(self, lease_filter_dict=None):
    """Return the current reservations (leases) on the testbed.

    Each reservation dict coming from GetReservedNodes is augmented with
    the owning slice's hrn/urn (looked up via LDAP and the registry /
    senslab databases) and with the urns (component ids) of its
    reserved nodes.

    :param lease_filter_dict: optional filter; when given, only leases
        whose 'slice_hrn' equals lease_filter_dict['name'] are kept
    :returns: list of reservation dicts
    """
    unfiltered_reservation_list = self.GetReservedNodes()

    ##Synchronize slice_table of sfa senslab db
    #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)

    reservation_list = []
    #Find the slice associated with this user senslab ldap uid
    logger.debug(" SLABDRIVER.PY \tGetLeases ")
    #Create user dict first to avoid looking several times for
    #the same user in LDAP SA 27/07/12
    # NOTE(review): the initialization of resa_user_dict (presumably
    # 'resa_user_dict = {}') is missing from this copy of the source.
    for resa in unfiltered_reservation_list:
        # NOTE(review): the argument of this debug call is truncated in
        # this copy of the source.
        logger.debug("SLABDRIVER \tGetLeases USER %s"\
        if resa['user'] not in resa_user_dict:
            logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
            #Look the user up in LDAP by uid, then find the matching
            #registry record by the mail address LDAP returned
            ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
            ldap_info = ldap_info[0][1]
            user = dbsession.query(RegUser).filter_by(email = \
                ldap_info['mail'][0]).first()
            #Separated in case user not in database : record_id not defined SA 17/07//12
            query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
            if query_slice_info:
                slice_info = query_slice_info.first()
            # NOTE(review): the else branch (default for slice_info) is
            # missing from this copy of the source.
            #Cache the ldap and slice info per user so each LDAP uid is
            #resolved only once
            resa_user_dict[resa['user']] = {}
            resa_user_dict[resa['user']]['ldap_info'] = user
            resa_user_dict[resa['user']]['slice_info'] = slice_info

    # NOTE(review): the argument of this debug call is truncated in this
    # copy of the source.
    logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\

    for resa in unfiltered_reservation_list:
        #Attach the owning slice's hrn and urn to the reservation
        resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
        resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
        #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
        resa['component_id_list'] = []
        #Transform the hostnames into urns (component ids)
        for node in resa['reserved_nodes']:
            #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
            #self.root_auth, node['hostname']))
            slab_xrn = slab_xrn_object(self.root_auth, node)
            resa['component_id_list'].append(slab_xrn.urn)

    #Filter the reservation list if necessary
    #Returns all the leases associated with a given slice
    if lease_filter_dict:
        logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
            %(lease_filter_dict))
        for resa in unfiltered_reservation_list:
            if lease_filter_dict['name'] == resa['slice_hrn']:
                reservation_list.append(resa)
    # NOTE(review): the 'else:' guard for the assignment below is missing
    # from this copy of the source; as written here it would override the
    # filtered list unconditionally — check the full file.
    reservation_list = unfiltered_reservation_list

    logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
        %(reservation_list))
    return reservation_list
def augment_records_with_testbed_info (self, sfa_records):
    """Enrich SFA records with senslab testbed-specific information.

    Thin wrapper kept for the generic Driver interface: the actual work
    is delegated to fill_record_info.

    :param sfa_records: a record dict or a list of record dicts
    :returns: whatever fill_record_info returns for those records
    """
    augmented_records = self.fill_record_info (sfa_records)
    return augmented_records
def fill_record_info(self, record_list):
    """
    Given a SFA record, fill in the senslab specific and SFA specific
    fields in the record.

    Slice records get PI/researcher information (looked up in the
    registry db) plus their OAR job ids; user records additionally get
    their slice record appended to record_list.

    :param record_list: a single record dict or a list of record dicts
    """
    logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
    #Accept a single record as well as a list
    if not isinstance(record_list, list):
        record_list = [record_list]

    # NOTE(review): the 'try:' matching the 'except TypeError' at the end
    # of this method is missing from this copy of the source.
    for record in record_list:
        #If the record is a SFA slice record, then add information
        #about the user of this slice. This kind of
        #information is in the Senslab's DB.
        if str(record['type']) == 'slice':
            #Get slab slice record.
            recslice_list = self.GetSlices(slice_filter = \
                str(record['hrn']),\
                slice_filter_type = 'slice_hrn')

            recuser = dbsession.query(RegRecord).filter_by(record_id = \
                recslice_list[0]['record_id_user']).first()
            logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
            # NOTE(review): some entries of this update dict are missing
            # from this copy of the source.
            record.update({'PI':[recuser.hrn],
                'researcher': [recuser.hrn],
                'name':record['hrn'],
                'person_ids':[recslice_list[0]['record_id_user']],
                'geni_urn':'', #For client_helper.py compatibility
                'keys':'', #For client_helper.py compatibility
                'key_ids':''}) #For client_helper.py compatibility

            #Collect one OAR job id per slab slice record
            for rec in recslice_list:
                record['oar_job_id'].append(rec['oar_job_id'])

            logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
                recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
        if str(record['type']) == 'user':
            #The record is a SFA user record.
            #Get the information about his slice from Senslab's DB
            #and add it to the user record.
            recslice_list = self.GetSlices(\
                slice_filter = record['record_id'],\
                slice_filter_type = 'record_id_user')

            logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
                recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
            #Append slice record in records list,
            #therefore fetches user and slice info again(one more loop)
            #Will update PIs and researcher for the slice
            recuser = dbsession.query(RegRecord).filter_by(record_id = \
                recslice_list[0]['record_id_user']).first()
            logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
                recuser %s \r\n \r\n" %(recuser))
            recslice = recslice_list[0]
            # NOTE(review): some entries of this update dict are missing
            # from this copy of the source.
            recslice.update({'PI':[recuser.hrn],
                'researcher': [recuser.hrn],
                'name':record['hrn'],
                'person_ids':[recslice_list[0]['record_id_user']]})

            for rec in recslice_list:
                recslice['oar_job_id'].append(rec['oar_job_id'])

            recslice.update({'type':'slice', \
                'hrn':recslice_list[0]['slice_hrn']})

            #GetPersons takes [] as filters
            #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
            user_slab = self.GetPersons([record])

            record.update(user_slab[0])
            #For client_helper.py compatibility
            # NOTE(review): the rest of this update dict literal is
            # missing from this copy of the source.
            record.update( { 'geni_urn':'',
            record_list.append(recslice)

            logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
                INFO TO USER records %s" %(record_list))
        logger.debug("SLABDRIVER.PY \tfill_record_info END \
            #record %s \r\n \r\n " %(record))

    # NOTE(review): the argument of the log_exc call below is truncated
    # in this copy of the source. (Python 2 except syntax.)
    except TypeError, error:
        logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
    #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
1357 #TODO Update membership? update_membership_list SA 05/07/12
1358 #def update_membership_list(self, oldRecord, record, listName, addFunc, \
1360 ## get a list of the HRNs that are members of the old and new records
1362 #oldList = oldRecord.get(listName, [])
1365 #newList = record.get(listName, [])
1367 ## if the lists are the same, then we don't have to update anything
1368 #if (oldList == newList):
1371 ## build a list of the new person ids, by looking up each person to get
1375 #records = table.find({'type': 'user', 'hrn': newList})
1376 #for rec in records:
1377 #newIdList.append(rec['pointer'])
1379 ## build a list of the old person ids from the person_ids field
1381 #oldIdList = oldRecord.get("person_ids", [])
1382 #containerId = oldRecord.get_pointer()
1384 ## if oldRecord==None, then we are doing a Register, instead of an
1387 #containerId = record.get_pointer()
1389 ## add people who are in the new list, but not the oldList
1390 #for personId in newIdList:
1391 #if not (personId in oldIdList):
1392 #addFunc(self.plauth, personId, containerId)
1394 ## remove people who are in the old list, but not the new list
1395 #for personId in oldIdList:
1396 #if not (personId in newIdList):
1397 #delFunc(self.plauth, personId, containerId)
1399 #def update_membership(self, oldRecord, record):
1401 #if record.type == "slice":
1402 #self.update_membership_list(oldRecord, record, 'researcher',
1403 #self.users.AddPersonToSlice,
1404 #self.users.DeletePersonFromSlice)
1405 #elif record.type == "authority":
1410 # I don't think you plan on running a component manager at this point
1411 # let me clean up the mess of ComponentAPI that is deprecated anyways
1414 #TODO FUNCTIONS SECTION 04/07/2012 SA
1416 #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
    """ This method is a hopefully temporary hack to let the sfa correctly
    detach the objects it creates from a remote peer object. This is
    needed so that the sfa federation link can work in parallel with
    RefreshPeer, as RefreshPeer depends on remote objects being correctly
    marked.

    auth : struct, API authentication structure
    AuthMethod : string, Authentication method to use
    object_type : string, Object type, among 'site','person','slice',
        (remainder of the list truncated in this copy of the source)
    object_id : int, object_id
    shortname : string, peer shortname

    Empty stub: currently does nothing on senslab.
    """
    # NOTE(review): this warning string is truncated in this copy of the
    # source (the continuation line is missing).
    logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
1438 #TODO Is BindObjectToPeer still necessary ? Currently does nothing
def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
        remote_object_id=None):
    """This method is a hopefully temporary hack to let the sfa correctly
    attach the objects it creates to a remote peer object. This is needed
    so that the sfa federation link can work in parallel with RefreshPeer,
    as RefreshPeer depends on remote objects being correctly marked.

    shortname : string, peer shortname
    remote_object_id : int, remote object_id, set to 0 if unknown

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
1455 #TODO UpdateSlice 04/07/2012 SA
1456 #Function should delete and create another job since in senslab slice=job
def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
    """Updates the parameters of an existing slice with the values in
    slice_fields.
    Users may only update slices of which they are members.
    PIs may update any of the slices at their sites, or any slices of
    which they are members. Admins may update any slice.
    Only PIs and admins may update max_nodes. Slices cannot be renewed
    (by updating the expires parameter) more than 8 weeks into the future.
    Returns 1 if successful, faults otherwise.

    Empty stub: currently does nothing on senslab (a senslab slice maps
    to an OAR job, so an update would mean deleting and resubmitting it).
    """
    logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
1472 #TODO UpdatePerson 04/07/2012 SA
def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
    """Updates a person. Only the fields specified in person_fields
    are updated, all other fields are left untouched.
    Users and techs can only update themselves. PIs can only update
    themselves and other non-PIs at their sites.
    Returns 1 if successful, faults otherwise.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
1485 #TODO GetKeys 04/07/2012 SA
def GetKeys(self, auth, key_filter=None, return_fields=None):
    """Returns an array of structs containing details about keys.
    If key_filter is specified and is an array of key identifiers,
    or a struct of key attributes, only keys matching the filter
    will be returned. If return_fields is specified, only the
    specified details will be returned.

    Admin may query all keys. Non-admins may only query their own keys.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
1500 #TODO DeleteKey 04/07/2012 SA
def DeleteKey(self, auth, key_id):
    """Deletes a key. (Summary line truncated in this copy of the source.)
    Non-admins may only delete their own keys.
    Returns 1 if successful, faults otherwise.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
1512 #TODO : Check rights to delete person
def DeletePerson(self, auth, person_record):
    """ Disable an existing account in senslab LDAP.
    Users and techs can only delete themselves. PIs can only
    delete themselves and other non-PIs at their sites.
    Admins can delete anyone.
    Returns 1 if successful, faults otherwise.
    """
    #Disable user account in senslab LDAP
    ret = self.ldap.LdapMarkUserAsDeleted(person_record)
    logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
    # NOTE(review): trailing lines (likely 'return ret') are missing from
    # this copy of the source.
1527 #TODO Check DeleteSlice, check rights 05/07/2012 SA
def DeleteSlice(self, auth, slice_record):
    """ Deletes the specified slice.
    Senslab : Kill the job associated with the slice if there is one
    using DeleteSliceFromNodes.
    Updates the slice record in slab db to remove the slice nodes.

    Users may only delete slices of which they are members. PIs may
    delete any of the slices at their sites, or any slices of which
    they are members. Admins may delete any slice.
    Returns 1 if successful, faults otherwise.
    """
    self.DeleteSliceFromNodes(slice_record)
    logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
    # NOTE(review): trailing lines (likely a return value) are missing
    # from this copy of the source.
1545 #TODO AddPerson 04/07/2012 SA
1546 #def AddPerson(self, auth, person_fields=None):
def AddPerson(self, record):#TODO fixing 28/08//2012 SA
    """Adds a new account. Any fields specified in records are used,
    otherwise defaults are used.
    Accounts are disabled by default. To enable an account,
    (continuation truncated in this copy of the source).
    Returns the new person_id (> 0) if successful, faults otherwise.

    On senslab the account is created in LDAP via LdapAddUser.
    """
    ret = self.ldap.LdapAddUser(record)
    logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
    # NOTE(review): trailing lines (likely 'return ret') are missing from
    # this copy of the source.
1560 #TODO AddPersonToSite 04/07/2012 SA
def AddPersonToSite (self, auth, person_id_or_email, \
        site_id_or_login_base=None):
    """ Adds the specified person to the specified site. If the person is
    already a member of the site, no errors are returned. Does not change
    the person's primary site.
    Returns 1 if successful, faults otherwise.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
1573 #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
    """Grants the specified role to the person.
    PIs can only grant the tech and user roles to users and techs at their
    sites. Admins can grant any role to any user.
    Returns 1 if successful, faults otherwise.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
1585 #TODO AddPersonKey 04/07/2012 SA
def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
    """Adds a new key to the specified account.
    Non-admins can only modify their own keys.
    Returns the new key_id (> 0) if successful, faults otherwise.

    Empty stub: currently does nothing on senslab.
    """
    logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
def DeleteLeases(self, leases_id_list, slice_hrn ):
    """Delete a list of leases: remove the OAR job behind each lease id,
    on behalf of slice slice_hrn.

    :param leases_id_list: list of OAR job ids to delete
    :param slice_hrn: hrn of the slice owning the leases
    """
    for job_id in leases_id_list:
        self.DeleteJobs(job_id, slice_hrn)

    logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
        \r\n " %(leases_id_list, slice_hrn))