3 from datetime import datetime
5 from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
6 from sfa.util.sfalogging import logger
8 from sfa.storage.alchemy import dbsession
9 from sfa.storage.model import RegRecord, RegUser
11 from sfa.trust.credential import Credential
14 from sfa.managers.driver import Driver
15 from sfa.rspecs.version_manager import VersionManager
16 from sfa.rspecs.rspec import RSpec
18 from sfa.util.xrn import hrn_to_urn
21 ## thierry: everything that is API-related (i.e. handling incoming requests)
23 # SlabDriver should be really only about talking to the senslab testbed
26 from sfa.senslab.OARrestapi import OARrestapi
27 from sfa.senslab.LDAPapi import LDAPapi
29 from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
31 from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
33 from sfa.senslab.slabslices import SlabSlices
38 # this inheritance scheme is so that the driver object can receive
39 # GetNodes or GetSites sorts of calls directly
40 # and thus minimize the differences in the managers with the pl version
class SlabDriver(Driver):
    """ Senslab Driver class inherited from Driver generic class.
    Contains methods compliant with the SFA standard and the testbed
    infrastructure (calls to LDAP and OAR).
    """
    def __init__(self, config):
        """Initialise the senslab driver.

        :param config: SFA configuration object, provides the interface
            hrn and the registry root authority name.

        NOTE(review): other helper attributes (e.g. the LDAP api object
        used by GetPersons) appear to be initialised here on lines not
        shown in this view -- confirm against the full file.
        """
        Driver.__init__ (self, config)
        self.hrn = config.SFA_INTERFACE_HRN
        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
        # REST client towards the OAR batch scheduler.
        self.oar = OARrestapi()
        # Date format exchanged with the OAR server.
        self.time_format = "%Y-%m-%d %H:%M:%S"
        self.db = SlabDB(config, debug = True)
    def sliver_status(self, slice_urn, slice_hrn):
        """Receive a status request for slice named urn/hrn
        urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
        shall return a structure as described in
        http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
        NT : not sure if we should implement this or not, but used by sface.
        """
        #First get the slice with the slice hrn
        slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')

        # NOTE(review): "is 0" is an identity test against an int literal,
        # which is implementation dependent -- should be "== 0" (or simply
        # "if not slice_list:").
        if len(slice_list) is 0:
            raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))

        #Slice has the same slice hrn for each slice in the slice/lease list
        #So fetch the info on the user once
        one_slice = slice_list[0]
        recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                one_slice['record_id_user']).first()

        #Make a list of all the nodes hostnames in use for this slice
        # NOTE(review): 'sl' (per-slice loop variable) and the empty
        # 'slice_nodes_list' are set up on lines not shown in this view.
        for node in sl['node_ids']:
            slice_nodes_list.append(node['hostname'])

        #Get all the corresponding nodes details
        nodes_all = self.GetNodes({'hostname':slice_nodes_list},
                        ['node_id', 'hostname','site','boot_state'])
        nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])

        # Aggregated GENI status over all slivers: 'empty'/'ready'/'failed'.
        top_level_status = 'empty'
        # Keys of the returned GENI SliverStatus structure (the 'result'
        # dict creation line is not shown in this view).
                 ['geni_urn','pl_login','geni_status','geni_resources'], None)
        result['pl_login'] = recuser.hrn
        logger.debug("Slabdriver - sliver_status Sliver status \
                        urn %s hrn %s sl %s \r\n " \
                        %(slice_urn, slice_hrn, sl))
        nodes_in_slice = sl['node_ids']
        # NOTE(review): the two lines below are indented as the body of a
        # guard (probably "if not nodes_in_slice:") not shown in this view.
            result['geni_status'] = top_level_status
            result['geni_resources'] = []
        top_level_status = 'ready'

        #A job is running on Senslab for this slice
        # report about the local nodes that are in the slice only
        result['geni_urn'] = slice_urn

        #timestamp = float(sl['startTime']) + float(sl['walltime'])
        #result['pl_expires'] = strftime(self.time_format, \
        #gmtime(float(timestamp)))
        #result['slab_expires'] = strftime(self.time_format,\
        #gmtime(float(timestamp)))

        # Build one GENI resource entry per node of the slice.
        for node in sl['node_ids']:
            #res['slab_hostname'] = node['hostname']
            #res['slab_boot_state'] = node['boot_state']
            res['pl_hostname'] = node['hostname']
            res['pl_boot_state'] = \
                    nodeall_byhostname[node['hostname']]['boot_state']
            #res['pl_last_contact'] = strftime(self.time_format, \
            #gmtime(float(timestamp)))
            # Sliver urn = slice urn + node id on this authority.
            sliver_id = Xrn(slice_urn, type='slice', \
                    id=nodeall_byhostname[node['hostname']]['node_id'], \
                    authority=self.hrn).urn
            res['geni_urn'] = sliver_id
            # 'Alive' in OAR maps to GENI 'ready'; otherwise the sliver and
            # the top level status are marked 'failed'.
            if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
                res['geni_status'] = 'ready'
            # NOTE(review): an "else:" introducing the failure branch is on
            # a line not shown in this view.
                res['geni_status'] = 'failed'
                top_level_status = 'failed'

            res['geni_error'] = ''

            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
    # Allocate the requested resources: verify/create the slice and its
    # users, then translate the requested leases into OAR jobs and return
    # the resulting manifest rspec.
    # NOTE(review): the signature continues on a line not shown in this
    # view (presumably "users, options):").
    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
        aggregate = SlabAggregate(self)

        slices = SlabSlices(self)
        peer = slices.get_peer(slice_hrn)
        sfa_peer = slices.get_sfa_peer(slice_hrn)

        # Normalise the credentials to a list (body of the "if" is on a
        # line not shown in this view).
        if not isinstance(creds, list):

        slice_record = users[0].get('slice_record', {})

        # Parse the incoming request rspec.
        rspec = RSpec(rspec_string)
        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version \
                        %s slice_record %s " \
                        %(rspec.version,slice_record))

        # ensure site record exists?
        # ensure slice record exists
        #Removed options to verify_slice SA 14/08/12
        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \

        #requested_attributes returned by rspec.version.get_slice_attributes()
        #unused, removed SA 13/08/12
        rspec.version.get_slice_attributes()

        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))

        # ensure person records exists
        #verify_persons returns added persons but since the return value
        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
            sfa_peer, options=options)

        # add/remove slice from nodes

        requested_slivers = [node.get('component_name') \
                    for node in rspec.version.get_nodes_with_slivers()]
        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                        requested_slivers %s listnodes %s" \
                        %(requested_slivers,l))
        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
        #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)

        # Collect the leases requested in the rspec (new ones only).
        requested_lease_list = []

        logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
        rspec_requested_leases = rspec.version.get_leases()
        for lease in rspec.version.get_leases():
            single_requested_lease = {}
            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
            # A lease without an id is a new request coming from the rspec.
            if not lease.get('lease_id'):
                single_requested_lease['hostname'] = \
                        slab_xrn_to_hostname(\
                                        lease.get('component_id').strip())
                single_requested_lease['start_time'] = lease.get('start_time')
                single_requested_lease['duration'] = lease.get('duration')

            if single_requested_lease.get('hostname'):
                requested_lease_list.append(single_requested_lease)

        logger.debug("SLABDRIVER.PY \tcreate_sliver APRESLEASE" )
        #dCreate dict of leases by start_time, regrouping nodes reserved
        #time, for the same amount of time = one job on OAR
        requested_job_dict = {}
        for lease in requested_lease_list:

            #In case it is an asap experiment start_time is empty
            if lease['start_time'] == '':
                lease['start_time'] = '0'

            if lease['start_time'] not in requested_job_dict:
                if isinstance(lease['hostname'], str):
                    lease['hostname'] = [lease['hostname']]

                requested_job_dict[lease['start_time']] = lease
            # NOTE(review): an "else:" (not shown here) introduces the
            # merge of leases sharing the same start time into one job.
                job_lease = requested_job_dict[lease['start_time']]
                if lease['duration'] == job_lease['duration'] :
                    # NOTE(review): lease['hostname'] may itself be a list
                    # at this point -- append vs extend, confirm.
                    job_lease['hostname'].append(lease['hostname'])

        logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
            %(requested_job_dict))
        #verify_slice_leases returns the leases , but the return value is unused
        #here. Removed SA 13/08/12
        slices.verify_slice_leases(sfa_slice, \
                                    requested_job_dict, peer)

        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        """Delete every job of the slice from the testbed nodes.

        NOTE(review): some lines of this routine (early return when the
        slice is unknown, the "if peer:" guards around the peer un/bind
        calls, the final return) are not shown in this view.
        """
        sfa_slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                        slice_filter_type = 'slice_hrn')

        if not sfa_slice_list:

        #Delete all in the slice
        for sfa_slice in sfa_slice_list:

            logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
            slices = SlabSlices(self)
            # determine if this is a peer slice

            peer = slices.get_peer(slice_hrn)
            #TODO delete_sliver SA : UnBindObjectFromPeer should be
            #used when there is another
            #senslab testbed, which is not the case 14/08/12 .

            logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
                self.UnBindObjectFromPeer('slice', \
                                sfa_slice['record_id_slice'], \
            self.DeleteSliceFromNodes(sfa_slice)
                self.BindObjectToPeer('slice', \
                                sfa_slice['record_id_slice'], \
                                peer, sfa_slice['peer_slice_id'])
303 def AddSlice(self, slice_record):
304 slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
305 record_id_slice= slice_record['record_id_slice'] , \
306 record_id_user= slice_record['record_id_user'], \
307 peer_authority = slice_record['peer_authority'])
308 logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
309 %(slice_record,slab_slice))
310 slab_dbsession.add(slab_slice)
311 slab_dbsession.commit()
314 # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        """Return the testbed resources (or a slice's resources when
        slice_urn/slice_hrn are set) as an rspec, in the rspec version
        requested through options.
        """
        #cached_requested = options.get('cached', True)

        version_manager = VersionManager()
        # get the rspec's return format from options
        # NOTE(review): the assignment target ("rspec_version = \") is on a
        # line not shown in this view.
        version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)

        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + \
                                        options.get('info', 'default')

        # look in cache first
        #if cached_requested and self.cache and not slice_hrn:
            #rspec = self.cache.get(version_string)
                #logger.debug("SlabDriver.ListResources: \
                                #returning cached advertisement")

        #panos: passing user-defined options
        aggregate = SlabAggregate(self)
        # Caller identity, taken from the first credential.
        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        options.update({'origin_hrn':origin_hrn})
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
                                        version=rspec_version, options=options)

        # NOTE(review): the final "return rspec..." is on a line not shown
        # in this view.
        #if self.cache and not slice_hrn:
            #logger.debug("Slab.ListResources: stores advertisement in cache")
            #self.cache.add(version_string, rspec)
    def list_slices (self, creds, options):
        """Return the urns of all the slices known to the slab database."""
        # look in cache first
        #slices = self.cache.get('slices')
            #logger.debug("PlDriver.list_slices returns from cache")

        slices = self.GetSlices()
        logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
        slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
        #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
                                                #for slab_slice in slices]
        slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                                                for slice_hrn in slice_hrns]

        # NOTE(review): "return slice_urns" is expected here, on a line
        # not shown in this view.
        #logger.debug ("SlabDriver.list_slices stores value in cache")
        #self.cache.add('slices', slice_urns)
    def register (self, sfa_record, hrn, pub_key):
        """Register a new object (user, slice, node or site).

        Adding new user, slice, node or site should not be handled
        Adding users = LDAP Senslab
        Adding slice = Import from LDAP users
        """
        # NOTE(review): the body/return of this method is on lines not
        # shown in this view; registration appears delegated to the LDAP /
        # OAR import machinery rather than done here -- confirm.
390 #No site or node record update allowed
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        """Update a slice or user record (site/node updates not allowed).

        :param old_sfa_record: record currently stored in the registry.
        :param new_sfa_record: new field values to apply.
        :param hrn: human readable name of the record.
        :param new_key: new ssh public key, users only.
        :raises UnknownSfaType: when new_key is given for a non-user record.
        """
        pointer = old_sfa_record['pointer']
        old_sfa_record_type = old_sfa_record['type']

        # new_key implemented for users only
        if new_key and old_sfa_record_type not in [ 'user' ]:
            raise UnknownSfaType(old_sfa_record_type)

        #if (type == "authority"):
            #self.shell.UpdateSite(pointer, new_sfa_record)

        if old_sfa_record_type == "slice":
            # NOTE(review): the continuation of this call (hrn, record
            # arguments) is on a line not shown in this view.
            slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
            if 'name' in slab_record:
                slab_record.pop('name')
            #Prototype should be UpdateSlice(self,
            #auth, slice_id_or_name, slice_fields)
            #Senslab cannot update slice since slice = job
            #so we must delete and create another job
            self.UpdateSlice(pointer, slab_record)

        elif old_sfa_record_type == "user":
            # NOTE(review): update_fields is initialised ( = {} ) on a line
            # not shown in this view, as is the tail of the allowed-keys
            # list below.
            all_fields = new_sfa_record
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                    update_fields[key] = all_fields[key]
            self.UpdatePerson(pointer, update_fields)

            # must check this key against the previous one if it exists
            persons = self.GetPersons([pointer], ['key_ids'])
            # NOTE(review): 'person' comes from a line not shown here
            # (presumably person = persons[0]).
            keys = person['key_ids']
            keys = self.GetKeys(person['key_ids'])

            # Delete all stale keys
            # NOTE(review): the loop over 'keys' introducing 'key' is on a
            # line not shown in this view.
                if new_key != key['key']:
                    self.DeleteKey(key['key_id'])
                self.AddPersonKey(pointer, {'key_type': 'ssh', \
    def remove (self, sfa_record):
        """Remove a record from the testbed: users are disabled in LDAP,
        slices have their jobs deleted. Sites/authorities are managed by
        OAR and are not removed here.
        """
        sfa_record_type = sfa_record['type']
        hrn = sfa_record['hrn']
        if sfa_record_type == 'user':

            #get user from senslab ldap
            person = self.GetPersons(sfa_record)
            #No registering at a given site in Senslab.
            #Once registered to the LDAP, all senslab sites are

            #Mark account as disabled in ldap
            self.DeletePerson(sfa_record)
        elif sfa_record_type == 'slice':
            # Only delete when the slice actually exists in the slab db.
            if self.GetSlices(slice_filter = hrn, \
                                    slice_filter_type = 'slice_hrn'):
                self.DeleteSlice(sfa_record)

            #elif type == 'authority':
                #if self.GetSites(pointer):
                    #self.DeleteSite(pointer)
471 #TODO clean GetPeers. 05/07/12SA
    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
        """Return the registry records of the authorities (peers).

        :param auth: unused in the visible code.
        :param peer_filter: hrn of a single authority to select.
        :param return_fields_list: restrict the fields of the result.
        """
        existing_records = {}
        existing_hrns_by_types = {}
        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                    return_field %s " %(auth , peer_filter, return_fields_list))
        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()

        # Index the records by (hrn, type) and group hrns per record type.
        for record in all_records:
            existing_records[(record.hrn, record.type)] = record
            if record.type not in existing_hrns_by_types:
                existing_hrns_by_types[record.type] = [record.hrn]
                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                    existing_hrns_by_types %s " %( existing_hrns_by_types))
            # NOTE(review): an "else:" introducing the already-seen-type
            # branch is on a line not shown in this view.
                logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                                                %(record.type,record.hrn))
                existing_hrns_by_types[record.type].append(record.hrn)

        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                                             %( existing_hrns_by_types))
        # NOTE(review): records_list initialisation and the "if peer_filter"
        # branching are on lines not shown in this view.
            records_list.append(existing_records[(peer_filter,'authority')])
            for hrn in existing_hrns_by_types['authority']:
                records_list.append(existing_records[(hrn,'authority')])

        logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \

        return_records = records_list
        if not peer_filter and not return_fields_list:

        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
        return return_records
519 #TODO : Handling OR request in make_ldap_filters_from_records
520 #instead of the for loop
521 #over the records' list
    def GetPersons(self, person_filter=None):
        """
        person_filter should be a list of dictionnaries when not set to None.
        Returns a list of users whose accounts are enabled found in ldap.
        """
        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
        # NOTE(review): "person_list = []" and the final
        # "return person_list" are on lines not shown in this view.
        if person_filter and isinstance(person_filter, list):
            #If we are looking for a list of users (list of dict records)
            #Usually the list contains only one user record
            for searched_attributes in person_filter:

                #Get only enabled user accounts in senslab LDAP :
                #add a filter for make_ldap_filters_from_record
                person = self.ldap.LdapFindUser(searched_attributes, \
                                is_user_enabled=True)
                person_list.append(person)

        # NOTE(review): an "else:" (no filter: fetch everybody) is on a
        # line not shown in this view.
            #Get only enabled user accounts in senslab LDAP :
            #add a filter for make_ldap_filters_from_record
            person_list = self.ldap.LdapFindUser(is_user_enabled=True)
549 def GetTimezone(self):
550 server_timestamp, server_tz = self.oar.parser.\
551 SendRequest("GET_timezone")
552 return server_timestamp, server_tz
    def DeleteJobs(self, job_id, slice_hrn):
        """Delete a job on the OAR server.

        :param job_id: OAR job id; nothing is deleted for a missing/-1 id.
        :param slice_hrn: slice hrn; its last component (minus the
            trailing "_slice") is used as the OAR user name.
        """
        # NOTE(review): "is -1" is an identity test against an int literal,
        # which is implementation dependent -- should be "== -1".
        if not job_id or job_id is -1:
        # NOTE(review): rstrip("_slice") strips any trailing characters
        # from the set {_,s,l,i,c,e}, not the literal suffix "_slice" --
        # logins ending in such characters would be over-stripped.
        username = slice_hrn.split(".")[-1].rstrip("_slice")
        # NOTE(review): "reqdict = {}" is on a line not shown in this view.
        reqdict['method'] = "delete"
        reqdict['strval'] = str(job_id)

        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
        logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
                                username %s" %(job_id,answer, username))
572 ##TODO : Unused GetJobsId ? SA 05/07/12
573 #def GetJobsId(self, job_id, username = None ):
575 #Details about a specific job.
576 #Includes details about submission time, jot type, state, events,
577 #owner, assigned ressources, walltime etc...
581 #node_list_k = 'assigned_network_address'
582 ##Get job info from OAR
583 #job_info = self.oar.parser.SendRequest(req, job_id, username)
585 #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
587 #if job_info['state'] == 'Terminated':
588 #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
591 #if job_info['state'] == 'Error':
592 #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
597 #logger.error("SLABDRIVER \tGetJobsId KeyError")
600 #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
602 ##Replaces the previous entry
603 ##"assigned_network_address" / "reserved_resources"
605 #job_info.update({'node_ids':parsed_job_info[node_list_k]})
606 #del job_info[node_list_k]
607 #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
    def GetJobsResources(self, job_id, username = None):
        """Return the hostnames of the nodes assigned to an OAR job.

        :param job_id: OAR job id.
        :param username: OAR user the request is made as.
        :returns: dict {'node_ids': hostname_list}; the return statement
            is on a line not shown in this view.
        """
        #job_resources=['reserved_resources', 'assigned_resources',\
                    #'job_id', 'job_uri', 'assigned_nodes',\
        #assigned_res = ['resource_id', 'resource_uri']
        #assigned_n = ['node', 'node_uri']

        req = "GET_jobs_id_resources"

        #Get job resources list from OAR
        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
        logger.debug("SLABDRIVER \t GetJobsResources %s " %(node_id_list))

        # NOTE(review): the assignment target ("hostname_list = \") is on
        # a line not shown in this view.
        self.__get_hostnames_from_oar_node_ids(node_id_list)

        #Replaces the previous entry "assigned_network_address" /
        #"reserved_resources"
        job_info = {'node_ids': hostname_list}
637 def get_info_on_reserved_nodes(self, job_info, node_list_name):
638 #Get the list of the testbed nodes records and make a
639 #dictionnary keyed on the hostname out of it
640 node_list_dict = self.GetNodes()
641 #node_hostname_list = []
642 node_hostname_list = [node['hostname'] for node in node_list_dict]
643 #for node in node_list_dict:
644 #node_hostname_list.append(node['hostname'])
645 node_dict = dict(zip(node_hostname_list, node_list_dict))
647 reserved_node_hostname_list = []
648 for index in range(len(job_info[node_list_name])):
649 #job_info[node_list_name][k] =
650 reserved_node_hostname_list[index] = \
651 node_dict[job_info[node_list_name][index]]['hostname']
653 logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
654 reserved_node_hostname_list %s" \
655 %(reserved_node_hostname_list))
657 logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
659 return reserved_node_hostname_list
661 def GetNodesCurrentlyInUse(self):
662 """Returns a list of all the nodes already involved in an oar job"""
663 return self.oar.parser.SendRequest("GET_running_jobs")
665 def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
666 full_nodes_dict_list = self.GetNodes()
667 #Put the full node list into a dictionary keyed by oar node id
668 oar_id_node_dict = {}
669 for node in full_nodes_dict_list:
670 oar_id_node_dict[node['oar_id']] = node
672 #logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
673 #oar_id_node_dict %s" %(oar_id_node_dict))
675 hostname_dict_list = []
676 for resource_id in resource_id_list:
677 #Because jobs requested "asap" do not have defined resources
678 if resource_id is not "Undefined":
679 hostname_dict_list.append(\
680 oar_id_node_dict[resource_id]['hostname'])
682 #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
683 return hostname_dict_list
685 def GetReservedNodes(self,username = None):
686 #Get the nodes in use and the reserved nodes
687 reservation_dict_list = \
688 self.oar.parser.SendRequest("GET_reserved_nodes", username = username)
691 for resa in reservation_dict_list:
692 logger.debug ("GetReservedNodes resa %s"%(resa))
693 #dict list of hostnames and their site
694 resa['reserved_nodes'] = \
695 self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
697 #del resa['resource_ids']
698 return reservation_dict_list
    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
        """Return the testbed nodes known to OAR.

        node_filter_dict : dictionnary of lists
        (each key is a node field, each value a list of accepted values);
        return_fields_list restricts the fields of the returned dicts.
        """
        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
        node_dict_list = node_dict_by_id.values()
        logger.debug (" SLABDRIVER GetNodes node_filter_dict %s return_fields_list %s "%(node_filter_dict,return_fields_list))
        #No filtering needed return the list directly
        if not (node_filter_dict or return_fields_list):
            return node_dict_list

        return_node_list = []
        for filter_key in node_filter_dict:
            #Filter the node_dict_list by each value contained in the
            #list node_filter_dict[filter_key]
            for value in node_filter_dict[filter_key]:
                for node in node_dict_list:
                    if node[filter_key] == value:
                        if return_fields_list :
                            # NOTE(review): "tmp = {...}" and the field
                            # copy "tmp[k] = node[k]" are on lines not
                            # shown in this view.
                            for k in return_fields_list:
                            return_node_list.append(tmp)
                        # NOTE(review): an "else:" (no field restriction,
                        # keep the whole node) is on a line not shown here.
                            return_node_list.append(node)
            # NOTE(review): a try/except wraps the filtering in the full
            # file, hence the log_exc below.
            logger.log_exc("GetNodes KeyError")

        return return_node_list
    def GetSites(self, site_filter_name_list = None, return_fields_list = None):
        """Return the OAR sites, optionally filtered by name and restricted
        to the requested fields.
        """
        site_dict = self.oar.parser.SendRequest("GET_sites")
        #site_dict : dict where the key is the sit ename
        return_site_list = []
        # No filter at all: return every site record.
        if not ( site_filter_name_list or return_fields_list):
            return_site_list = site_dict.values()
            return return_site_list

        for site_filter_name in site_filter_name_list:
            if site_filter_name in site_dict:
                if return_fields_list:
                    # NOTE(review): "tmp = {}" and a try/except KeyError
                    # around the field copy are on lines not shown here.
                    for field in return_fields_list:
                            tmp[field] = site_dict[site_filter_name][field]
                            logger.error("GetSites KeyError %s "%(field))
                    return_site_list.append(tmp)
                # NOTE(review): an "else:" (keep the whole site record) is
                # on a line not shown in this view.
                    return_site_list.append( site_dict[site_filter_name])

        return return_site_list
    def GetSlices(self, slice_filter = None, slice_filter_type = None):
    #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
    #return_fields_list = None):
        """ Get the slice records from the slab db.
        Returns a slice ditc if slice_filter and slice_filter_type
        Returns a list of slice dictionnaries if there are no filters
        """
        return_slice_list = []

        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
        slicerec_dictlist = []

        if slice_filter_type in authorized_filter_types_list:

            def __get_slice_records(slice_filter = None, slice_filter_type = None):
                # Fetch one slice row from the slab db and derive the OAR
                # login of its owner from the hrn
                # (<authority>.<login>_slice).

                #Get list of slices based on the slice hrn
                if slice_filter_type == 'slice_hrn':

                    login = slice_filter.split(".")[1].split("_")[0]

                    #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                    #otherwise will mess up the RegRecord in Resolve, don't know
                    #Only one entry for one user = one slice in slice_senslab table
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()

                #Get slice based on user id
                if slice_filter_type == 'record_id_user':
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()

                # Convert the SQLAlchemy row into a plain dict.
                fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
                # Recompute the login (needed for the record_id_user case).
                login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                return login, fixed_slicerec_dict

            login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
            logger.debug(" SLABDRIVER \tGetSlices login %s \
                            %(login, fixed_slicerec_dict))

            #One slice can have multiple jobs

            leases_list = self.GetReservedNodes(username = login)
            #If no job is running or no job scheduled
            if leases_list == [] :
                return [fixed_slicerec_dict]

            #Several jobs for one slice
            for lease in leases_list :
                # NOTE(review): slicerec_dict is (re)initialised per lease
                # on a line not shown in this view.

                #Check with OAR the status of the job if a job id is in
                slicerec_dict['oar_job_id'] = lease['lease_id']

                #for reserved_node in lease['reserved_nodes']:
                    #reserved_list.append(reserved_node['hostname'])
                reserved_list = lease['reserved_nodes']

                #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})

                slicerec_dictlist.append(slicerec_dict)
                logger.debug("SLABDRIVER.PY \tGetSlices slicerec_dict %s slicerec_dictlist %s lease['reserved_nodes'] %s" %(slicerec_dict, slicerec_dictlist,lease['reserved_nodes'] ))

            logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dictlist %s"\
                            %(slicerec_dictlist))

            return slicerec_dictlist

        # No filter: return every slice known to the slab db, merged with
        # the current leases. (The "else:" introducing this branch is on a
        # line not shown in this view.)
        slice_list = slab_dbsession.query(SliceSenslab).all()
        leases_list = self.GetReservedNodes()

        slicerec_dictlist = []
        return_slice_list = []
        for record in slice_list:
            return_slice_list.append(record.dump_sqlalchemyobj_to_dict())

        for fixed_slicerec_dict in return_slice_list:
            # Owner login, from hrn <authority>.<login>_slice.
            owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
            for lease in leases_list:
                if owner == lease['user']:
                    # NOTE(review): slicerec_dict and reserved_list are
                    # initialised on lines not shown in this view.
                    slicerec_dict['oar_job_id'] = lease['lease_id']

                    for reserved_node in lease['reserved_nodes']:
                        reserved_list.append(reserved_node['hostname'])
                    #slicerec_dict.update({'node_ids':{'hostname':reserved_list}})
                    #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                    slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                    slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                    slicerec_dict.update(fixed_slicerec_dict)
                    slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})
                    slicerec_dictlist.append(slicerec_dict)

        logger.debug("SLABDRIVER.PY \tGetSlices RETURN slices %s \
                        slice_filter %s " %(return_slice_list, slice_filter))

        #if return_fields_list:
            #return_slice_list = parse_filter(sliceslist, \
                                #slice_filter,'slice', return_fields_list)

        return slicerec_dictlist
900 def testbed_name (self): return self.hrn
902 # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version (self):
        """Return the rspec versions supported by this aggregate, split
        between advertisement ('ad') and 'request' content types, as
        required by the GENI AM API.
        """
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        # Members of the returned dict (the "return {" opening line is not
        # shown in this view).
            'testbed':self.testbed_name(),
            'geni_request_rspec_versions': request_rspec_versions,
            'geni_ad_rspec_versions': ad_rspec_versions,
924 # Convert SFA fields to PLC fields for use when registering up updating
925 # registry record in the PLC database
927 # @param type type of record (user, slice, ...)
928 # @param hrn human readable name
929 # @param sfa_fields dictionary of SFA fields
930 # @param slab_fields dictionary of PLC fields (output)
    def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
        """Translate an SFA record into the fields stored by the testbed.

        :param sfa_type: type of record ('slice', ...).
        :param hrn: human readable name of the record.
        :param record: dictionary of SFA fields.
        :returns: dictionary of slab fields (slab_record); its
            initialisation and the final return are on lines not shown in
            this view.
        """
        #for field in record:
        #    slab_record[field] = record[field]

        if sfa_type == "slice":
            #instantion used in get_slivers ?
            if not "instantiation" in slab_record:
                slab_record["instantiation"] = "senslab-instantiated"
            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
            #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
            slab_record["hrn"] = hrn
            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
                        slab_record %s " %(slab_record['hrn']))
            # NOTE(review): an 'if "url" in record:' guard is expected
            # before the url copy, on a line not shown in this view.
                slab_record["url"] = record["url"]
            if "description" in record:
                slab_record["description"] = record["description"]
            if "expires" in record:
                slab_record["expires"] = int(record["expires"])

        #nodes added by OAR only and then imported to SFA
        #elif type == "node":
            #if not "hostname" in slab_record:
                #if not "hostname" in record:
                    #raise MissingSfaInfo("hostname")
                #slab_record["hostname"] = record["hostname"]
            #if not "model" in slab_record:
                #slab_record["model"] = "geni"

        #elif type == "authority":
            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)

            #if not "name" in slab_record:
                #slab_record["name"] = hrn

            #if not "abbreviated_name" in slab_record:
                #slab_record["abbreviated_name"] = hrn

            #if not "enabled" in slab_record:
                #slab_record["enabled"] = True

            #if not "is_public" in slab_record:
                #slab_record["is_public"] = True
    def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
        """ Transforms unix timestamp into valid OAR date format """

        #Used in case of a scheduled experiment (not immediate)
        #To run an XP immediately, don't specify date and time in RSpec
        #They will be set to None.
        # NOTE(review): a guard ("if xp_utc_timestamp:") is expected around
        # the conversion on a line not shown in this view -- otherwise the
        # default None argument would make int(None) raise TypeError.

        #transform the xp_utc_timestamp into server readable time
        # Uses the server's local timezone (datetime.fromtimestamp).
        xp_server_readable_date = datetime.fromtimestamp(int(\
                            xp_utc_timestamp)).strftime(self.time_format)

        return xp_server_readable_date
    def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
                        lease_start_time, lease_duration, slice_user=None):
        """Submit an OAR job reserving the given nodes for the slice, then
        configure and launch the senslab experiment wrapper.

        :param added_nodes: hostnames of the nodes to reserve.
        :param slice_name: name of the slice owning the job.
        :param lease_start_time: unix timestamp; '0' means an immediate job.
        :param lease_duration: duration expressed in testbed "grains"
            (see GetLeaseGranularity).
        :param slice_user: OAR user the job is submitted as.
        """
        # NOTE(review): "lease_dict = {}" is on a line not shown here.
        lease_dict['lease_start_time'] = lease_start_time
        lease_dict['lease_duration'] = lease_duration
        lease_dict['added_nodes'] = added_nodes
        lease_dict['slice_name'] = slice_name
        lease_dict['slice_user'] = slice_user
        lease_dict['grain'] = self.GetLeaseGranularity()
        lease_dict['time_format'] = self.time_format

        def __create_job_structure_request_for_OAR(lease_dict):
            """ Creates the structure needed for a correct POST on OAR.
            Makes the timestamp transformation into the appropriate format.
            Sends the POST request to create the job with the resources in
            """
            # NOTE(review): "reqdict = {}" and "nodeid_list = []" are on
            # lines not shown in this view.
            reqdict['workdir'] = '/tmp'
            reqdict['resource'] = "{network_address in ("

            for node in lease_dict['added_nodes']:
                logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \

                # Get the ID of the node
                reqdict['resource'] += "'" + nodeid + "', "
                nodeid_list.append(nodeid)

            # Drop the trailing ", " before closing the resource clause.
            custom_length = len(reqdict['resource'])- 2
            reqdict['resource'] = reqdict['resource'][0:custom_length] + \
                                            ")}/nodes=" + str(len(nodeid_list))

            def __process_walltime(duration):
                """ Calculates the walltime in seconds from the duration in H:M:S
                specified in the RSpec.
                """
                # Fixing the walltime by adding a few delays.
                # First put the walltime in seconds oarAdditionalDelay = 20;
                # additional delay for /bin/sleep command to
                # take in account prologue and epilogue scripts execution
                # int walltimeAdditionalDelay = 240; additional delay
                desired_walltime = duration
                total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
                sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12

                #Put the walltime back in str form
                #First get the hours
                # NOTE(review): integer division -- relies on Python 2
                # semantics of "/" on ints.
                walltime.append(str(total_walltime / 3600))
                total_walltime = total_walltime - 3600 * int(walltime[0])
                #Get the remaining minutes
                walltime.append(str(total_walltime / 60))
                total_walltime = total_walltime - 60 * int(walltime[1])

                walltime.append(str(total_walltime))
                # NOTE(review): the branch handling a null duration is on
                # lines not shown in this view.
                logger.log_exc(" __process_walltime duration null")

                return walltime, sleep_walltime

            walltime, sleep_walltime = \
                __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])

            reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
                        ":" + str(walltime[1]) + ":" + str(walltime[2])
            reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)

            #In case of a scheduled experiment (not immediate)
            #To run an XP immediately, don't specify date and time in RSpec
            #They will be set to None.
            # NOTE(review): "is not '0'" is an identity test against a str
            # literal and is implementation dependent -- should be "!= '0'".
            if lease_dict['lease_start_time'] is not '0':
                #Readable time accepted by OAR
                start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
                                                strftime(lease_dict['time_format'])
                reqdict['reservation'] = start_time
            #If there is not start time, Immediate XP. No need to add special

            reqdict['type'] = "deploy"
            reqdict['directory'] = ""
            reqdict['name'] = "SFA_" + lease_dict['slice_user']

        #Create the request for OAR
        reqdict = __create_job_structure_request_for_OAR(lease_dict)
        # first step : start the OAR job and update the job
        logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\

        answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
                                                reqdict, slice_user)
        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
        # NOTE(review): a try/except KeyError wraps the jobid lookup in the
        # full file, hence the log_exc below.
        jobid = answer['id']
        logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
                                Impossible to create job %s " %(answer))

        def __configure_experiment(jobid, added_nodes):
            # second step : configure the experiment
            # we need to store the nodes in a yaml (well...) file like this :
            # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
            job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
            # NOTE(review): strip('node') removes any leading/trailing
            # characters from the set {n,o,d,e}, not the literal prefix
            # "node" -- hostnames ending in such characters would be
            # over-stripped.
            job_file.write(str(added_nodes[0].strip('node')))
            for node in added_nodes[1:len(added_nodes)] :
                job_file.write(', '+ node.strip('node'))

        def __launch_senslab_experiment(jobid):
            # third step : call the senslab-experiment wrapper
            #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
            # "+str(jobid)+" "+slice_user
            javacmdline = "/usr/bin/java"
            # NOTE(review): the assignment target ("jarname = \") is on a
            # line not shown in this view.
            "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
            #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
            #str(jobid), slice_user])
            output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
                        slice_user],stdout=subprocess.PIPE).communicate()[0]

            logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \

        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
                        added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))

        __configure_experiment(jobid, added_nodes)
        __launch_senslab_experiment(jobid)
# Book a lease on the testbed: derive the senslab LDAP username from the
# slice's first researcher hrn ("auth.user" -> "user") and submit an OAR
# job for the requested hosts via LaunchExperimentOnOAR.
# NOTE(review): numbered listing with elided lines (1163-1164, the tail of
# the first logger.debug call) -- code left byte-identical.
1159 def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
1160 logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
1161 slice_record %s lease_start_time %s lease_duration %s "\
1162 %( hostname_list, slice_record , lease_start_time, \
# Last component of the researcher hrn is the LDAP uid.
1165 tmp = slice_record['reg-researchers'][0].split(".")
1166 username = tmp[(len(tmp)-1)]
1167 self.LaunchExperimentOnOAR(hostname_list, slice_record['slice_hrn'], lease_start_time, lease_duration, username)
# Human-readable start time (self.time_format) -- used for logging only here.
1168 start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
1169 logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
1174 #Delete the jobs from job_senslab table
# Release the slice's nodes by deleting its OAR job(s).
# Expects slice_record to carry 'oar_job_id' and 'hrn' keys.
1175 def DeleteSliceFromNodes(self, slice_record):
1177 self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
1181 def GetLeaseGranularity(self):
1182 """ Returns the granularity of Senslab testbed.
1183 OAR returns seconds for experiments duration.
1184 Defined in seconds. """
# NOTE(review): the return statement (original lines 1185-1188) is elided
# from this listing; presumably returns a constant/attribute grain in
# seconds -- confirm against the full source.
# Build the SFA lease list from OAR's reserved nodes.
# For each reservation: resolve the OAR user to its RegUser (via LDAP email)
# and its SliceSenslab record, then attach slice_hrn, slice_id (urn) and the
# component_id urn list. Optionally filter by slice name.
# NOTE(review): numbered listing with elided lines (resa_user_dict init,
# the else: introducing line 1248, several logger continuations) -- code
# left byte-identical.
1189 def GetLeases(self, lease_filter_dict=None):
1190 unfiltered_reservation_list = self.GetReservedNodes()
1192 ##Synchronize slice_table of sfa senslab db
1193 #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)
1195 reservation_list = []
1196 #Find the slice associated with this user senslab ldap uid
1197 logger.debug(" SLABDRIVER.PY \tGetLeases ")
1198 #Create user dict first to avoid looking several times for
1199 #the same user in LDAP SA 27/07/12
# First pass: cache per-user LDAP/RegUser/slice lookups in resa_user_dict.
1201 for resa in unfiltered_reservation_list:
1202 logger.debug("SLABDRIVER \tGetLeases USER %s"\
1204 if resa['user'] not in resa_user_dict:
1205 logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
1206 ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
# LdapSearch returns a list of (dn, attrs) tuples; keep the attrs dict.
1207 ldap_info = ldap_info[0][1]
1208 user = dbsession.query(RegUser).filter_by(email = \
1209 ldap_info['mail'][0]).first()
1210 #Separated in case user not in database : record_id not defined SA 17/07//12
1211 query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
1212 if query_slice_info:
1213 slice_info = query_slice_info.first()
1217 resa_user_dict[resa['user']] = {}
1218 resa_user_dict[resa['user']]['ldap_info'] = user
1219 resa_user_dict[resa['user']]['slice_info'] = slice_info
1221 logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
# Second pass: annotate each reservation with slice and component urns.
1223 for resa in unfiltered_reservation_list:
1227 resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
1228 resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
1230 #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
1231 resa['component_id_list'] = []
1232 #Transform the hostnames into urns (component ids)
1233 for node in resa['reserved_nodes']:
1234 #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
1235 #self.root_auth, node['hostname']))
1236 slab_xrn = slab_xrn_object(self.root_auth, node)
1237 resa['component_id_list'].append(slab_xrn.urn)
1239 #Filter the reservation list if necessary
1240 #Returns all the leases associated with a given slice
1241 if lease_filter_dict:
1242 logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
1243 %(lease_filter_dict))
1244 for resa in unfiltered_reservation_list:
1245 if lease_filter_dict['name'] == resa['slice_hrn']:
1246 reservation_list.append(resa)
# (else branch, line elided): no filter -> return everything.
1248 reservation_list = unfiltered_reservation_list
1250 logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
1251 %(reservation_list))
1252 return reservation_list
# SFA driver hook: delegate to fill_record_info, which adds the
# senslab-specific fields to the given SFA records.
1254 def augment_records_with_testbed_info (self, sfa_records):
1255 return self.fill_record_info (sfa_records)
# Fill senslab- and SFA-specific fields into the given record(s).
# 'slice' records gain PI/researcher/oar_job_id info from the slab db;
# 'user' records gain their person info plus an appended slice record.
# NOTE(review): numbered listing with elided lines (try: header, several
# dict entries, logger continuations) -- code left byte-identical.
# NOTE(review): `except TypeError, error:` is Python 2 syntax; under
# Python 3 this file would not parse (would need `as error`).
1257 def fill_record_info(self, record_list):
1259 Given a SFA record, fill in the senslab specific and SFA specific
1260 fields in the record.
1263 logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
# Accept a single record as well as a list.
1264 if not isinstance(record_list, list):
1265 record_list = [record_list]
1268 for record in record_list:
1269 #If the record is a SFA slice record, then add information
1270 #about the user of this slice. This kind of
1271 #information is in the Senslab's DB.
1272 if str(record['type']) == 'slice':
1273 #Get slab slice record.
1274 recslice_list = self.GetSlices(slice_filter = \
1275 str(record['hrn']),\
1276 slice_filter_type = 'slice_hrn')
# Owner of the slice, looked up in the SFA registry db.
1278 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1279 recslice_list[0]['record_id_user']).first()
1280 logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
1281 record.update({'PI':[recuser.hrn],
1282 'researcher': [recuser.hrn],
1283 'name':record['hrn'],
1286 'person_ids':[recslice_list[0]['record_id_user']],
1287 'geni_urn':'', #For client_helper.py compatibility
1288 'keys':'', #For client_helper.py compatibility
1289 'key_ids':''}) #For client_helper.py compatibility
# Collect the OAR job ids of every matching slab slice record.
1292 for rec in recslice_list:
1293 record['oar_job_id'].append(rec['oar_job_id'])
1297 logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
1298 recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
1299 if str(record['type']) == 'user':
1300 #The record is a SFA user record.
1301 #Get the information about his slice from Senslab's DB
1302 #and add it to the user record.
1303 recslice_list = self.GetSlices(\
1304 slice_filter = record['record_id'],\
1305 slice_filter_type = 'record_id_user')
1307 logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
1308 recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
1309 #Append slice record in records list,
1310 #therefore fetches user and slice info again(one more loop)
1311 #Will update PIs and researcher for the slice
1312 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1313 recslice_list[0]['record_id_user']).first()
1314 logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
1315 recuser %s \r\n \r\n" %(recuser))
1317 recslice = recslice_list[0]
1318 recslice.update({'PI':[recuser.hrn],
1319 'researcher': [recuser.hrn],
1320 'name':record['hrn'],
1323 'person_ids':[recslice_list[0]['record_id_user']]})
1325 for rec in recslice_list:
1326 recslice['oar_job_id'].append(rec['oar_job_id'])
# Re-tag the synthesized record as a slice record.
1330 recslice.update({'type':'slice', \
1331 'hrn':recslice_list[0]['slice_hrn']})
1334 #GetPersons takes [] as filters
1335 #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
1336 user_slab = self.GetPersons([record])
1339 record.update(user_slab[0])
1340 #For client_helper.py compatibility
1341 record.update( { 'geni_urn':'',
# NOTE(review): appending to record_list while iterating over it; works
# here only because the appended record is of type 'slice' processed by
# the earlier branch -- fragile, worth refactoring in the full source.
1344 record_list.append(recslice)
1346 logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
1347 INFO TO USER records %s" %(record_list))
1348 logger.debug("SLABDRIVER.PY \tfill_record_info END \
1349 #record %s \r\n \r\n " %(record))
1351 except TypeError, error:
1352 logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
1354 #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
1358 #self.fill_record_slab_info(records)
1364 #TODO Update membership? update_membership_list SA 05/07/12
1365 #def update_membership_list(self, oldRecord, record, listName, addFunc, \
1367 ## get a list of the HRNs that are members of the old and new records
1369 #oldList = oldRecord.get(listName, [])
1372 #newList = record.get(listName, [])
1374 ## if the lists are the same, then we don't have to update anything
1375 #if (oldList == newList):
1378 ## build a list of the new person ids, by looking up each person to get
1382 #records = table.find({'type': 'user', 'hrn': newList})
1383 #for rec in records:
1384 #newIdList.append(rec['pointer'])
1386 ## build a list of the old person ids from the person_ids field
1388 #oldIdList = oldRecord.get("person_ids", [])
1389 #containerId = oldRecord.get_pointer()
1391 ## if oldRecord==None, then we are doing a Register, instead of an
1394 #containerId = record.get_pointer()
1396 ## add people who are in the new list, but not the oldList
1397 #for personId in newIdList:
1398 #if not (personId in oldIdList):
1399 #addFunc(self.plauth, personId, containerId)
1401 ## remove people who are in the old list, but not the new list
1402 #for personId in oldIdList:
1403 #if not (personId in newIdList):
1404 #delFunc(self.plauth, personId, containerId)
1406 #def update_membership(self, oldRecord, record):
1408 #if record.type == "slice":
1409 #self.update_membership_list(oldRecord, record, 'researcher',
1410 #self.users.AddPersonToSlice,
1411 #self.users.DeletePersonFromSlice)
1412 #elif record.type == "authority":
1417 # I don't think you plan on running a component manager at this point
1418 # let me clean up the mess of ComponentAPI that is deprecated anyways
1421 #TODO FUNCTIONS SECTION 04/07/2012 SA
1423 #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
# Stub: only logs a warning, performs no detaching. Kept for API
# compatibility with the PL federation workflow (see TODO above).
# NOTE(review): docstring tail and return value elided in this listing.
1425 def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
1426 """ This method is a hopefully temporary hack to let the sfa correctly
1427 detach the objects it creates from a remote peer object. This is
1428 needed so that the sfa federation link can work in parallel with
1429 RefreshPeer, as RefreshPeer depends on remote objects being correctly
1432 auth : struct, API authentication structure
1433 AuthMethod : string, Authentication method to use
1434 object_type : string, Object type, among 'site','person','slice',
1436 object_id : int, object_id
1437 shortname : string, peer shortname
1441 logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
1445 #TODO Is BindObjectToPeer still necessary ? Currently does nothing
# Stub: only logs a warning; no binding is performed on senslab.
1447 def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
1448 remote_object_id=None):
1449 """This method is a hopefully temporary hack to let the sfa correctly
1450 attach the objects it creates to a remote peer object. This is needed
1451 so that the sfa federation link can work in parallel with RefreshPeer,
1452 as RefreshPeer depends on remote objects being correctly marked.
1454 shortname : string, peer shortname
1455 remote_object_id : int, remote object_id, set to 0 if unknown
1459 logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
1462 #TODO UpdateSlice 04/07/2012 SA
1463 #Function should delete and create another job since in senslab slice=job
# Stub: only logs a warning; slice update is not implemented yet.
1464 def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
1465 """Updates the parameters of an existing slice with the values in
1467 Users may only update slices of which they are members.
1468 PIs may update any of the slices at their sites, or any slices of
1469 which they are members. Admins may update any slice.
1470 Only PIs and admins may update max_nodes. Slices cannot be renewed
1471 (by updating the expires parameter) more than 8 weeks into the future.
1472 Returns 1 if successful, faults otherwise.
1476 logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
1479 #TODO UpdatePerson 04/07/2012 SA
# Stub: only logs a warning; person update is not implemented yet.
1480 def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
1481 """Updates a person. Only the fields specified in person_fields
1482 are updated, all other fields are left untouched.
1483 Users and techs can only update themselves. PIs can only update
1484 themselves and other non-PIs at their sites.
1485 Returns 1 if successful, faults otherwise.
1489 logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
1492 #TODO GetKeys 04/07/2012 SA
# Stub: only logs a warning; key listing is not implemented yet.
1493 def GetKeys(self, auth, key_filter=None, return_fields=None):
1494 """Returns an array of structs containing details about keys.
1495 If key_filter is specified and is an array of key identifiers,
1496 or a struct of key attributes, only keys matching the filter
1497 will be returned. If return_fields is specified, only the
1498 specified details will be returned.
1500 Admin may query all keys. Non-admins may only query their own keys.
1504 logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
1507 #TODO DeleteKey 04/07/2012 SA
# Stub: only logs a warning; key deletion is not implemented yet.
1508 def DeleteKey(self, auth, key_id):
1510 Non-admins may only delete their own keys.
1511 Returns 1 if successful, faults otherwise.
1515 logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
1519 #TODO : Check rights to delete person
# Soft-delete: marks the LDAP account as deleted rather than removing it.
# NOTE(review): return statement elided in this listing (presumably ret).
1520 def DeletePerson(self, auth, person_record):
1521 """ Disable an existing account in senslab LDAP.
1522 Users and techs can only delete themselves. PIs can only
1523 delete themselves and other non-PIs at their sites.
1524 Admins can delete anyone.
1525 Returns 1 if successful, faults otherwise.
1529 #Disable user account in senslab LDAP
1530 ret = self.ldap.LdapMarkUserAsDeleted(person_record)
1531 logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
1534 #TODO Check DeleteSlice, check rights 05/07/2012 SA
# On senslab a slice maps to an OAR job, so deleting the slice kills the
# job via DeleteSliceFromNodes. No rights checking yet (see TODO above).
1535 def DeleteSlice(self, auth, slice_record):
1536 """ Deletes the specified slice.
1537 Senslab : Kill the job associated with the slice if there is one
1538 using DeleteSliceFromNodes.
1539 Updates the slice record in slab db to remove the slice nodes.
1541 Users may only delete slices of which they are members. PIs may
1542 delete any of the slices at their sites, or any slices of which
1543 they are members. Admins may delete any slice.
1544 Returns 1 if successful, faults otherwise.
1548 self.DeleteSliceFromNodes(slice_record)
1549 logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
1552 #TODO AddPerson 04/07/2012 SA
1553 #def AddPerson(self, auth, person_fields=None):
# Creates the account in senslab LDAP from the given record.
# NOTE(review): return statement elided in this listing (presumably ret).
1554 def AddPerson(self, record):#TODO fixing 28/08//2012 SA
1555 """Adds a new account. Any fields specified in records are used,
1556 otherwise defaults are used.
1557 Accounts are disabled by default. To enable an account,
1559 Returns the new person_id (> 0) if successful, faults otherwise.
1563 ret = self.ldap.LdapAddUser(record)
1564 logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
1567 #TODO AddPersonToSite 04/07/2012 SA
# Stub: only logs a warning; senslab has a single site, so this is a no-op.
1568 def AddPersonToSite (self, auth, person_id_or_email, \
1569 site_id_or_login_base=None):
1570 """ Adds the specified person to the specified site. If the person is
1571 already a member of the site, no errors are returned. Does not change
1572 the person's primary site.
1573 Returns 1 if successful, faults otherwise.
1577 logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
1580 #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
# Stub: only logs a warning; role management is not implemented yet.
1581 def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
1582 """Grants the specified role to the person.
1583 PIs can only grant the tech and user roles to users and techs at their
1584 sites. Admins can grant any role to any user.
1585 Returns 1 if successful, faults otherwise.
1589 logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
1592 #TODO AddPersonKey 04/07/2012 SA
# Stub: only logs a warning; key upload is not implemented yet.
1593 def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
1594 """Adds a new key to the specified account.
1595 Non-admins can only modify their own keys.
1596 Returns the new key_id (> 0) if successful, faults otherwise.
1600 logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
# Delete every OAR job backing the given leases for the slice, then log.
# Relies on DeleteJobs for the per-job OAR REST call.
1603 def DeleteLeases(self, leases_id_list, slice_hrn ):
1604 for job_id in leases_id_list:
1605 self.DeleteJobs(job_id, slice_hrn)
1607 logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
1608 \r\n " %(leases_id_list, slice_hrn))