3 from datetime import datetime
5 from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
6 from sfa.util.sfalogging import logger
8 from sfa.storage.alchemy import dbsession
9 from sfa.storage.model import RegRecord, RegUser
11 from sfa.trust.credential import Credential
14 from sfa.managers.driver import Driver
15 from sfa.rspecs.version_manager import VersionManager
16 from sfa.rspecs.rspec import RSpec
18 from sfa.util.xrn import hrn_to_urn
21 ## thierry: everything that is API-related (i.e. handling incoming requests)
23 # SlabDriver should be really only about talking to the senslab testbed
26 from sfa.senslab.OARrestapi import OARrestapi
27 from sfa.senslab.LDAPapi import LDAPapi
29 from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
31 from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
33 from sfa.senslab.slabslices import SlabSlices
38 # this inheritance scheme is so that the driver object can receive
39 # GetNodes or GetSites sorts of calls directly
40 # and thus minimize the differences in the managers with the pl version
41 class SlabDriver(Driver):
42 """ Senslab Driver class inherited from Driver generic class.
44 Contains methods compliant with the SFA standard and the testbed
45 infrastructure (calls to LDAP and OAR).
def __init__(self, config):
    """Initialize the Senslab driver from the SFA configuration.

    config: SFA config object; SFA_INTERFACE_HRN and
    SFA_REGISTRY_ROOT_AUTH are read from it.
    """
    Driver.__init__ (self, config)
    self.hrn = config.SFA_INTERFACE_HRN
    self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
    # REST client used for all OAR scheduler calls (nodes, jobs, sites).
    self.oar = OARrestapi()
    # NOTE(review): other methods use self.ldap (see GetPersons); its
    # initialization is on a line missing from this view -- confirm.
    # Date format used for every OAR-facing timestamp in this driver.
    self.time_format = "%Y-%m-%d %H:%M:%S"
    # Senslab-specific slice database wrapper.
    # NOTE(review): semantics of debug=True come from SlabDB -- confirm.
    self.db = SlabDB(config, debug = True)
def sliver_status(self, slice_urn, slice_hrn):
    """Receive a status request for slice named urn/hrn
    urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
    shall return a structure as described in
    http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
    NT : not sure if we should implement this or not, but used by sface.
    """
    # NOTE(review): several lines of this method are missing from this
    # view (definitions of 'sl', 'result', 'res', 'resources',
    # 'slice_nodes_list', and the loop over slice_list). Comments below
    # describe only the visible code.
    #First get the slice with the slice hrn
    slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                slice_filter_type = 'slice_hrn')

    # NOTE(review): 'is 0' is an identity test that only works via
    # CPython small-int interning; should be '== 0' / 'if not slice_list'.
    if len(slice_list) is 0:
        raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))

    #Slice has the same slice hrn for each slice in the slice/lease list
    #So fetch the info on the user once
    one_slice = slice_list[0]
    recuser = dbsession.query(RegRecord).filter_by(record_id = \
                            one_slice['record_id_user']).first()

    #Make a list of all the nodes hostnames in use for this slice
    # NOTE(review): 'sl' presumably iterates slice_list -- the loop
    # header is missing from this view.
    for node in sl['node_ids']:
        slice_nodes_list.append(node['hostname'])

    #Get all the corresponding nodes details
    nodes_all = self.GetNodes({'hostname':slice_nodes_list},
                    ['node_id', 'hostname','site','boot_state'])
    # Index node details by hostname for the per-node report below.
    nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])

    top_level_status = 'empty'
    # NOTE(review): orphaned fragment -- the call these arguments belong
    # to (probably building 'result') is missing from this view.
    ['geni_urn','pl_login','geni_status','geni_resources'], None)
    result['pl_login'] = recuser.hrn
    logger.debug("Slabdriver - sliver_status Sliver status \
                    urn %s hrn %s sl %s \r\n " \
                    %(slice_urn, slice_hrn, sl))
    nodes_in_slice = sl['node_ids']
    result['geni_status'] = top_level_status
    result['geni_resources'] = []
    top_level_status = 'ready'
    #A job is running on Senslab for this slice
    # report about the local nodes that are in the slice only
    result['geni_urn'] = slice_urn

    #timestamp = float(sl['startTime']) + float(sl['walltime'])
    #result['pl_expires'] = strftime(self.time_format, \
    #gmtime(float(timestamp)))
    #result['slab_expires'] = strftime(self.time_format,\
    #gmtime(float(timestamp)))

    # Build one GENI resource entry per node of the slice.
    for node in sl['node_ids']:
        #res['slab_hostname'] = node['hostname']
        #res['slab_boot_state'] = node['boot_state']
        res['pl_hostname'] = node['hostname']
        res['pl_boot_state'] = \
                nodeall_byhostname[node['hostname']]['boot_state']
        #res['pl_last_contact'] = strftime(self.time_format, \
        #gmtime(float(timestamp)))
        sliver_id = Xrn(slice_urn, type='slice', \
                id=nodeall_byhostname[node['hostname']]['node_id'], \
                authority=self.hrn).urn

        res['geni_urn'] = sliver_id
        # A node counts as 'ready' only when OAR reports it Alive.
        if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':

            res['geni_status'] = 'ready'
        # NOTE(review): the 'else:' introducing the failure branch is
        # missing from this view.
            res['geni_status'] = 'failed'
            top_level_status = 'failed'

        res['geni_error'] = ''

        resources.append(res)

    result['geni_status'] = top_level_status
    result['geni_resources'] = resources
    # NOTE(review): the argument list of this final debug call is
    # missing from this view.
    logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
# Allocate/renew resources for a slice: verifies the slice, its users
# and its leases against the request RSpec, then returns the resulting
# manifest RSpec.
# NOTE(review): the continuation of the 'def' line (the remaining
# parameters, presumably 'users, options):') is missing from this view,
# as are several body lines flagged below.
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
    aggregate = SlabAggregate(self)

    slices = SlabSlices(self)
    # Peer info decides whether records are bound to another authority.
    peer = slices.get_peer(slice_hrn)
    sfa_peer = slices.get_sfa_peer(slice_hrn)

    # Normalize creds to a list -- the branch body is missing here.
    if not isinstance(creds, list):
    # 'users' belongs to the truncated signature -- TODO confirm.
    slice_record = users[0].get('slice_record', {})

    # Parse the incoming request RSpec.
    rspec = RSpec(rspec_string)
    logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version \
                    %s slice_record %s " \
                    %(rspec.version,slice_record))

    # ensure site record exists?
    # ensure slice record exists
    #Removed options to verify_slice SA 14/08/12
    # NOTE(review): the continuation of this call is missing here.
    sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \

    #requested_attributes returned by rspec.version.get_slice_attributes()
    #unused, removed SA 13/08/12
    rspec.version.get_slice_attributes()

    logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))

    # ensure person records exists
    #verify_persons returns added persons but since the return value
    # is unused here it is discarded (comment truncated in this view)
    slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
                                        sfa_peer, options=options)

    # add/remove slice from nodes

    requested_slivers = [node.get('component_name') \
                for node in rspec.version.get_nodes_with_slivers()]
    l = [ node for node in rspec.version.get_nodes_with_slivers() ]
    logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                        requested_slivers %s listnodes %s" \
                        %(requested_slivers,l))
    #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
    #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)

    # Collect the new (id-less) lease requests from the RSpec.
    requested_lease_list = []

    logger.debug("SLABDRIVER.PY \tcreate_sliver AVANTLEASE " )
    rspec_requested_leases = rspec.version.get_leases()
    for lease in rspec.version.get_leases():
        single_requested_lease = {}
        logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
        # Leases that already carry an OAR job id are not re-requested.
        if not lease.get('lease_id'):
            single_requested_lease['hostname'] = \
                    slab_xrn_to_hostname(\
                    lease.get('component_id').strip())
            single_requested_lease['start_time'] = lease.get('start_time')
            single_requested_lease['duration'] = lease.get('duration')

        if single_requested_lease.get('hostname'):
            requested_lease_list.append(single_requested_lease)

    logger.debug("SLABDRIVER.PY \tcreate_sliver APRESLEASE" )
    #dCreate dict of leases by start_time, regrouping nodes reserved
    #at the same time, for the same amount of time = one job on OAR
    requested_job_dict = {}
    for lease in requested_lease_list:

        #In case it is an asap experiment start_time is empty
        if lease['start_time'] == '':
            lease['start_time'] = '0'

        if lease['start_time'] not in requested_job_dict:
            if isinstance(lease['hostname'], str):
                lease['hostname'] = [lease['hostname']]

            requested_job_dict[lease['start_time']] = lease
        # NOTE(review): the 'else:' merging leases that share a start
        # time is missing from this view.
            job_lease = requested_job_dict[lease['start_time']]
            if lease['duration'] == job_lease['duration'] :
                job_lease['hostname'].append(lease['hostname'])

    logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
                                            %(requested_job_dict))
    #verify_slice_leases returns the leases , but the return value is unused
    #here. Removed SA 13/08/12
    slices.verify_slice_leases(sfa_slice, \
                                requested_job_dict, peer)

    return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
    """Remove every OAR job attached to the slice named slice_hrn.

    NOTE(review): the empty-list early-return body, the 'if peer:'
    guards around Un/BindObjectFromPeer, the continuation of the
    UnBindObjectFromPeer call and the final return are all on lines
    missing from this view.
    """
    sfa_slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')

    if not sfa_slice_list:

    #Delete all in the slice
    for sfa_slice in sfa_slice_list:

        logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
        slices = SlabSlices(self)
        # determine if this is a peer slice
        peer = slices.get_peer(slice_hrn)
        #TODO delete_sliver SA : UnBindObjectFromPeer should be
        #used when there is another
        #senslab testbed, which is not the case 14/08/12 .

        logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
        # Unbind from the peer before deleting, rebind afterwards so the
        # peer linkage survives the deletion.
        self.UnBindObjectFromPeer('slice', \
                                sfa_slice['record_id_slice'], \
        # Cancel the slice's jobs on the testbed nodes.
        self.DeleteSliceFromNodes(sfa_slice)
        self.BindObjectToPeer('slice', \
                                sfa_slice['record_id_slice'], \
                                peer, sfa_slice['peer_slice_id'])
def AddSlice(self, slice_record):
    """Insert a new slice row into the Senslab slice table.

    slice_record: dict providing 'slice_hrn', 'record_id_slice',
    'record_id_user' and 'peer_authority'. The insert is committed
    immediately.
    """
    new_row = SliceSenslab(
        slice_hrn=slice_record['slice_hrn'],
        record_id_slice=slice_record['record_id_slice'],
        record_id_user=slice_record['record_id_user'],
        peer_authority=slice_record['peer_authority'])
    logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
                 % (slice_record, new_row))
    slab_dbsession.add(new_row)
    slab_dbsession.commit()
314 # first 2 args are None in case of resource discovery
315 def list_resources (self, slice_urn, slice_hrn, creds, options):
316 #cached_requested = options.get('cached', True)
318 version_manager = VersionManager()
319 # get the rspec's return format from options
321 version_manager.get_version(options.get('geni_rspec_version'))
322 version_string = "rspec_%s" % (rspec_version)
324 #panos adding the info option to the caching key (can be improved)
325 if options.get('info'):
326 version_string = version_string + "_" + \
327 options.get('info', 'default')
329 # look in cache first
330 #if cached_requested and self.cache and not slice_hrn:
331 #rspec = self.cache.get(version_string)
333 #logger.debug("SlabDriver.ListResources: \
334 #returning cached advertisement")
337 #panos: passing user-defined options
338 aggregate = SlabAggregate(self)
339 origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
340 options.update({'origin_hrn':origin_hrn})
341 rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
342 version=rspec_version, options=options)
345 #if self.cache and not slice_hrn:
346 #logger.debug("Slab.ListResources: stores advertisement in cache")
347 #self.cache.add(version_string, rspec)
def list_slices (self, creds, options):
    """Return the URNs of all slices known to the Senslab db.

    NOTE(review): the final 'return slice_urns' is on a line missing
    from this view.
    """
    # look in cache first
    #slices = self.cache.get('slices')
        #logger.debug("PlDriver.list_slices returns from cache")

    slices = self.GetSlices()
    logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
    slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
    #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
                                                #for slab_slice in slices]
    # Convert each hrn to the public slice URN form.
    slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                                        for slice_hrn in slice_hrns]

    #logger.debug ("SlabDriver.list_slices stores value in cache")
    #self.cache.add('slices', slice_urns)
def register (self, sfa_record, hrn, pub_key):
    """Registering is delegated to the testbed's own mechanisms.

    Adding new user, slice, node or site should not be handled
    by this driver directly:
    Adding users = LDAP Senslab
    Adding slice = Import from LDAP users
    (NOTE(review): docstring reconstructed -- its opening quotes were on
    a line missing from this view; confirm wording against full file.)
    """
390 #No site or node record update allowed
#No site or node record update allowed
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
    """Update a slice or user record (sites/nodes are not updatable).

    NOTE(review): many lines are missing from this view: the
    'update_fields = {}' init, the end of the key-name list, the
    'if new_key:' guard, the 'for person in persons'/'for key in keys'
    loop headers, the key-exists bookkeeping and the final return.
    """
    pointer = old_sfa_record['pointer']
    old_sfa_record_type = old_sfa_record['type']

    # new_key implemented for users only
    if new_key and old_sfa_record_type not in [ 'user' ]:
        raise UnknownSfaType(old_sfa_record_type)

    #if (type == "authority"):
        #self.shell.UpdateSite(pointer, new_sfa_record)

    if old_sfa_record_type == "slice":
        # NOTE(review): the continuation of this call is missing here.
        slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
        if 'name' in slab_record:
            slab_record.pop('name')
            #Prototype should be UpdateSlice(self,
            #auth, slice_id_or_name, slice_fields)
            #Senslab cannot update slice since slice = job
            #so we must delete and create another job
            self.UpdateSlice(pointer, slab_record)

    elif old_sfa_record_type == "user":
        all_fields = new_sfa_record
        # Copy over only the whitelisted person fields.
        for key in all_fields.keys():
            if key in ['first_name', 'last_name', 'title', 'email',
                       'password', 'phone', 'url', 'bio', 'accepted_aup',
                update_fields[key] = all_fields[key]
        self.UpdatePerson(pointer, update_fields)

            # must check this key against the previous one if it exists
            persons = self.GetPersons([pointer], ['key_ids'])
            keys = person['key_ids']
            keys = self.GetKeys(person['key_ids'])

            # Delete all stale keys
                if new_key != key['key']:
                    self.DeleteKey(key['key_id'])
                    self.AddPersonKey(pointer, {'key_type': 'ssh', \
def remove (self, sfa_record):
    """Remove a user (disable in LDAP) or a slice (delete its jobs).

    NOTE(review): the final return is on a line missing from this view.
    """
    sfa_record_type = sfa_record['type']
    hrn = sfa_record['hrn']
    if sfa_record_type == 'user':

        #get user from senslab ldap
        person = self.GetPersons(sfa_record)
        #No registering at a given site in Senslab.
        #Once registered to the LDAP, all senslab sites are
        # available to the user (comment truncated in this view)

        #Mark account as disabled in ldap
        self.DeletePerson(sfa_record)
    elif sfa_record_type == 'slice':
        # Only delete if the slice actually exists in the slab db.
        if self.GetSlices(slice_filter = hrn, \
                            slice_filter_type = 'slice_hrn'):
            self.DeleteSlice(sfa_record)

    #elif type == 'authority':
        #if self.GetSites(pointer):
            #self.DeleteSite(pointer)
471 #TODO clean GetPeers. 05/07/12SA
#TODO clean GetPeers. 05/07/12SA
def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
    """Return registry records of peer authorities.

    NOTE(review): the 'records_list = []' init, the 'if peer_filter:'
    / 'else:' lines, and the try/except around the lookups are on lines
    missing from this view.
    """
    existing_records = {}
    existing_hrns_by_types = {}
    logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                return_field %s " %(auth , peer_filter, return_fields_list))
    # All authority-typed records from the SFA registry database.
    all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
    # Index records by (hrn, type), and group hrns per type.
    for record in all_records:
        existing_records[(record.hrn, record.type)] = record
        if record.type not in existing_hrns_by_types:
            existing_hrns_by_types[record.type] = [record.hrn]
            logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                existing_hrns_by_types %s " %( existing_hrns_by_types))
            logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                                                %(record.type,record.hrn))
            existing_hrns_by_types[record.type].append(record.hrn)

    logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                                        %( existing_hrns_by_types))
        # Either the single filtered authority, or all of them.
        records_list.append(existing_records[(peer_filter,'authority')])
            for hrn in existing_hrns_by_types['authority']:
                records_list.append(existing_records[(hrn,'authority')])

        logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \

    return_records = records_list
    if not peer_filter and not return_fields_list:

    logger.debug("SLABDRIVER \tGetPeer return_records %s " \
    return return_records
519 #TODO : Handling OR request in make_ldap_filters_from_records
520 #instead of the for loop
521 #over the records' list
#TODO : Handling OR request in make_ldap_filters_from_records
#instead of the for loop
#over the records' list
def GetPersons(self, person_filter=None):
    """Look up enabled user accounts in the Senslab LDAP.

    person_filter should be a list of dictionnaries when not set to None.
    Returns a list of users whose accounts are enabled found in ldap.
    (NOTE(review): docstring quotes reconstructed -- the opening line is
    missing from this view, as are the 'person_list = []' init, the
    'else:' branch line and the final return.)
    """
    logger.debug("SLABDRIVER \tGetPersons person_filter %s" \

    if person_filter and isinstance(person_filter, list):
        #If we are looking for a list of users (list of dict records)
        #Usually the list contains only one user record
        for searched_attributes in person_filter:

            #Get only enabled user accounts in senslab LDAP :
            #add a filter for make_ldap_filters_from_record
            person = self.ldap.LdapFindUser(searched_attributes, \
                            is_user_enabled=True)
            person_list.append(person)

        #Get only enabled user accounts in senslab LDAP :
        #add a filter for make_ldap_filters_from_record
        person_list = self.ldap.LdapFindUser(is_user_enabled=True)
def GetTimezone(self):
    """Ask OAR for its current time and timezone.

    Returns a (server_timestamp, server_timezone) pair.
    """
    response = self.oar.parser.SendRequest("GET_timezone")
    server_timestamp, server_tz = response
    return server_timestamp, server_tz
def DeleteJobs(self, job_id, slice_hrn):
    """Ask OAR to delete the job job_id on behalf of the slice's owner.

    NOTE(review): the early-return body, the 'reqdict = {}' init, the
    continuation of the POST call and the final return are on lines
    missing from this view.
    """
    # NOTE(review): 'job_id is -1' is an identity test that depends on
    # CPython int caching; it should be 'job_id == -1'.
    if not job_id or job_id is -1:
    # NOTE(review): rstrip("_slice") strips a *character set*, not the
    # suffix -- a username ending in one of '_slice' would be truncated.
    # str.removesuffix / slicing would be correct.
    username  = slice_hrn.split(".")[-1].rstrip("_slice")
    reqdict['method'] = "delete"
    reqdict['strval'] = str(job_id)

    answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
    logger.debug("SLABDRIVER \tDeleteJobs jobid  %s \r\n answer %s \
                            username %s" %(job_id,answer, username))
572 ##TODO : Unused GetJobsId ? SA 05/07/12
573 #def GetJobsId(self, job_id, username = None ):
575 #Details about a specific job.
576 #Includes details about submission time, jot type, state, events,
577 #owner, assigned ressources, walltime etc...
581 #node_list_k = 'assigned_network_address'
582 ##Get job info from OAR
583 #job_info = self.oar.parser.SendRequest(req, job_id, username)
585 #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
587 #if job_info['state'] == 'Terminated':
588 #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
591 #if job_info['state'] == 'Error':
592 #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
597 #logger.error("SLABDRIVER \tGetJobsId KeyError")
600 #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
602 ##Replaces the previous entry
603 ##"assigned_network_address" / "reserved_resources"
605 #job_info.update({'node_ids':parsed_job_info[node_list_k]})
606 #del job_info[node_list_k]
607 #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
def GetJobsResources(self, job_id, username = None):
    """Return {'node_ids': hostnames} for the resources of an OAR job.

    NOTE(review): the assignment 'hostname_list = \\' preceding the
    private-helper call and the final 'return job_info' are on lines
    missing from this view.
    """
    #job_resources=['reserved_resources', 'assigned_resources',\
                #'job_id', 'job_uri', 'assigned_nodes',\
    #assigned_res = ['resource_id', 'resource_uri']
    #assigned_n = ['node', 'node_uri']

    req = "GET_jobs_id_resources"

    #Get job resources list from OAR
    node_id_list = self.oar.parser.SendRequest(req, job_id, username)
    logger.debug("SLABDRIVER \t GetJobsResources  %s " %(node_id_list))

    # Translate OAR resource ids into node hostnames.
    self.__get_hostnames_from_oar_node_ids(node_id_list)

    #Replaces the previous entry "assigned_network_address" /
    #"reserved_resources"
    job_info = {'node_ids': hostname_list}
def get_info_on_reserved_nodes(self, job_info, node_list_name):
    """Resolve job_info[node_list_name] entries to node hostnames.

    NOTE(review): the 'try:' and 'except KeyError:' lines wrapping the
    loop are missing from this view.
    """
    #Get the list of the testbed nodes records and make a
    #dictionnary keyed on the hostname out of it
    node_list_dict = self.GetNodes()
    #node_hostname_list = []
    node_hostname_list = [node['hostname'] for node in node_list_dict]
    #for node in node_list_dict:
        #node_hostname_list.append(node['hostname'])
    node_dict = dict(zip(node_hostname_list, node_list_dict))
    reserved_node_hostname_list = []
    for index in range(len(job_info[node_list_name])):
        #job_info[node_list_name][k] =
        # NOTE(review): indexing into a freshly-created empty list raises
        # IndexError on the first iteration -- this should almost
        # certainly be .append(...). Confirm against the full file.
        reserved_node_hostname_list[index] = \
                node_dict[job_info[node_list_name][index]]['hostname']

    logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
                    reserved_node_hostname_list %s" \
                    %(reserved_node_hostname_list))
        logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )

    return reserved_node_hostname_list
def GetNodesCurrentlyInUse(self):
    """List every node currently taking part in an OAR job."""
    running_jobs_query = "GET_running_jobs"
    return self.oar.parser.SendRequest(running_jobs_query)
def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
    """Map OAR resource ids to node hostnames.

    resource_id_list: resource ids as returned by OAR; entries may be
    the string "Undefined" for jobs requested "asap", whose resources
    are not allocated yet. Returns the hostnames of defined resources.
    """
    full_nodes_dict_list = self.GetNodes()
    #Put the full node list into a dictionary keyed by oar node id
    oar_id_node_dict = {}
    for node in full_nodes_dict_list:
        oar_id_node_dict[node['oar_id']] = node

    #logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
                    #oar_id_node_dict %s" %(oar_id_node_dict))

    hostname_dict_list = []
    for resource_id in resource_id_list:
        #Because jobs requested "asap" do not have defined resources.
        # BUGFIX: the original used 'is not "Undefined"', an *identity*
        # test that is True for any equal-but-distinct string object
        # (e.g. one parsed from the OAR response), so the guard never
        # filtered anything. Use value equality instead.
        if resource_id != "Undefined":
            hostname_dict_list.append(\
                    oar_id_node_dict[resource_id]['hostname'])

        #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
    return hostname_dict_list
def GetReservedNodes(self,username = None):
    """Return OAR reservations, each augmented with 'reserved_nodes'.

    username: restrict to that OAR user's reservations when given.
    Each reservation dict gains a 'reserved_nodes' hostname list derived
    from its 'resource_ids'.
    """
    #Get the nodes in use and the reserved nodes
    reservation_dict_list =  \
            self.oar.parser.SendRequest("GET_reserved_nodes", username = username)

    for resa in reservation_dict_list:
        logger.debug ("GetReservedNodes resa %s"%(resa))
        #dict list of hostnames and their site
        resa['reserved_nodes'] = \
            self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])

    #del resa['resource_ids']
    return reservation_dict_list
def GetNodes(self, node_filter_dict = None, return_fields_list = None):
    """Return testbed node records from OAR, optionally filtered.

    node_filter_dict : dictionnary of lists
    (NOTE(review): docstring quotes and further lines reconstructed --
    the 'tmp = {}' init, the 'else:' branch, the try/except lines and
    part of the return logic are missing from this view.)
    """
    node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
    # NOTE(review): .values() is a view in py3, a list in py2 -- callers
    # that index the result rely on py2 semantics; confirm.
    node_dict_list = node_dict_by_id.values()
    logger.debug (" SLABDRIVER GetNodes  node_filter_dict %s return_fields_list %s "%(node_filter_dict,return_fields_list))
    #No filtering needed return the list directly
    if not (node_filter_dict or return_fields_list):
        return node_dict_list

    return_node_list = []
    for filter_key in node_filter_dict:
        #Filter the node_dict_list by each value contained in the
        #list node_filter_dict[filter_key]
        for value in node_filter_dict[filter_key]:
            for node in node_dict_list:
                if node[filter_key] == value:
                    if return_fields_list :
                        # Project only the requested fields into 'tmp'.
                        for k in return_fields_list:
                        return_node_list.append(tmp)
                        return_node_list.append(node)
                    logger.log_exc("GetNodes KeyError")

    return return_node_list
def GetSites(self, site_filter_name_list = None, return_fields_list = None):
    """Return site records from OAR, optionally filtered/projected.

    NOTE(review): the 'tmp = {}' init, the try/except lines around the
    field copy and the final 'else:' are on lines missing from this
    view.
    """
    site_dict = self.oar.parser.SendRequest("GET_sites")
    #site_dict : dict where the key is the site name
    return_site_list = []
    # No filters: return every site record.
    if not ( site_filter_name_list or return_fields_list):
        return_site_list = site_dict.values()
        return return_site_list

    for site_filter_name in site_filter_name_list:
        if site_filter_name in site_dict:
            if return_fields_list:
                # Project only the requested fields.
                for field in return_fields_list:
                        tmp[field] = site_dict[site_filter_name][field]
                        logger.error("GetSites KeyError %s "%(field))
                return_site_list.append(tmp)
                return_site_list.append( site_dict[site_filter_name])

    return return_site_list
def GetSlices(self, slice_filter = None, slice_filter_type = None):
#def GetSlices(self, slice_filter = None, slice_filter_type = None, \
#return_fields_list = None):
    """ Get the slice records from the slab db.
    Returns a slice dict if slice_filter and slice_filter_type
    are given; returns a list of slice dictionnaries if there are no
    filters.
    (NOTE(review): docstring closing and many body lines -- including
    the init of 'slicerec_dict' / 'reserved_list', several 'else:'
    lines, and guard conditions -- are missing from this view.)
    """
    return_slice_list = []
    # Only these two filter kinds are supported.
    authorized_filter_types_list = ['slice_hrn', 'record_id_user']
    slicerec_dictlist = []

    if slice_filter_type in authorized_filter_types_list:

        # Nested helper: fetch one slice row and the owner's LDAP login.
        def __get_slice_records(slice_filter = None, slice_filter_type = None):

            #Get list of slices based on the slice hrn
            if slice_filter_type == 'slice_hrn':
                # Owner login is the middle part of the slice hrn.
                login = slice_filter.split(".")[1].split("_")[0]

                #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                #otherwise will mess up the RegRecord in Resolve, don't know
                # why (comment truncated in this view)

                #Only one entry for one user  = one slice in slice_senslab table
                slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()

            #Get slice based on user id
            if slice_filter_type == 'record_id_user':
                slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()

            # NOTE(review): the None-check on slicerec is missing here.
            fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
            login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
            return login, fixed_slicerec_dict

        login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
        logger.debug(" SLABDRIVER \tGetSlices login %s \
                        %(login, fixed_slicerec_dict))

        #One slice can have multiple jobs
        leases_list = self.GetReservedNodes(username = login)
        #If no job is running or no job scheduled
        if leases_list == [] :
            return [fixed_slicerec_dict]

        #Several jobs for one slice
        for lease in leases_list :
            #Check with OAR the status of the job if a job id is in
            # the lease (comment truncated in this view)
            slicerec_dict['oar_job_id'] = lease['lease_id']

            #for reserved_node in lease['reserved_nodes']:
                #reserved_list.append(reserved_node['hostname'])
            reserved_list = lease['reserved_nodes']
            #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
            slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
            slicerec_dict.update({'node_ids':lease['reserved_nodes']})
            #If the slice does not belong to senslab:
            if fixed_slicerec_dict:
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})

            slicerec_dictlist.append(slicerec_dict)
            logger.debug("SLABDRIVER.PY  \tGetSlices  slicerec_dict %s slicerec_dictlist %s lease['reserved_nodes'] %s" %(slicerec_dict, slicerec_dictlist,lease['reserved_nodes'] ))

        logger.debug("SLABDRIVER.PY  \tGetSlices  RETURN slicerec_dictlist  %s"\
                %(slicerec_dictlist))

        return slicerec_dictlist

    # No filter: list every slice, joined with its current leases.
    slice_list = slab_dbsession.query(SliceSenslab).all()
    leases_list = self.GetReservedNodes()

    slicerec_dictlist = []
    return_slice_list = []
    for record in slice_list:
        return_slice_list.append(record.dump_sqlalchemyobj_to_dict())

    for fixed_slicerec_dict in return_slice_list:
        # NOTE(review): slicerec_dict / reserved_list inits are missing.
        owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
        for lease in leases_list:
            if owner == lease['user']:
                slicerec_dict['oar_job_id'] = lease['lease_id']

                #for reserved_node in lease['reserved_nodes']:
                logger.debug("SLABDRIVER.PY  \tGetSlices lease %s "\
                                                        %(lease ))
                #reserved_list.append(reserved_node['hostname'])
                reserved_list.extend(lease['reserved_nodes'])
                #slicerec_dict.update({'node_ids':{'hostname':reserved_list}})
                #slicerec_dict.update({'node_ids':[lease['reserved_nodes'][n]['hostname'] for n in lease['reserved_nodes']]})
                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})
                slicerec_dictlist.append(slicerec_dict)

    logger.debug("SLABDRIVER.PY  \tGetSlices RETURN slices %s \
                slice_filter %s " %(return_slice_list, slice_filter))

    #if return_fields_list:
        #return_slice_list  = parse_filter(sliceslist, \
                                #slice_filter,'slice', return_fields_list)

    return slicerec_dictlist
def testbed_name (self):
    """Return the root HRN identifying this testbed interface."""
    return self.hrn
907 # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
def aggregate_version (self):
    """Advertise the RSpec versions this aggregate speaks.

    'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are
    mandatory in the returned structure.
    NOTE(review): the 'return {' line opening the result dict (and its
    closing brace) are on lines missing from this view.
    """
    version_manager = VersionManager()
    ad_rspec_versions = []
    request_rspec_versions = []
    # Sort every known version into ad and/or request capability lists.
    for rspec_version in version_manager.versions:
        if rspec_version.content_type in ['*', 'ad']:
            ad_rspec_versions.append(rspec_version.to_dict())
        if rspec_version.content_type in ['*', 'request']:
            request_rspec_versions.append(rspec_version.to_dict())
        'testbed':self.testbed_name(),
        'geni_request_rspec_versions': request_rspec_versions,
        'geni_ad_rspec_versions': ad_rspec_versions,
929 # Convert SFA fields to PLC fields for use when registering up updating
930 # registry record in the PLC database
932 # @param type type of record (user, slice, ...)
933 # @param hrn human readable name
934 # @param sfa_fields dictionary of SFA fields
935 # @param slab_fields dictionary of PLC fields (output)
def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
    """Convert an SFA record dict into Senslab-side fields.

    sfa_type: record type ('slice', ...); hrn: human readable name;
    record: SFA field dict.
    NOTE(review): the 'slab_record = {}' init, the 'if "url" in record:'
    guard and the final 'return slab_record' are on lines missing from
    this view.
    """
    #for field in record:
    #    slab_record[field] = record[field]

    if sfa_type == "slice":
        #instantion used in get_slivers ?
        if not "instantiation" in slab_record:
            slab_record["instantiation"] = "senslab-instantiated"
        #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
        #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
        slab_record["hrn"] = hrn
        logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
                        slab_record %s  " %(slab_record['hrn']))
            slab_record["url"] = record["url"]
        if "description" in record:
            slab_record["description"] = record["description"]
        if "expires" in record:
            slab_record["expires"] = int(record["expires"])

    #nodes added by OAR only and then imported to SFA
    #elif type == "node":
        #if not "hostname" in slab_record:
            #if not "hostname" in record:
                #raise MissingSfaInfo("hostname")
            #slab_record["hostname"] = record["hostname"]
        #if not "model" in slab_record:
            #slab_record["model"] = "geni"

    #elif type == "authority":
        #slab_record["login_base"] = hrn_to_slab_login_base(hrn)

        #if not "name" in slab_record:
            #slab_record["name"] = hrn

        #if not "abbreviated_name" in slab_record:
            #slab_record["abbreviated_name"] = hrn

        #if not "enabled" in slab_record:
            #slab_record["enabled"] = True

        #if not "is_public" in slab_record:
            #slab_record["is_public"] = True
def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
    """ Transforms unix timestamp into valid OAR date format
    (self.time_format). Returns the formatted local-time string.
    """
    #Used in case of a scheduled experiment (not immediate)
    #To run an XP immediately, don't specify date and time in RSpec
    #They will be set to None.
    # NOTE(review): a guard such as 'if xp_utc_timestamp:' may be on a
    # line missing from this view -- int(None) would raise here.
    #transform the xp_utc_timestamp into server readable time
    xp_server_readable_date = datetime.fromtimestamp(int(\
                            xp_utc_timestamp)).strftime(self.time_format)

    return xp_server_readable_date
1009 def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
1010 lease_start_time, lease_duration, slice_user=None):
1012 lease_dict['lease_start_time'] = lease_start_time
1013 lease_dict['lease_duration'] = lease_duration
1014 lease_dict['added_nodes'] = added_nodes
1015 lease_dict['slice_name'] = slice_name
1016 lease_dict['slice_user'] = slice_user
1017 lease_dict['grain'] = self.GetLeaseGranularity()
1018 lease_dict['time_format'] = self.time_format
1020 def __create_job_structure_request_for_OAR(lease_dict):
1021 """ Creates the structure needed for a correct POST on OAR.
1022 Makes the timestamp transformation into the appropriate format.
1023 Sends the POST request to create the job with the resources in
1032 reqdict['workdir'] = '/tmp'
1033 reqdict['resource'] = "{network_address in ("
1035 for node in lease_dict['added_nodes']:
1036 logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
1039 # Get the ID of the node
1041 reqdict['resource'] += "'" + nodeid + "', "
1042 nodeid_list.append(nodeid)
1044 custom_length = len(reqdict['resource'])- 2
1045 reqdict['resource'] = reqdict['resource'][0:custom_length] + \
1046 ")}/nodes=" + str(len(nodeid_list))
1048 def __process_walltime(duration):
1049 """ Calculates the walltime in seconds from the duration in H:M:S
1050 specified in the RSpec.
1054 # Fixing the walltime by adding a few delays.
1055 # First put the walltime in seconds oarAdditionalDelay = 20;
1056 # additional delay for /bin/sleep command to
1057 # take in account prologue and epilogue scripts execution
1058 # int walltimeAdditionalDelay = 240; additional delay
1059 desired_walltime = duration
1060 total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
1061 sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
1063 #Put the walltime back in str form
1064 #First get the hours
1065 walltime.append(str(total_walltime / 3600))
1066 total_walltime = total_walltime - 3600 * int(walltime[0])
1067 #Get the remaining minutes
1068 walltime.append(str(total_walltime / 60))
1069 total_walltime = total_walltime - 60 * int(walltime[1])
1071 walltime.append(str(total_walltime))
1074 logger.log_exc(" __process_walltime duration null")
1076 return walltime, sleep_walltime
1079 walltime, sleep_walltime = \
1080 __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
1083 reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
1084 ":" + str(walltime[1]) + ":" + str(walltime[2])
1085 reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
1087 #In case of a scheduled experiment (not immediate)
1088 #To run an XP immediately, don't specify date and time in RSpec
1089 #They will be set to None.
1090 if lease_dict['lease_start_time'] is not '0':
1091 #Readable time accepted by OAR
1092 start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
1093 strftime(lease_dict['time_format'])
1094 reqdict['reservation'] = start_time
1095 #If there is not start time, Immediate XP. No need to add special
1099 reqdict['type'] = "deploy"
1100 reqdict['directory'] = ""
1101 reqdict['name'] = "SFA_" + lease_dict['slice_user']
1106 #Create the request for OAR
1107 reqdict = __create_job_structure_request_for_OAR(lease_dict)
1108 # first step : start the OAR job and update the job
1109 logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
1112 answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
1113 reqdict, slice_user)
1114 logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
1116 jobid = answer['id']
1118 logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
1119 Impossible to create job %s " %(answer))
# Write the reserved node ids for this OAR job to /tmp/sfa/<jobid>.json
# as a comma-separated list, for the senslab experiment wrapper to read.
1123 def __configure_experiment(jobid, added_nodes):
1124 # second step : configure the experiment
1125 # we need to store the nodes in a yaml (well...) file like this :
1126 # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
# NOTE(review): file is opened but the close() presumably sits on an
# elided line (1132-1135); also assumes /tmp/sfa/ already exists and
# added_nodes is non-empty — TODO confirm in the full source.
1127 job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
# NOTE(review): str.strip('node') strips the characters n/o/d/e from both
# ends, not the literal prefix "node" — works for names like "node12" but
# would mangle ids containing those letters; verify hostname format.
1129 job_file.write(str(added_nodes[0].strip('node')))
1130 for node in added_nodes[1:len(added_nodes)] :
1131 job_file.write(', '+ node.strip('node'))
# Launch the java senslab-experiment wrapper for the given OAR job id,
# passing the job id and the slice user (closed over from the enclosing
# scope) on the command line, and capture its stdout.
1136 def __launch_senslab_experiment(jobid):
1137 # third step : call the senslab-experiment wrapper
1138 #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
1139 # "+str(jobid)+" "+slice_user
1140 javacmdline = "/usr/bin/java"
# NOTE(review): the `jarname = \` assignment line (1141) is elided in this
# extract; the string below is its continuation.
1142 "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
1143 #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
1144 #str(jobid), slice_user])
# Blocks until the wrapper exits; stderr is not captured.
1145 output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
1146 slice_user],stdout=subprocess.PIPE).communicate()[0]
1148 logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \
1155 logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
1156 added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
1159 __configure_experiment(jobid, added_nodes)
1160 __launch_senslab_experiment(jobid)
# Create a lease: derive the requesting user's login from the first
# researcher hrn of the slice record, then start the corresponding OAR
# job via LaunchExperimentOnOAR.
1164 def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
1165 logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
1166 slice_record %s lease_start_time %s lease_duration %s "\
1167 %( hostname_list, slice_record , lease_start_time, \
# Username = leaf of the researcher hrn (tmp[-1] would be equivalent).
1170 tmp = slice_record['reg-researchers'][0].split(".")
1171 username = tmp[(len(tmp)-1)]
1172 self.LaunchExperimentOnOAR(hostname_list, slice_record['slice_hrn'], lease_start_time, lease_duration, username)
# start_time is only computed for the debug log below.
1173 start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
1174 logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
1179 #Delete the jobs from job_senslab table
# Kill the OAR job attached to this slice record; delegates to DeleteJobs
# with the slice's oar_job_id and hrn.
1180 def DeleteSliceFromNodes(self, slice_record):
1182 self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
# NOTE(review): the actual granularity value / return statement sits on
# elided lines (1190-1193) of this extract.
1186 def GetLeaseGranularity(self):
1187 """ Returns the granularity of Senslab testbed.
1188 OAR returns seconds for experiments duration.
1189 Defined in seconds. """
# Return the list of leases (OAR reservations) known to the testbed,
# each decorated with slice_hrn / slice_id / component_id_list, optionally
# filtered to one slice via lease_filter_dict['name'].
1194 def GetLeases(self, lease_filter_dict=None):
1195 unfiltered_reservation_list = self.GetReservedNodes()
1197 ##Synchronize slice_table of sfa senslab db
1198 #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)
1200 reservation_list = []
1201 #Find the slice associated with this user senslab ldap uid
1202 logger.debug(" SLABDRIVER.PY \tGetLeases ")
1203 #Create user dict first to avoid looking several times for
1204 #the same user in LDAP SA 27/07/12
# NOTE(review): `resa_user_dict = {}` initialisation is on an elided
# line (1205) of this extract.
1206 for resa in unfiltered_reservation_list:
1207 logger.debug("SLABDRIVER \tGetLeases USER %s"\
1209 if resa['user'] not in resa_user_dict:
1210 logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
# Map the OAR login to the SFA registry user through LDAP email.
1211 ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
1212 ldap_info = ldap_info[0][1]
1213 user = dbsession.query(RegUser).filter_by(email = \
1214 ldap_info['mail'][0]).first()
1215 #Separated in case user not in database : record_id not defined SA 17/07//12
1216 query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
# NOTE(review): a SQLAlchemy Query object is always truthy, so this
# condition never takes the (elided) else branch — confirm intent.
1217 if query_slice_info:
1218 slice_info = query_slice_info.first()
1222 resa_user_dict[resa['user']] = {}
1223 resa_user_dict[resa['user']]['ldap_info'] = user
1224 resa_user_dict[resa['user']]['slice_info'] = slice_info
1226 logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
# Second pass: attach slice and component identifiers to each lease.
1228 for resa in unfiltered_reservation_list:
1232 resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
1233 resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
1235 #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
1236 resa['component_id_list'] = []
1237 #Transform the hostnames into urns (component ids)
1238 for node in resa['reserved_nodes']:
1239 #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
1240 #self.root_auth, node['hostname']))
1241 slab_xrn = slab_xrn_object(self.root_auth, node)
1242 resa['component_id_list'].append(slab_xrn.urn)
1244 #Filter the reservation list if necessary
1245 #Returns all the leases associated with a given slice
1246 if lease_filter_dict:
1247 logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
1248 %(lease_filter_dict))
1249 for resa in unfiltered_reservation_list:
1250 if lease_filter_dict['name'] == resa['slice_hrn']:
1251 reservation_list.append(resa)
# (else branch on an elided line) no filter: return everything.
1253 reservation_list = unfiltered_reservation_list
1255 logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
1256 %(reservation_list))
1257 return reservation_list
def augment_records_with_testbed_info(self, sfa_records):
    """Enrich *sfa_records* with senslab-specific data.

    Thin delegation wrapper: all the work happens in fill_record_info(),
    which queries the senslab DB/LDAP for each record.
    """
    return self.fill_record_info(sfa_records)
# Fill SFA records with senslab-specific information: for 'slice' records,
# attach the slice owner (PI/researcher, oar_job_id, node_ids); for 'user'
# records, fetch the user's slice and append a synthesized slice record to
# the input list. Accepts a single record or a list; mutates records in
# place and returns via the (elided) tail of the function.
1262 def fill_record_info(self, record_list):
1264 Given a SFA record, fill in the senslab specific and SFA specific
1265 fields in the record.
1268 logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
1269 if not isinstance(record_list, list):
1270 record_list = [record_list]
# (try: opens on an elided line — see the `except TypeError` below.)
1273 for record in record_list:
1274 #If the record is a SFA slice record, then add information
1275 #about the user of this slice. This kind of
1276 #information is in the Senslab's DB.
1277 if str(record['type']) == 'slice':
1278 #Get slab slice record.
1279 recslice_list = self.GetSlices(slice_filter = \
1280 str(record['hrn']),\
1281 slice_filter_type = 'slice_hrn')
# Owner of the slice, looked up by the record_id_user of the first match.
1283 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1284 recslice_list[0]['record_id_user']).first()
1285 logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
1286 record.update({'PI':[recuser.hrn],
1287 'researcher': [recuser.hrn],
1288 'name':record['hrn'],
1291 'person_ids':[recslice_list[0]['record_id_user']],
1292 'geni_urn':'', #For client_helper.py compatibility
1293 'keys':'', #For client_helper.py compatibility
1294 'key_ids':''}) #For client_helper.py compatibility
# Aggregate job ids / node ids from every matching slab slice row.
1297 for rec in recslice_list:
1298 record['oar_job_id'].append(rec['oar_job_id'])
1299 record['node_ids'] = [ self.root_auth + hostname for hostname in rec['node_ids']]
1303 logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
1304 recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
1305 if str(record['type']) == 'user':
1306 #The record is a SFA user record.
1307 #Get the information about his slice from Senslab's DB
1308 #and add it to the user record.
1309 recslice_list = self.GetSlices(\
1310 slice_filter = record['record_id'],\
1311 slice_filter_type = 'record_id_user')
1313 logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
1314 recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
1315 #Append slice record in records list,
1316 #therefore fetches user and slice info again(one more loop)
1317 #Will update PIs and researcher for the slice
1318 recuser = dbsession.query(RegRecord).filter_by(record_id = \
1319 recslice_list[0]['record_id_user']).first()
1320 logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
1321 recuser %s \r\n \r\n" %(recuser))
1323 recslice = recslice_list[0]
1324 recslice.update({'PI':[recuser.hrn],
1325 'researcher': [recuser.hrn],
1326 'name':record['hrn'],
1329 'person_ids':[recslice_list[0]['record_id_user']]})
1331 for rec in recslice_list:
1332 recslice['oar_job_id'].append(rec['oar_job_id'])
1336 recslice.update({'type':'slice', \
1337 'hrn':recslice_list[0]['slice_hrn']})
1340 #GetPersons takes [] as filters
1341 #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
1342 user_slab = self.GetPersons([record])
1345 record.update(user_slab[0])
1346 #For client_helper.py compatibility
1347 record.update( { 'geni_urn':'',
# NOTE(review): appends to record_list while the outer loop iterates it —
# the appended slice record is visited by the same loop; confirm intended.
1350 record_list.append(recslice)
1352 logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
1353 INFO TO USER records %s" %(record_list))
1354 logger.debug("SLABDRIVER.PY \tfill_record_info END \
1355 #record %s \r\n \r\n " %(record))
# Python 2-only except syntax; guards the whole loop above.
1357 except TypeError, error:
1358 logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
1360 #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
1364 #self.fill_record_slab_info(records)
1370 #TODO Update membership? update_membership_list SA 05/07/12
1371 #def update_membership_list(self, oldRecord, record, listName, addFunc, \
1373 ## get a list of the HRNs that are members of the old and new records
1375 #oldList = oldRecord.get(listName, [])
1378 #newList = record.get(listName, [])
1380 ## if the lists are the same, then we don't have to update anything
1381 #if (oldList == newList):
1384 ## build a list of the new person ids, by looking up each person to get
1388 #records = table.find({'type': 'user', 'hrn': newList})
1389 #for rec in records:
1390 #newIdList.append(rec['pointer'])
1392 ## build a list of the old person ids from the person_ids field
1394 #oldIdList = oldRecord.get("person_ids", [])
1395 #containerId = oldRecord.get_pointer()
1397 ## if oldRecord==None, then we are doing a Register, instead of an
1400 #containerId = record.get_pointer()
1402 ## add people who are in the new list, but not the oldList
1403 #for personId in newIdList:
1404 #if not (personId in oldIdList):
1405 #addFunc(self.plauth, personId, containerId)
1407 ## remove people who are in the old list, but not the new list
1408 #for personId in oldIdList:
1409 #if not (personId in newIdList):
1410 #delFunc(self.plauth, personId, containerId)
1412 #def update_membership(self, oldRecord, record):
1414 #if record.type == "slice":
1415 #self.update_membership_list(oldRecord, record, 'researcher',
1416 #self.users.AddPersonToSlice,
1417 #self.users.DeletePersonFromSlice)
1418 #elif record.type == "authority":
1423 # I don't think you plan on running a component manager at this point
1424 # let me clean up the mess of ComponentAPI that is deprecated anyways
1427 #TODO FUNCTIONS SECTION 04/07/2012 SA
1429 #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
# Stub: not implemented for senslab — logs a warning and does nothing.
1431 def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
1432 """ This method is a hopefully temporary hack to let the sfa correctly
1433 detach the objects it creates from a remote peer object. This is
1434 needed so that the sfa federation link can work in parallel with
1435 RefreshPeer, as RefreshPeer depends on remote objects being correctly
1438 auth : struct, API authentication structure
1439 AuthMethod : string, Authentication method to use
1440 object_type : string, Object type, among 'site','person','slice',
1442 object_id : int, object_id
1443 shortname : string, peer shortname
1447 logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
1451 #TODO Is BindObjectToPeer still necessary ? Currently does nothing
# Stub: not implemented for senslab — logs a warning and does nothing.
1453 def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
1454 remote_object_id=None):
1455 """This method is a hopefully temporary hack to let the sfa correctly
1456 attach the objects it creates to a remote peer object. This is needed
1457 so that the sfa federation link can work in parallel with RefreshPeer,
1458 as RefreshPeer depends on remote objects being correctly marked.
1460 shortname : string, peer shortname
1461 remote_object_id : int, remote object_id, set to 0 if unknown
1465 logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
1468 #TODO UpdateSlice 04/07/2012 SA
1469 #Function should delete and create another job since in senslab slice=job
# Stub: not implemented for senslab — logs a warning and does nothing.
1470 def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
1471 """Updates the parameters of an existing slice with the values in
1473 Users may only update slices of which they are members.
1474 PIs may update any of the slices at their sites, or any slices of
1475 which they are members. Admins may update any slice.
1476 Only PIs and admins may update max_nodes. Slices cannot be renewed
1477 (by updating the expires parameter) more than 8 weeks into the future.
1478 Returns 1 if successful, faults otherwise.
1482 logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
1485 #TODO UpdatePerson 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1486 def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
1487 """Updates a person. Only the fields specified in person_fields
1488 are updated, all other fields are left untouched.
1489 Users and techs can only update themselves. PIs can only update
1490 themselves and other non-PIs at their sites.
1491 Returns 1 if successful, faults otherwise.
1495 logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
1498 #TODO GetKeys 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1499 def GetKeys(self, auth, key_filter=None, return_fields=None):
1500 """Returns an array of structs containing details about keys.
1501 If key_filter is specified and is an array of key identifiers,
1502 or a struct of key attributes, only keys matching the filter
1503 will be returned. If return_fields is specified, only the
1504 specified details will be returned.
1506 Admin may query all keys. Non-admins may only query their own keys.
1510 logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
1513 #TODO DeleteKey 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1514 def DeleteKey(self, auth, key_id):
1516 Non-admins may only delete their own keys.
1517 Returns 1 if successful, faults otherwise.
1521 logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
1525 #TODO : Check rights to delete person
# Marks the user as deleted in the senslab LDAP (soft delete); no
# permission checks are performed here (see TODO above).
1526 def DeletePerson(self, auth, person_record):
1527 """ Disable an existing account in senslab LDAP.
1528 Users and techs can only delete themselves. PIs can only
1529 delete themselves and other non-PIs at their sites.
1530 Admins can delete anyone.
1531 Returns 1 if successful, faults otherwise.
1535 #Disable user account in senslab LDAP
1536 ret = self.ldap.LdapMarkUserAsDeleted(person_record)
1537 logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
1540 #TODO Check DeleteSlice, check rights 05/07/2012 SA
# Kills the OAR job bound to the slice; no permission checks are
# performed here (see TODO above).
1541 def DeleteSlice(self, auth, slice_record):
1542 """ Deletes the specified slice.
1543 Senslab : Kill the job associated with the slice if there is one
1544 using DeleteSliceFromNodes.
1545 Updates the slice record in slab db to remove the slice nodes.
1547 Users may only delete slices of which they are members. PIs may
1548 delete any of the slices at their sites, or any slices of which
1549 they are members. Admins may delete any slice.
1550 Returns 1 if successful, faults otherwise.
1554 self.DeleteSliceFromNodes(slice_record)
1555 logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
1558 #TODO AddPerson 04/07/2012 SA
1559 #def AddPerson(self, auth, person_fields=None):
# Creates the account in the senslab LDAP; return value handling sits on
# elided lines of this extract.
1560 def AddPerson(self, record):#TODO fixing 28/08//2012 SA
1561 """Adds a new account. Any fields specified in records are used,
1562 otherwise defaults are used.
1563 Accounts are disabled by default. To enable an account,
1565 Returns the new person_id (> 0) if successful, faults otherwise.
1569 ret = self.ldap.LdapAddUser(record)
1570 logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
1573 #TODO AddPersonToSite 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1574 def AddPersonToSite (self, auth, person_id_or_email, \
1575 site_id_or_login_base=None):
1576 """ Adds the specified person to the specified site. If the person is
1577 already a member of the site, no errors are returned. Does not change
1578 the person's primary site.
1579 Returns 1 if successful, faults otherwise.
1583 logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
1586 #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1587 def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
1588 """Grants the specified role to the person.
1589 PIs can only grant the tech and user roles to users and techs at their
1590 sites. Admins can grant any role to any user.
1591 Returns 1 if successful, faults otherwise.
1595 logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
1598 #TODO AddPersonKey 04/07/2012 SA
# Stub: not implemented for senslab — logs a warning and does nothing.
1599 def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
1600 """Adds a new key to the specified account.
1601 Non-admins can only modify their own keys.
1602 Returns the new key_id (> 0) if successful, faults otherwise.
1606 logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
# Delete every OAR job listed in leases_id_list on behalf of slice_hrn,
# one DeleteJobs call per job id.
1609 def DeleteLeases(self, leases_id_list, slice_hrn ):
1610 for job_id in leases_id_list:
1611 self.DeleteJobs(job_id, slice_hrn)
1613 logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
1614 \r\n " %(leases_id_list, slice_hrn))