3 from datetime import datetime
4 from time import gmtime
6 from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
7 from sfa.util.sfalogging import logger
9 from sfa.storage.alchemy import dbsession
10 from sfa.storage.model import RegRecord, RegUser
12 from sfa.trust.credential import Credential
15 from sfa.managers.driver import Driver
16 from sfa.rspecs.version_manager import VersionManager
17 from sfa.rspecs.rspec import RSpec
19 from sfa.util.xrn import hrn_to_urn, urn_to_sliver_id, get_leaf
22 ## thierry: everything that is API-related (i.e. handling incoming requests)
24 # SlabDriver should be really only about talking to the senslab testbed
27 from sfa.senslab.OARrestapi import OARrestapi
28 from sfa.senslab.LDAPapi import LDAPapi
30 from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SliceSenslab
32 from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
34 from sfa.senslab.slabslices import SlabSlices
41 # this inheritance scheme is so that the driver object can receive
42 # GetNodes or GetSites sorts of calls directly
43 # and thus minimize the differences in the managers with the pl version
44 class SlabDriver(Driver):
    def __init__(self, config):
        # Driver for the Senslab testbed: wires together the SFA registry
        # settings, the OAR REST API client and the local slab database.
        # NOTE(review): some original lines appear elided from this chunk;
        # GetPersons uses self.ldap, so an LDAPapi() handle is presumably
        # initialised here as well -- confirm against the full file.
        Driver.__init__ (self, config)
        self.hrn = config.SFA_INTERFACE_HRN
        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
        self.oar = OARrestapi()
        # Date format understood by OAR, used throughout the driver.
        self.time_format = "%Y-%m-%d %H:%M:%S"
        self.db = SlabDB(config,debug = True)
    def sliver_status(self, slice_urn, slice_hrn):
        """Receive a status request for slice named urn/hrn
        urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
        shall return a structure as described in
        http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
        NT : not sure if we should implement this or not, but used by sface.
        """
        # NOTE(review): several lines are elided from this chunk -- the
        # initialisations of `slice_nodes_list`, `result`, `res` and
        # `resources`, the loop over `slice_list` that binds `sl`, and the
        # `else:` of the per-node boot-state check. Visible code kept as-is.
        #First get the slice with the slice hrn
        slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')
        # NOTE(review): `is 0` is an identity test, not equality -- should
        # be `if not slice_list:` (or `len(slice_list) == 0`).
        if len(slice_list) is 0:
            raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
        #Slice has the same slice hrn for each slice in the slice/lease list
        #So fetch the info on the user once
        one_slice = slice_list[0]
        recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                    one_slice['record_id_user']).first()
        #Make a list of all the nodes hostnames in use for this slice
        for node in sl['node_ids']:
            slice_nodes_list.append(node['hostname'])
        #Get all the corresponding nodes details
        nodes_all = self.GetNodes({'hostname':slice_nodes_list},
                        ['node_id', 'hostname','site','boot_state'])
        nodeall_byhostname = dict([(n['hostname'], n) for n in nodes_all])
        top_level_status = 'empty'
        # NOTE(review): fromkeys on an instance does not populate `result`
        # in place; presumably `result = dict.fromkeys(...)` was intended.
        result.fromkeys(['geni_urn','pl_login','geni_status','geni_resources'],None)
        result['pl_login'] = recuser.hrn
        logger.debug("Slabdriver - sliver_status Sliver status urn %s hrn %s sl\
             %s \r\n " %(slice_urn, slice_hrn, sl))
        nodes_in_slice = sl['node_ids']
        result['geni_status'] = top_level_status
        result['geni_resources'] = []
        top_level_status = 'ready'
        #A job is running on Senslab for this slice
        # report about the local nodes that are in the slice only
        result['geni_urn'] = slice_urn
        #timestamp = float(sl['startTime']) + float(sl['walltime'])
        #result['pl_expires'] = strftime(self.time_format, \
        #gmtime(float(timestamp)))
        #result['slab_expires'] = strftime(self.time_format,\
        #gmtime(float(timestamp)))
        for node in sl['node_ids']:
            #res['slab_hostname'] = node['hostname']
            #res['slab_boot_state'] = node['boot_state']
            res['pl_hostname'] = node['hostname']
            res['pl_boot_state'] = nodeall_byhostname[node['hostname']]['boot_state']
            #res['pl_last_contact'] = strftime(self.time_format, \
            #gmtime(float(timestamp)))
            # NOTE(review): `Xrn` is not among the visible imports -- confirm
            # it is imported in the full file.
            sliver_id = Xrn(slice_urn, type='slice', \
                    id=nodeall_byhostname[node['hostname']]['node_id'], \
                    authority=self.hrn).urn
            res['geni_urn'] = sliver_id
            if nodeall_byhostname[node['hostname']]['boot_state'] == 'Alive':
                res['geni_status'] = 'ready'
            # NOTE(review): the `else:` header of the failure branch appears
            # elided; as written the failure values always overwrite.
            res['geni_status'] = 'failed'
            top_level_status = 'failed'
            res['geni_error'] = ''
            resources.append(res)
        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
        aggregate = SlabAggregate(self)
        # NOTE(review): the tail of the def signature (presumably
        # `users, options):`) is elided from this chunk, as are several
        # other lines: the body of the `if not isinstance(creds, list)`
        # guard, the `sfa_peer)` continuation of verify_slice, and the
        # `kept_leases = []` initialisation.
        slices = SlabSlices(self)
        peer = slices.get_peer(slice_hrn)
        sfa_peer = slices.get_sfa_peer(slice_hrn)
        if not isinstance(creds, list):
        slice_record = users[0].get('slice_record', {})
        rspec = RSpec(rspec_string)
        logger.debug("SLABDRIVER.PY \t create_sliver \tr spec.version %s slice_record %s " \
                        %(rspec.version,slice_record))
        #self.synchronize_oar_and_slice_table(slice_hrn)
        # ensure site record exists?
        # ensure slice record exists
        #Removed options to verify_slice SA 14/08/12
        sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
        #requested_attributes returned by rspec.version.get_slice_attributes()
        #unused, removed SA 13/08/12
        rspec.version.get_slice_attributes()
        logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
        # ensure person records exists
        #verify_persons returns added persons but since the return value
        slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
                        sfa_peer, options=options)
        # add/remove slice from nodes
        requested_slivers = [node.get('component_name') \
                    for node in rspec.version.get_nodes_with_slivers()]
        l = [ node for node in rspec.version.get_nodes_with_slivers() ]
        logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                    requested_slivers %s listnodes %s" \
                    %(requested_slivers,l))
        #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
        slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
        requested_lease_list = []
        for lease in rspec.version.get_leases():
            single_requested_lease = {}
            logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
            if not lease.get('lease_id'):
                single_requested_lease['hostname'] = \
                        slab_xrn_to_hostname(lease.get('component_id').strip())
                single_requested_lease['start_time'] = lease.get('start_time')
                single_requested_lease['duration'] = lease.get('duration')
            # NOTE(review): the `else:` collecting already-existing leases
            # appears elided here.
            kept_leases.append(int(lease['lease_id']))
            if single_requested_lease.get('hostname'):
                requested_lease_list.append(single_requested_lease)
        #dCreate dict of leases by start_time, regrouping nodes reserved
        #time, for the same amount of time = one job on OAR
        requested_job_dict = {}
        for lease in requested_lease_list:
            #In case it is an asap experiment start_time is empty
            if lease['start_time'] == '':
                lease['start_time'] = '0'
            if lease['start_time'] not in requested_job_dict:
                if isinstance(lease['hostname'], str):
                    lease['hostname'] = [lease['hostname']]
                requested_job_dict[lease['start_time']] = lease
            # NOTE(review): the `else:` merging leases with an existing
            # start_time appears elided here.
            job_lease = requested_job_dict[lease['start_time']]
            if lease['duration'] == job_lease['duration'] :
                job_lease['hostname'].append(lease['hostname'])
        logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s " %(requested_job_dict))
        #verify_slice_leases returns the leases , but the return value is unused
        #here. Removed SA 13/08/12
        slices.verify_slice_leases(sfa_slice, \
                        requested_job_dict, kept_leases, peer)
        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
        # Tear the slice down: delete its OAR job(s) on every slice record
        # matching this hrn.
        # NOTE(review): the body of the `if not sfa_slice_list:` guard
        # (presumably a bare return) and the `if peer:` guards around the
        # peer (un)binding calls are elided in this chunk.
        sfa_slice_list = self.GetSlices(slice_filter = slice_hrn, \
                                slice_filter_type = 'slice_hrn')
        if not sfa_slice_list:
        #Delete all in the slice
        for sfa_slice in sfa_slice_list:
            logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
            slices = SlabSlices(self)
            # determine if this is a peer slice
            peer = slices.get_peer(slice_hrn)
            #TODO delete_sliver SA : UnBindObjectFromPeer should be
            #used when there is another
            #senslab testbed, which is not the case 14/08/12 .
            logger.debug("SLABDRIVER.PY delete_sliver peer %s" %(peer))
            self.UnBindObjectFromPeer('slice', \
                            sfa_slice['record_id_slice'], peer,None)
            self.DeleteSliceFromNodes(sfa_slice)
            self.BindObjectToPeer('slice', sfa_slice['record_id_slice'], \
                            peer, sfa_slice['peer_slice_id'])
295 def AddSlice(self, slice_record):
296 slab_slice = SliceSenslab( slice_hrn = slice_record['slice_hrn'], \
297 record_id_slice= slice_record['record_id_slice'] , \
298 record_id_user= slice_record['record_id_user'], \
299 peer_authority = slice_record['peer_authority'])
300 logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s slab_slice %s" \
301 %(slice_record,slab_slice))
302 slab_dbsession.add(slab_slice)
303 slab_dbsession.commit()
    # first 2 args are None in case of resource discovery
    def list_resources (self, slice_urn, slice_hrn, creds, options):
        # Build and return the advertisement (or slice) RSpec.
        # NOTE(review): the `rspec_version = \` assignment receiving the
        # get_version() result and the final `return rspec.toxml()` are
        # elided in this chunk.
        #cached_requested = options.get('cached', True)
        version_manager = VersionManager()
        # get the rspec's return format from options
        version_manager.get_version(options.get('geni_rspec_version'))
        version_string = "rspec_%s" % (rspec_version)
        #panos adding the info option to the caching key (can be improved)
        if options.get('info'):
            version_string = version_string + "_" + \
                        options.get('info', 'default')
        # look in cache first
        #if cached_requested and self.cache and not slice_hrn:
            #rspec = self.cache.get(version_string)
            #logger.debug("SlabDriver.ListResources: \
                        #returning cached advertisement")
        #panos: passing user-defined options
        aggregate = SlabAggregate(self)
        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        options.update({'origin_hrn':origin_hrn})
        rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
                        version=rspec_version, options=options)
        #if self.cache and not slice_hrn:
            #logger.debug("Slab.ListResources: stores advertisement in cache")
            #self.cache.add(version_string, rspec)
    def list_slices (self, creds, options):
        # Return the urns of all slices known to the slab db.
        # NOTE(review): the final `return slice_urns` is elided here.
        # look in cache first
        #slices = self.cache.get('slices')
        #logger.debug("PlDriver.list_slices returns from cache")
        slices = self.GetSlices()
        logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
        slice_hrns = [slab_slice['slice_hrn'] for slab_slice in slices]
        #slice_hrns = [slicename_to_hrn(self.hrn, slab_slice['slice_hrn']) \
        #for slab_slice in slices]
        slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                        for slice_hrn in slice_hrns]
        #logger.debug ("SlabDriver.list_slices stores value in cache")
        #self.cache.add('slices', slice_urns)
    def register (self, sfa_record, hrn, pub_key):
        """
        Adding new user, slice, node or site should not be handled
        Adding users = LDAP Senslab
        Adding slice = Import from LDAP users
        """
        # NOTE(review): the docstring delimiters and the method body
        # (likely an empty/no-op implementation) are elided in this chunk;
        # the stray docstring text has been re-quoted to keep it readable.
    #No site or node record update allowed
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        # Update an existing slice or user record; new_key is only
        # meaningful for user records.
        # NOTE(review): elided in this chunk: the continuation of the
        # sfa_fields_to_slab_fields call, the `'enabled']:` tail of the
        # field list, `update_fields = {}`, `person = persons[0]`, and the
        # stale-key bookkeeping (for-loop / key_exists flag) around
        # DeleteKey/AddPersonKey. Visible code kept as-is.
        pointer = old_sfa_record['pointer']
        old_sfa_record_type = old_sfa_record['type']
        # new_key implemented for users only
        if new_key and old_sfa_record_type not in [ 'user' ]:
            raise UnknownSfaType(old_sfa_record_type)
        #if (type == "authority"):
            #self.shell.UpdateSite(pointer, new_sfa_record)
        if old_sfa_record_type == "slice":
            slab_record = self.sfa_fields_to_slab_fields(old_sfa_record_type, \
            if 'name' in slab_record:
                slab_record.pop('name')
                #Prototype should be UpdateSlice(self,
                #auth, slice_id_or_name, slice_fields)
                #Senslab cannot update slice since slice = job
                #so we must delete and create another job
                self.UpdateSlice(pointer, slab_record)
        elif old_sfa_record_type == "user":
            all_fields = new_sfa_record
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                    update_fields[key] = all_fields[key]
            self.UpdatePerson(pointer, update_fields)
            # must check this key against the previous one if it exists
            persons = self.GetPersons([pointer], ['key_ids'])
            keys = person['key_ids']
            keys = self.GetKeys(person['key_ids'])
            # Delete all stale keys
            if new_key != key['key']:
                self.DeleteKey(key['key_id'])
            self.AddPersonKey(pointer, {'key_type': 'ssh', \
    def remove (self, sfa_record):
        # Remove a user (disable the LDAP account) or a slice (delete its
        # job from the nodes / db).
        # NOTE(review): a guard around DeletePerson (`if person:`-style)
        # and the final `return True` appear elided in this chunk.
        sfa_record_type = sfa_record['type']
        hrn = sfa_record['hrn']
        if sfa_record_type == 'user':
            #get user from senslab ldap
            person = self.GetPersons(sfa_record)
            #No registering at a given site in Senslab.
            #Once registered to the LDAP, all senslab sites are
            #Mark account as disabled in ldap
            self.DeletePerson(sfa_record)
        elif sfa_record_type == 'slice':
            if self.GetSlices(slice_filter = hrn, \
                                slice_filter_type = 'slice_hrn'):
                self.DeleteSlice(sfa_record)
            #elif type == 'authority':
                #if self.GetSites(pointer):
                    #self.DeleteSite(pointer)
    #TODO clean GetPeers. 05/07/12SA
    def GetPeers (self, auth = None, peer_filter=None, return_fields_list=None):
        # Collect authority records from the registry database, optionally
        # restricted to `peer_filter`.
        # NOTE(review): elided in this chunk: the `records_list = []`
        # initialisation, the if/else selecting the filtered vs unfiltered
        # path, the `%(records_list))` / `%(return_records))` continuations
        # of two debug calls, the body of the final `if`, and any field
        # filtering driven by return_fields_list. Visible code kept as-is.
        existing_records = {}
        existing_hrns_by_types = {}
        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
                    return_field %s " %(auth , peer_filter, return_fields_list))
        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
        for record in all_records:
            existing_records[(record.hrn, record.type)] = record
            if record.type not in existing_hrns_by_types:
                existing_hrns_by_types[record.type] = [record.hrn]
                logger.debug("SLABDRIVER \tGetPeer\t NOT IN \
                    existing_hrns_by_types %s " %( existing_hrns_by_types))
                # NOTE(review): the two lines below presumably sat in an
                # elided `else:` branch (append to an existing type list).
                logger.debug("SLABDRIVER \tGetPeer\t \INNN type %s hrn %s " \
                    %(record.type,record.hrn))
                existing_hrns_by_types[record.type].append(record.hrn)
        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
                    %( existing_hrns_by_types))
        records_list.append(existing_records[(peer_filter,'authority')])
        for hrn in existing_hrns_by_types['authority']:
            records_list.append(existing_records[(hrn,'authority')])
        logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \
        return_records = records_list
        if not peer_filter and not return_fields_list:
        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
        return return_records
    #TODO : Handling OR request in make_ldap_filters_from_records
    #instead of the for loop
    #over the records' list
    def GetPersons(self, person_filter=None):
        """
        person_filter should be a list of dictionnaries when not set to None.
        Returns a list of users whose accounts are enabled found in ldap.
        """
        # NOTE(review): the docstring delimiters were elided and have been
        # restored. Also elided: `person_list = []`, the `%(person_filter))`
        # continuation of the debug call, the `else:` header before the
        # unfiltered lookup, and the final `return person_list`.
        # `self.ldap` is assumed to be an LDAPapi handle set in __init__.
        logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
        if person_filter and isinstance(person_filter, list):
            #If we are looking for a list of users (list of dict records)
            #Usually the list contains only one user record
            for searched_attributes in person_filter:
                #Get only enabled user accounts in senslab LDAP :
                #add a filter for make_ldap_filters_from_record
                person = self.ldap.LdapFindUser(searched_attributes, \
                                        is_user_enabled=True)
                person_list.append(person)
        #Get only enabled user accounts in senslab LDAP :
        #add a filter for make_ldap_filters_from_record
        person_list = self.ldap.LdapFindUser(is_user_enabled=True)
541 def GetTimezone(self):
542 server_timestamp, server_tz = self.oar.parser.\
543 SendRequest("GET_timezone")
544 return server_timestamp, server_tz
    def DeleteJobs(self, job_id, slice_hrn):
        # Delete an OAR job on behalf of the slice owner.
        # NOTE(review): `job_id is -1` is an identity comparison on an int
        # -- should be `job_id == -1`. `rstrip("_slice")` strips any
        # trailing characters from the set {_,s,l,i,c,e}, not the literal
        # suffix -- fragile for usernames ending in those letters. The
        # guard's body (presumably `return`), `reqdict = {}` and the
        # `reqdict, username)` tail of the POST call are elided here.
        if not job_id or job_id is -1:
        username = slice_hrn.split(".")[-1].rstrip("_slice")
        reqdict['method'] = "delete"
        reqdict['strval'] = str(job_id)
        answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
        logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
                        username %s" %(job_id,answer, username))
564 ##TODO : Unused GetJobsId ? SA 05/07/12
565 #def GetJobsId(self, job_id, username = None ):
567 #Details about a specific job.
568 #Includes details about submission time, jot type, state, events,
569 #owner, assigned ressources, walltime etc...
573 #node_list_k = 'assigned_network_address'
574 ##Get job info from OAR
575 #job_info = self.oar.parser.SendRequest(req, job_id, username)
577 #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
579 #if job_info['state'] == 'Terminated':
580 #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
583 #if job_info['state'] == 'Error':
584 #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
589 #logger.error("SLABDRIVER \tGetJobsId KeyError")
592 #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
594 ##Replaces the previous entry
595 ##"assigned_network_address" / "reserved_resources"
597 #job_info.update({'node_ids':parsed_job_info[node_list_k]})
598 #del job_info[node_list_k]
599 #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
    def GetJobsResources(self, job_id, username = None):
        # Ask OAR for the resources attached to a job and translate the
        # OAR resource ids into hostname dictionaries.
        # NOTE(review): the `hostname_list = \` assignment receiving the
        # __get_hostnames_from_oar_node_ids result and the final
        # `return job_info` are elided in this chunk.
        #job_resources=['reserved_resources', 'assigned_resources',\
        #'job_id', 'job_uri', 'assigned_nodes',\
        #assigned_res = ['resource_id', 'resource_uri']
        #assigned_n = ['node', 'node_uri']
        req = "GET_jobs_id_resources"
        #Get job resources list from OAR
        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
        logger.debug("SLABDRIVER \t GetJobsResources %s " %(node_id_list))
        self.__get_hostnames_from_oar_node_ids(node_id_list)
        #Replaces the previous entry "assigned_network_address" /
        #"reserved_resources"
        job_info = {'node_ids': hostname_list}
    def get_info_on_reserved_nodes(self, job_info, node_list_name):
        # Map the node names listed under job_info[node_list_name] onto the
        # hostnames known to the testbed.
        # NOTE(review): the `try:` / `except KeyError:` headers wrapping
        # this lookup are elided in this chunk (logger.error below clearly
        # belonged to an except block).
        #Get the list of the testbed nodes records and make a
        #dictionnary keyed on the hostname out of it
        node_list_dict = self.GetNodes()
        #node_hostname_list = []
        node_hostname_list = [node['hostname'] for node in node_list_dict]
        #for node in node_list_dict:
            #node_hostname_list.append(node['hostname'])
        node_dict = dict(zip(node_hostname_list, node_list_dict))
        reserved_node_hostname_list = []
        for index in range(len(job_info[node_list_name])):
            #job_info[node_list_name][k] =
            # NOTE(review): indexing into an empty list raises IndexError;
            # this should presumably be .append(...).
            reserved_node_hostname_list[index] = \
                node_dict[job_info[node_list_name][index]]['hostname']
        logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
                reserved_node_hostname_list %s" \
                %(reserved_node_hostname_list))
        logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
        return reserved_node_hostname_list
653 def GetNodesCurrentlyInUse(self):
654 """Returns a list of all the nodes already involved in an oar job"""
655 return self.oar.parser.SendRequest("GET_running_jobs")
657 def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
658 full_nodes_dict_list = self.GetNodes()
659 #Put the full node list into a dictionary keyed by oar node id
660 oar_id_node_dict = {}
661 for node in full_nodes_dict_list:
662 oar_id_node_dict[node['oar_id']] = node
664 logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
665 oar_id_node_dict %s" %(oar_id_node_dict))
667 hostname_dict_list = []
668 for resource_id in resource_id_list:
669 #Because jobs requested "asap" do not have defined resources
670 if resource_id is not "Undefined":
671 hostname_dict_list.append({'hostname' : \
672 oar_id_node_dict[resource_id]['hostname'],
673 'site_id' : oar_id_node_dict[resource_id]['site']})
675 #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
676 return hostname_dict_list
678 def GetReservedNodes(self,username = None):
679 #Get the nodes in use and the reserved nodes
680 reservation_dict_list = \
681 self.oar.parser.SendRequest("GET_reserved_nodes", username = username)
684 for resa in reservation_dict_list:
685 logger.debug ("GetReservedNodes resa %s"%(resa))
686 #dict list of hostnames and their site
687 resa['reserved_nodes'] = \
688 self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
690 #del resa['resource_ids']
691 return reservation_dict_list
    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
        """
        node_filter_dict : dictionnary of lists
        """
        # NOTE(review): the docstring delimiters were elided and have been
        # restored. Also elided: the `tmp = {}` / `tmp[k] = node[k]` field
        # copy, the `else:` appending the full node, and the `try:` /
        # `except KeyError:` around the filter loop (log_exc below
        # belonged to the except block). Visible code kept as-is.
        # NOTE(review): dict.values() returns a list on Python 2 only --
        # this file appears to target Python 2.
        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
        node_dict_list = node_dict_by_id.values()
        #No filtering needed return the list directly
        if not (node_filter_dict or return_fields_list):
            return node_dict_list
        return_node_list = []
        for filter_key in node_filter_dict:
            #Filter the node_dict_list by each value contained in the
            #list node_filter_dict[filter_key]
            for value in node_filter_dict[filter_key]:
                for node in node_dict_list:
                    if node[filter_key] == value:
                        if return_fields_list :
                            for k in return_fields_list:
                                return_node_list.append(tmp)
                            return_node_list.append(node)
            logger.log_exc("GetNodes KeyError")
        return return_node_list
    def GetSites(self, site_filter_name_list = None, return_fields_list = None):
        # Fetch site information from OAR, optionally filtered by site name
        # and restricted to the requested fields.
        # NOTE(review): the `tmp = {}` initialisation, the `try:` /
        # `except KeyError:` around the field copy (logger.error below
        # belonged to the except block), and the `else:` appending the
        # full site dict are elided in this chunk.
        site_dict = self.oar.parser.SendRequest("GET_sites")
        #site_dict : dict where the key is the site name
        return_site_list = []
        if not ( site_filter_name_list or return_fields_list):
            return_site_list = site_dict.values()
            return return_site_list
        for site_filter_name in site_filter_name_list:
            if site_filter_name in site_dict:
                if return_fields_list:
                    for field in return_fields_list:
                        tmp[field] = site_dict[site_filter_name][field]
                        logger.error("GetSites KeyError %s "%(field))
                    return_site_list.append(tmp)
                    return_site_list.append( site_dict[site_filter_name])
        return return_site_list
    def GetSlices(self, slice_filter = None, slice_filter_type = None):
        #def GetSlices(self, slice_filter = None, slice_filter_type = None, \
        #return_fields_list = None):
        """ Get the slice records from the slab db.
        Returns a slice ditc if slice_filter and slice_filter_type
        Returns a list of slice dictionnaries if there are no filters
        """
        # NOTE(review): the docstring closer was elided and has been
        # restored. Also elided from this chunk: the guards around the
        # filtered/unfiltered paths (`else:` for the no-filter case), the
        # `if slicerec:` guard and the `slicerec_dict = {}` initialisations
        # inside both lease loops, and the per-lease job-id checks.
        # Visible code kept as-is.
        return_slice_list = []
        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
        slicerec_dictlist = []
        if slice_filter_type in authorized_filter_types_list:
            def __get_slice_records(slice_filter = None, slice_filter_type = None):
                # Look a single slice up in the slab db and derive the
                # owner's login from its hrn.
                #Get list of slices based on the slice hrn
                if slice_filter_type == 'slice_hrn':
                    login = slice_filter.split(".")[1].split("_")[0]
                    #DO NOT USE RegSlice - reg_researchers to get the hrn of the user
                    #otherwise will mess up the RegRecord in Resolve, don't know
                    #Only one entry for one user = one slice in slice_senslab table
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(slice_hrn = slice_filter).first()
                #Get slice based on user id
                if slice_filter_type == 'record_id_user':
                    slicerec = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = slice_filter).first()
                fixed_slicerec_dict = slicerec.dump_sqlalchemyobj_to_dict()
                login = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
                return login, fixed_slicerec_dict
            login, fixed_slicerec_dict = __get_slice_records(slice_filter, slice_filter_type)
            logger.debug(" SLABDRIVER \tGetSlices login %s \
                        %(login, fixed_slicerec_dict))
            #One slice can have multiple jobs
            leases_list = self.GetReservedNodes(username = login)
            #If no job is running or no job scheduled
            if leases_list == [] :
                return [fixed_slicerec_dict]
            #Several jobs for one slice
            for lease in leases_list :
                #Check with OAR the status of the job if a job id is in
                slicerec_dict['oar_job_id'] = lease['lease_id']
                slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                slicerec_dict.update(fixed_slicerec_dict)
                slicerec_dict.update({'hrn':\
                                str(fixed_slicerec_dict['slice_hrn'])})
                slicerec_dictlist.append(slicerec_dict)
                logger.debug("SLABDRIVER.PY \tGetSlices slicerec_dict %s slicerec_dictlist %s" %(slicerec_dict, slicerec_dictlist))
            logger.debug("SLABDRIVER.PY \tGetSlices RETURN slicerec_dictlist %s"\
                        %(slicerec_dictlist))
            return slicerec_dictlist
        # Unfiltered path: dump every slice and attach its leases.
        slice_list = slab_dbsession.query(SliceSenslab).all()
        leases_list = self.GetReservedNodes()
        slicerec_dictlist = []
        return_slice_list = []
        for record in slice_list:
            return_slice_list.append(record.dump_sqlalchemyobj_to_dict())
        for fixed_slicerec_dict in return_slice_list:
            owner = fixed_slicerec_dict['slice_hrn'].split(".")[1].split("_")[0]
            for lease in leases_list:
                if owner == lease['user']:
                    slicerec_dict['oar_job_id'] = lease['lease_id']
                    slicerec_dict.update({'node_ids':lease['reserved_nodes']})
                    slicerec_dict.update(fixed_slicerec_dict)
                    slicerec_dict.update({'hrn':\
                                    str(fixed_slicerec_dict['slice_hrn'])})
                    slicerec_dictlist.append(slicerec_dict)
        logger.debug("SLABDRIVER.PY \tGetSlices RETURN slices %s \
                slice_filter %s " %(return_slice_list, slice_filter))
        #if return_fields_list:
            #return_slice_list = parse_filter(sliceslist, \
            #slice_filter,'slice', return_fields_list)
        return slicerec_dictlist
881 def testbed_name (self): return self.hrn
    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
    def aggregate_version (self):
        # Advertise the ad/request RSpec versions this aggregate supports.
        # NOTE(review): the `return {` opening the result literal (and its
        # closing brace) are elided from this chunk; the dangling key/value
        # lines below belong to it.
        version_manager = VersionManager()
        ad_rspec_versions = []
        request_rspec_versions = []
        for rspec_version in version_manager.versions:
            if rspec_version.content_type in ['*', 'ad']:
                ad_rspec_versions.append(rspec_version.to_dict())
            if rspec_version.content_type in ['*', 'request']:
                request_rspec_versions.append(rspec_version.to_dict())
        'testbed':self.testbed_name(),
        'geni_request_rspec_versions': request_rspec_versions,
        'geni_ad_rspec_versions': ad_rspec_versions,
    # Convert SFA fields to PLC fields for use when registering up updating
    # registry record in the PLC database
    # @param type type of record (user, slice, ...)
    # @param hrn human readable name
    # @param sfa_fields dictionary of SFA fields
    # @param slab_fields dictionary of PLC fields (output)
    def sfa_fields_to_slab_fields(self, sfa_type, hrn, record):
        # NOTE(review): the `slab_record = {}` initialisation, an
        # `if "url" in record:` guard before the url copy, and the final
        # `return slab_record` are elided in this chunk.
        #for field in record:
        #    slab_record[field] = record[field]
        if sfa_type == "slice":
            #instantion used in get_slivers ?
            if not "instantiation" in slab_record:
                slab_record["instantiation"] = "senslab-instantiated"
            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
            #Unused hrn_to_pl_slicename because Slab's hrn already in the appropriate form SA 23/07/12
            slab_record["hrn"] = hrn
            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
                        slab_record %s " %(slab_record['hrn']))
            slab_record["url"] = record["url"]
            if "description" in record:
                slab_record["description"] = record["description"]
            if "expires" in record:
                slab_record["expires"] = int(record["expires"])
        #nodes added by OAR only and then imported to SFA
        #elif type == "node":
            #if not "hostname" in slab_record:
                #if not "hostname" in record:
                    #raise MissingSfaInfo("hostname")
                #slab_record["hostname"] = record["hostname"]
            #if not "model" in slab_record:
                #slab_record["model"] = "geni"
        #elif type == "authority":
            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
            #if not "name" in slab_record:
                #slab_record["name"] = hrn
            #if not "abbreviated_name" in slab_record:
                #slab_record["abbreviated_name"] = hrn
            #if not "enabled" in slab_record:
                #slab_record["enabled"] = True
            #if not "is_public" in slab_record:
                #slab_record["is_public"] = True
966 def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
967 """ Transforms unix timestamp into valid OAR date format """
969 #Used in case of a scheduled experiment (not immediate)
970 #To run an XP immediately, don't specify date and time in RSpec
971 #They will be set to None.
973 #transform the xp_utc_timestamp into server readable time
974 xp_server_readable_date = datetime.fromtimestamp(int(\
975 xp_utc_timestamp)).strftime(self.time_format)
977 return xp_server_readable_date
    def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
                        lease_start_time, lease_duration, slice_user=None):
        # Submit an OAR job reserving `added_nodes` for this slice, then
        # configure and launch the senslab experiment wrapper.
        # NOTE(review): many lines are elided from this chunk: the
        # `lease_dict = {}` and `reqdict = {}` / `nodeid_list = []`
        # initialisations, the node-id lookup inside the resource loop,
        # `walltime = []`, the try/except around __process_walltime and
        # around the job-id lookup, several string-continuation lines
        # inside logger calls, the `return reqdict` of the request builder,
        # the `jarname = \` assignment, and the final `return jobid`.
        # `subprocess` is also not among the visible imports -- confirm.
        # Visible code kept as-is.
        lease_dict['lease_start_time'] = lease_start_time
        lease_dict['lease_duration'] = lease_duration
        lease_dict['added_nodes'] = added_nodes
        lease_dict['slice_name'] = slice_name
        lease_dict['slice_user'] = slice_user
        lease_dict['grain'] = self.GetLeaseGranularity()
        lease_dict['time_format'] = self.time_format

        def __create_job_structure_request_for_OAR(lease_dict):
            """ Creates the structure needed for a correct POST on OAR.
            Makes the timestamp transformation into the appropriate format.
            Sends the POST request to create the job with the resources in
            """
            reqdict['workdir'] = '/tmp'
            reqdict['resource'] = "{network_address in ("
            for node in lease_dict['added_nodes']:
                logger.debug("\r\n \r\n OARrestapi \t __create_job_structure_request_for_OAR \
                # Get the ID of the node
                reqdict['resource'] += "'" + nodeid + "', "
                nodeid_list.append(nodeid)
            custom_length = len(reqdict['resource'])- 2
            reqdict['resource'] = reqdict['resource'][0:custom_length] + \
                                ")}/nodes=" + str(len(nodeid_list))

            def __process_walltime(duration):
                """ Calculates the walltime in seconds from the duration in H:M:S
                specified in the RSpec.
                """
                # Fixing the walltime by adding a few delays.
                # First put the walltime in seconds oarAdditionalDelay = 20;
                # additional delay for /bin/sleep command to
                # take in account prologue and epilogue scripts execution
                # int walltimeAdditionalDelay = 120; additional delay
                desired_walltime = duration
                total_walltime = desired_walltime + 140#+2 min 20
                sleep_walltime = desired_walltime + 20 #+20 sec
                #Put the walltime back in str form
                #First get the hours
                walltime.append(str(total_walltime / 3600))
                total_walltime = total_walltime - 3600 * int(walltime[0])
                #Get the remaining minutes
                walltime.append(str(total_walltime / 60))
                total_walltime = total_walltime - 60 * int(walltime[1])
                walltime.append(str(total_walltime))
                logger.log_exc(" __process_walltime duration null")
                return walltime, sleep_walltime

            walltime, sleep_walltime = \
                __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
            reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
                        ":" + str(walltime[1]) + ":" + str(walltime[2])
            reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
            #In case of a scheduled experiment (not immediate)
            #To run an XP immediately, don't specify date and time in RSpec
            #They will be set to None.
            # NOTE(review): `is not '0'` is an identity test on a string;
            # should be `!= '0'`.
            if lease_dict['lease_start_time'] is not '0':
                #Readable time accepted by OAR
                start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
                        strftime(lease_dict['time_format'])
                reqdict['reservation'] = start_time
            #If there is not start time, Immediate XP. No need to add special
            reqdict['type'] = "deploy"
            reqdict['directory'] = ""
            reqdict['name'] = "SFA_" + lease_dict['slice_user']

        #Create the request for OAR
        reqdict = __create_job_structure_request_for_OAR(lease_dict)
        # first step : start the OAR job and update the job
        logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
        answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
                                reqdict, slice_user)
        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
        jobid = answer['id']
        logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
                    Impossible to create job %s " %(answer))

        def __configure_experiment(jobid, added_nodes):
            # second step : configure the experiment
            # we need to store the nodes in a yaml (well...) file like this :
            # [1,56,23,14,45,75] with name /tmp/sfa<jobid>.json
            # NOTE(review): the file handle is never closed in the visible
            # lines -- a close() (or `with` block) is presumably elided.
            job_file = open('/tmp/sfa/'+ str(jobid) + '.json', 'w')
            job_file.write(str(added_nodes[0].strip('node')))
            for node in added_nodes[1:len(added_nodes)] :
                job_file.write(', '+ node.strip('node'))

        def __launch_senslab_experiment(jobid):
            # third step : call the senslab-experiment wrapper
            #command= "java -jar target/sfa-1.0-jar-with-dependencies.jar
            # "+str(jobid)+" "+slice_user
            javacmdline = "/usr/bin/java"
            "/opt/senslabexperimentwrapper/sfa-1.0-jar-with-dependencies.jar"
            #ret=subprocess.check_output(["/usr/bin/java", "-jar", ", \
            #str(jobid), slice_user])
            output = subprocess.Popen([javacmdline, "-jar", jarname, str(jobid), \
                slice_user],stdout=subprocess.PIPE).communicate()[0]
            logger.debug("SLABDRIVER \t __configure_experiment wrapper returns%s " \

        logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
                added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
        __configure_experiment(jobid, added_nodes)
        __launch_senslab_experiment(jobid)
    def AddLeases(self, hostname_list, slice_record, lease_start_time, lease_duration):
        """Create a new lease: launch an OAR job on the given nodes on behalf
        of the slice's first PI, starting at lease_start_time (epoch seconds)
        for lease_duration.

        NOTE(review): fragment — the closing of the opening debug call and
        the tail of the method are missing from this view.
        """
        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
                slice_record %s lease_start_time %s lease_duration %s "\
                %( hostname_list, slice_record , lease_start_time, \
        tmp = slice_record['PI'][0].split(".")
        username = tmp[(len(tmp)-1)]  # LDAP login = last component of the PI's hrn
        self.LaunchExperimentOnOAR(hostname_list, slice_record['name'], lease_start_time, lease_duration, username)
        start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
        logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
    #Delete the jobs from job_senslab table
    def DeleteSliceFromNodes(self, slice_record):
        """Kill the OAR job(s) attached to this slice, freeing its nodes.

        NOTE(review): any return value following this call is missing from
        this view of the file.
        """
        self.DeleteJobs(slice_record['oar_job_id'], slice_record['hrn'])
    def GetLeaseGranularity(self):
        """ Returns the granularity of Senslab testbed.
        Defined in seconds. """
        # NOTE(review): the granularity value and its return statement are
        # missing from this fragment of the file.
    def GetLeases(self, lease_filter_dict=None):
        """List current OAR reservations as SFA-style lease dicts.

        Each lease is augmented with slice_hrn, slice_id (urn) and
        component_id_list (node urns). When lease_filter_dict is given
        (expects a 'name' key holding a slice hrn), only that slice's
        leases are returned.

        NOTE(review): fragment — several original lines are missing from
        this view (e.g. the initialisation of resa_user_dict and the
        'else:' that pairs with the lease_filter_dict branch).
        """
        unfiltered_reservation_list = self.GetReservedNodes()

        ##Synchronize slice_table of sfa senslab db
        #self.synchronize_oar_and_slice_table(unfiltered_reservation_list)

        reservation_list = []
        #Find the slice associated with this user senslab ldap uid
        logger.debug(" SLABDRIVER.PY \tGetLeases ")
        #Create user dict first to avoir looking several times for
        #the same user in LDAP SA 27/07/12
        for resa in unfiltered_reservation_list:
            # NOTE(review): the continuation of this debug call is missing
            # from this view (line truncated).
            logger.debug("SLABDRIVER \tGetLeases USER %s"\
            if resa['user'] not in resa_user_dict:
                logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
                # Resolve the OAR login to its LDAP entry, then map the
                # LDAP mail back to the SFA registry user record.
                ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
                ldap_info = ldap_info[0][1]
                user = dbsession.query(RegUser).filter_by(email = \
                                                ldap_info['mail'][0]).first()
                #Separated in case user not in database : record_id not defined SA 17/07//12
                query_slice_info = slab_dbsession.query(SliceSenslab).filter_by(record_id_user = user.record_id)
                # NOTE(review): a SQLAlchemy Query object is presumably
                # always truthy, so this does not test for an empty result;
                # .first() may still return None — confirm intent.
                if query_slice_info:
                    slice_info = query_slice_info.first()

                # Cache per-user lookups to avoid repeated LDAP/DB queries.
                resa_user_dict[resa['user']] = {}
                resa_user_dict[resa['user']]['ldap_info'] = user
                resa_user_dict[resa['user']]['slice_info'] = slice_info

        # NOTE(review): continuation of this debug call is missing from
        # this view (line truncated).
        logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
        for resa in unfiltered_reservation_list:
            resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info'].slice_hrn
            resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
            #resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
            resa['component_id_list'] = []
            #Transform the hostnames into urns (component ids)
            for node in resa['reserved_nodes']:
                #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
                #self.root_auth, node['hostname']))
                slab_xrn = slab_xrn_object(self.root_auth, node['hostname'])
                resa['component_id_list'].append(slab_xrn.urn)

        #Filter the reservation list if necessary
        #Returns all the leases associated with a given slice
        if lease_filter_dict:
            logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
                                            %(lease_filter_dict))
            for resa in unfiltered_reservation_list:
                if lease_filter_dict['name'] == resa['slice_hrn']:
                    reservation_list.append(resa)
        # NOTE(review): in the original this assignment is presumably under
        # the missing 'else:' branch (no filter -> return everything).
        reservation_list = unfiltered_reservation_list

        logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
                                                    %(reservation_list))
        return reservation_list
1234 def augment_records_with_testbed_info (self, sfa_records):
1235 return self.fill_record_info (sfa_records)
    def fill_record_info(self, record_list):
        """
        Given a SFA record, fill in the senslab specific and SFA specific
        fields in the record.

        Slice records gain PI/researcher/oar_job_id/person_ids from the
        slice owner; user records additionally get their slice record
        appended to record_list.

        NOTE(review): fragment — the 'try:' that pairs with the trailing
        'except TypeError' and several other original lines are missing
        from this view.
        """
        logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
        # Accept a single record as well as a list of records.
        if not isinstance(record_list, list):
            record_list = [record_list]

        for record in record_list:
            #If the record is a SFA slice record, then add information
            #about the user of this slice. This kind of
            #information is in the Senslab's DB.
            if str(record['type']) == 'slice':
                #Get slab slice record.
                recslice_list = self.GetSlices(slice_filter = \
                                        str(record['hrn']),\
                                        slice_filter_type = 'slice_hrn')
                # Owner of the slice, looked up in the SFA registry DB.
                recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                    recslice_list[0]['record_id_user']).first()
                logger.debug("SLABDRIVER \tfill_record_info TYPE SLICE RECUSER %s " %(recuser))
                record.update({'PI':[recuser.hrn],
                            'researcher': [recuser.hrn],
                            'name':record['hrn'],
                            'oar_job_id':[rec['oar_job_id'] for rec in recslice_list],
                            'person_ids':[recslice_list[0]['record_id_user']],
                            'geni_urn':'', #For client_helper.py compatibility
                            'keys':'', #For client_helper.py compatibility
                            'key_ids':''}) #For client_helper.py compatibility

                #for rec in recslice_list:
                    #record['oar_job_id'].append(rec['oar_job_id'])
                logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
                        recslice_list %s \r\n \t RECORD %s \r\n \r\n" %(recslice_list,record))
            if str(record['type']) == 'user':
                #The record is a SFA user record.
                #Get the information about his slice from Senslab's DB
                #and add it to the user record.
                recslice_list = self.GetSlices(\
                        slice_filter = record['record_id'],\
                        slice_filter_type = 'record_id_user')

                logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
                        recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
                #Append slice record in records list,
                #therefore fetches user and slice info again(one more loop)
                #Will update PIs and researcher for the slice
                recuser = dbsession.query(RegRecord).filter_by(record_id = \
                                    recslice_list[0]['record_id_user']).first()
                logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
                        recuser %s \r\n \r\n" %(recuser))
                recslice = recslice_list[0]
                recslice.update({'PI':[recuser.hrn],
                    'researcher': [recuser.hrn],
                    'name':record['hrn'],
                    'oar_job_id': [rec['oar_job_id'] for rec in recslice_list],
                    'person_ids':[recslice_list[0]['record_id_user']]})
                # Re-type the copied record as a slice record.
                recslice.update({'type':'slice', \
                    'hrn':recslice_list[0]['slice_hrn']})
                #for rec in recslice_list:
                    #recslice['oar_job_id'].append(rec['oar_job_id'])

                #GetPersons takes [] as filters
                #user_slab = self.GetPersons([{'hrn':recuser.hrn}])
                user_slab = self.GetPersons([record])

                record.update(user_slab[0])
                #For client_helper.py compatibility
                # NOTE(review): this dict literal is unterminated in this
                # view — its closing lines are missing from the fragment.
                record.update( { 'geni_urn':'',
                # NOTE(review): appending to record_list while iterating
                # over it — the appended slice record will itself be
                # visited by the enclosing loop. Confirm this is intended.
                record_list.append(recslice)

                logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
                    INFO TO USER records %s" %(record_list))
            logger.debug("SLABDRIVER.PY \tfill_record_info END \
                #record %s \r\n \r\n " %(record))

        # Python 2 except-clause syntax; the matching 'try:' is not visible
        # in this fragment.
        except TypeError, error:
            logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
        #logger.debug("SLABDRIVER.PY \t fill_record_info ENDENDEND ")
1335 #TODO Update membership? update_membership_list SA 05/07/12
1336 #def update_membership_list(self, oldRecord, record, listName, addFunc, \
1338 ## get a list of the HRNs tht are members of the old and new records
1340 #oldList = oldRecord.get(listName, [])
1343 #newList = record.get(listName, [])
1345 ## if the lists are the same, then we don't have to update anything
1346 #if (oldList == newList):
1349 ## build a list of the new person ids, by looking up each person to get
1353 #records = table.find({'type': 'user', 'hrn': newList})
1354 #for rec in records:
1355 #newIdList.append(rec['pointer'])
1357 ## build a list of the old person ids from the person_ids field
1359 #oldIdList = oldRecord.get("person_ids", [])
1360 #containerId = oldRecord.get_pointer()
1362 ## if oldRecord==None, then we are doing a Register, instead of an
1365 #containerId = record.get_pointer()
1367 ## add people who are in the new list, but not the oldList
1368 #for personId in newIdList:
1369 #if not (personId in oldIdList):
1370 #addFunc(self.plauth, personId, containerId)
1372 ## remove people who are in the old list, but not the new list
1373 #for personId in oldIdList:
1374 #if not (personId in newIdList):
1375 #delFunc(self.plauth, personId, containerId)
1377 #def update_membership(self, oldRecord, record):
1379 #if record.type == "slice":
1380 #self.update_membership_list(oldRecord, record, 'researcher',
1381 #self.users.AddPersonToSlice,
1382 #self.users.DeletePersonFromSlice)
1383 #elif record.type == "authority":
1388 # I don't think you plan on running a component manager at this point
1389 # let me clean up the mess of ComponentAPI that is deprecated anyways
1392 #TODO FUNCTIONS SECTION 04/07/2012 SA
    #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
    def UnBindObjectFromPeer(self, auth, object_type, object_id, shortname):
        """ This method is a hopefully temporary hack to let the sfa correctly
        detach the objects it creates from a remote peer object. This is
        needed so that the sfa federation link can work in parallel with
        RefreshPeer, as RefreshPeer depends on remote objects being correctly

        auth : struct, API authentication structure
        AuthMethod : string, Authentication method to use
        object_type : string, Object type, among 'site','person','slice',
        object_id : int, object_id
        shortname : string, peer shortname

        NOTE(review): stub — only logs a warning; no unbinding is done.
        """
        # NOTE(review): the continuation of this log string is missing from
        # this view (line truncated).
        logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
    #TODO Is BindObjectToPeer still necessary ? Currently does nothing
    def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
                        remote_object_id=None):
        """This method is a hopefully temporary hack to let the sfa correctly
        attach the objects it creates to a remote peer object. This is needed
        so that the sfa federation link can work in parallel with RefreshPeer,
        as RefreshPeer depends on remote objects being correctly marked.

        shortname : string, peer shortname
        remote_object_id : int, remote object_id, set to 0 if unknown

        NOTE(review): stub — only logs a warning; no binding is done.
        """
        logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
    #TODO UpdateSlice 04/07/2012 SA
    #Function should delete and create another job since in senslab slice=job
    def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
        """Updates the parameters of an existing slice with the values in
        Users may only update slices of which they are members.
        PIs may update any of the slices at their sites, or any slices of
        which they are members. Admins may update any slice.
        Only PIs and admins may update max_nodes. Slices cannot be renewed
        (by updating the expires parameter) more than 8 weeks into the future.
        Returns 1 if successful, faults otherwise.

        NOTE(review): stub — only logs a warning; no update is performed.
        """
        logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
    #TODO UpdatePerson 04/07/2012 SA
    def UpdatePerson(self, auth, person_id_or_email, person_fields=None):
        """Updates a person. Only the fields specified in person_fields
        are updated, all other fields are left untouched.
        Users and techs can only update themselves. PIs can only update
        themselves and other non-PIs at their sites.
        Returns 1 if successful, faults otherwise.

        NOTE(review): stub — only logs a warning; no update is performed.
        """
        logger.warning("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
    #TODO GetKeys 04/07/2012 SA
    def GetKeys(self, auth, key_filter=None, return_fields=None):
        """Returns an array of structs containing details about keys.
        If key_filter is specified and is an array of key identifiers,
        or a struct of key attributes, only keys matching the filter
        will be returned. If return_fields is specified, only the
        specified details will be returned.

        Admin may query all keys. Non-admins may only query their own keys.

        NOTE(review): stub — only logs a warning; returns nothing.
        """
        logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
    #TODO DeleteKey 04/07/2012 SA
    def DeleteKey(self, auth, key_id):
        """Delete the specified key (stub).
        Non-admins may only delete their own keys.
        Returns 1 if successful, faults otherwise.

        NOTE(review): stub — only logs a warning; nothing is deleted.
        """
        logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
    #TODO : Check rights to delete person
    def DeletePerson(self, auth, person_record):
        """ Disable an existing account in senslab LDAP.
        Users and techs can only delete themselves. PIs can only
        delete themselves and other non-PIs at their sites.
        Admins can delete anyone.
        Returns 1 if successful, faults otherwise.

        NOTE(review): the return of ret is missing from this view.
        """
        #Disable user account in senslab LDAP
        ret = self.ldap.LdapMarkUserAsDeleted(person_record)
        logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
    #TODO Check DeleteSlice, check rights 05/07/2012 SA
    def DeleteSlice(self, auth, slice_record):
        """ Deletes the specified slice.
        Senslab : Kill the job associated with the slice if there is one
        using DeleteSliceFromNodes.
        Updates the slice record in slab db to remove the slice nodes.

        Users may only delete slices of which they are members. PIs may
        delete any of the slices at their sites, or any slices of which
        they are members. Admins may delete any slice.
        Returns 1 if successful, faults otherwise.

        NOTE(review): the return statement is missing from this view.
        """
        self.DeleteSliceFromNodes(slice_record)
        logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
    #TODO AddPerson 04/07/2012 SA
    #def AddPerson(self, auth, person_fields=None):
    def AddPerson(self, record):#TODO fixing 28/08//2012 SA
        """Adds a new account. Any fields specified in records are used,
        otherwise defaults are used.
        Accounts are disabled by default. To enable an account,
        Returns the new person_id (> 0) if successful, faults otherwise.

        NOTE(review): fragment — the return of ret is missing from this
        view; the account actually is created in LDAP below.
        """
        ret = self.ldap.LdapAddUser(record)
        logger.warning("SLABDRIVER AddPerson return code %s \r\n ", ret)
    #TODO AddPersonToSite 04/07/2012 SA
    def AddPersonToSite (self, auth, person_id_or_email, \
                        site_id_or_login_base=None):
        """ Adds the specified person to the specified site. If the person is
        already a member of the site, no errors are returned. Does not change
        the person's primary site.
        Returns 1 if successful, faults otherwise.

        NOTE(review): stub — only logs a warning.
        """
        logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
    #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
    def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
        """Grants the specified role to the person.
        PIs can only grant the tech and user roles to users and techs at their
        sites. Admins can grant any role to any user.
        Returns 1 if successful, faults otherwise.

        NOTE(review): stub — only logs a warning.
        """
        logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
    #TODO AddPersonKey 04/07/2012 SA
    def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
        """Adds a new key to the specified account.
        Non-admins can only modify their own keys.
        Returns the new key_id (> 0) if successful, faults otherwise.

        NOTE(review): stub — only logs a warning.
        """
        logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
    def DeleteLeases(self, leases_id_list, slice_hrn ):
        """Delete every OAR job whose id is in leases_id_list for the
        slice identified by slice_hrn."""
        for job_id in leases_id_list:
            self.DeleteJobs(job_id, slice_hrn)

        logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
                        \r\n " %(leases_id_list, slice_hrn))