from sfa.util.version import version_core
from sfa.util.callids import Callids
-# XX FIX ME: should merge result from multiple aggregates instead of
-# calling aggregate implementation
-from sfa.managers.aggregate_manager_pl import slice_status
-
# we have specialized xmlrpclib.ServerProxy to remember the input url
# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
def get_serverproxy_url (server):
merged_rspec = merge_rspecs(results)
return merged_rspec
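# Illustration only, under assumptions: get_serverproxy_url above relies on the
# proxy object remembering the url it was created with. The real wrapper class
# lives elsewhere in sfa and may differ; a minimal sketch of the idea looks like
# this (URLRememberingProxy is a hypothetical name, not an sfa class):
import xmlrpclib

class URLRememberingProxy(xmlrpclib.ServerProxy):
    def __init__(self, url, **kwargs):
        xmlrpclib.ServerProxy.__init__(self, url, **kwargs)
        # keep the input url around so helpers like get_serverproxy_url can read it back
        self.url = url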
-def renew_slice(api, xrn, creds, expiration_time):
-    hrn, type = urn_to_hrn(xrn)
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+    if Callids().already_handled(call_id): return True
+    (hrn, type) = urn_to_hrn(xrn)
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
continue
server = api.aggregates[aggregate]
-        threads.run(server.RenewSliver, xrn, [credential], expiration_time)
-    threads.get_results()
-    return 1
+        threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
+    # 'and' the results
+    return reduce (lambda x,y: x and y, threads.get_results() , True)
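# For the record, the reduce just above is a plain logical 'and' over the
# per-aggregate answers, defaulting to True when there is no result at all;
# an equivalent spelling would be: return all(threads.get_results())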
def get_ticket(api, xrn, creds, rspec, users):
slice_hrn, type = urn_to_hrn(xrn)
"""
return 1
-def get_slices(api, creds):
+# Thierry : caching at the slicemgr level makes sense to some extent
+caching=True
+#caching=False
+def ListSlices(api, creds, call_id):
+
+    if Callids().already_handled(call_id): return []
# look in cache first
-    if api.cache:
+    if caching and api.cache:
slices = api.cache.get('slices')
if slices:
return slices
if caller_hrn == aggregate and aggregate != api.hrn:
continue
server = api.aggregates[aggregate]
-        threads.run(server.ListSlices, credential)
+        threads.run(server.ListSlices, credential, call_id)
    # combine results
results = threads.get_results()
slices.extend(result)
# cache the result
-    if api.cache:
+    if caching and api.cache:
api.cache.add('slices', slices)
return slices
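# Illustration only, not the real sfa.util.callids code: the call_id threaded
# through these calls lets an interface recognize a request it has already
# served - e.g. when a call fans out and loops back through the slice manager -
# and answer it with a cheap empty result instead of doing the work twice.
# A minimal in-memory equivalent of that pattern could look like this
# (_IllustrativeCallids is a made-up name):
class _IllustrativeCallids:
    # class-level set so every instance shares the same memory of past calls
    _seen = set()
    def already_handled(self, call_id):
        # an empty call_id carries no identity, so it is never deduplicated
        if not call_id:
            return False
        if call_id in self._seen:
            return True
        self._seen.add(call_id)
        return False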
-# Thierry : caching at the slicemgr level makes sense to some extent
-caching=True
-#caching=False
def ListResources(api, creds, options, call_id):
-    if Callids().already_handled(call_id):
-        api.logger.info("%d received ListResources with known call_id %s"%(api.interface,call_id))
-        return ""
+    if Callids().already_handled(call_id): return ""
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
return merged_rspec
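# Illustration only, under assumptions: merge_rspecs is the existing sfa helper
# and its exact behavior is not shown here. The general idea - one advertisement
# per aggregate, collapsed into a single document by gathering all children
# under one root - can be sketched as follows (_merge_rspecs_sketch is a
# made-up name, not the sfa implementation):
from xml.etree import ElementTree

def _merge_rspecs_sketch(rspec_strings):
    merged = None
    for text in rspec_strings:
        if not text:
            continue
        root = ElementTree.fromstring(text)
        if merged is None:
            merged = root
        else:
            # move this aggregate's children under the first document's root
            for child in list(root):
                merged.append(child)
    return ElementTree.tostring(merged) if merged is not None else ""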
+# first draft of a merging SliverStatus
+def SliverStatus(api, slice_xrn, creds, call_id):
+    if Callids().already_handled(call_id): return {}
+    # attempt to use delegated credential first
+    credential = api.getDelegatedCredential(creds)
+    if not credential:
+        credential = api.getCredential()
+    threads = ThreadManager()
+    for aggregate in api.aggregates:
+        server = api.aggregates[aggregate]
+        threads.run (server.SliverStatus, slice_xrn, credential, call_id)
+    results = threads.get_results()
+
+    # get rid of any void result - e.g. when an aggregate saw an already-handled call_id,
+    # in which case by convention it returned {}
+    results = [ result for result in results if result and result['geni_resources']]
+
+    # do not try to combine if there's no result
+    if not results : return {}
+
+    # otherwise let's merge stuff
+    overall = {}
+
+    # all results are expected to carry the same urn
+    overall['geni_urn'] = results[0]['geni_urn']
+
+    # consolidate geni_status - simple model using max on a total order
+    states = [ 'ready', 'configuring', 'failed', 'unknown' ]
+    # map each state name to its index, i.e. {'ready':0,'configuring':1,'failed':2,'unknown':3}
+    shash = dict ( zip ( states, range(len(states)) ) )
+    def combine_status (x,y):
+        return states [ max (shash[x], shash[y]) ]
+    overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )
+
+    # append all geni_resources
+    overall['geni_resources'] = \
+        reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+
+    return overall
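# Worked example for the merge above (hypothetical inputs): if one aggregate
# reports 'ready' and another 'configuring', the total order
# ready < configuring < failed < unknown makes the overall geni_status
# 'configuring', and the two geni_resources lists are simply concatenated:
#   [{'geni_urn': urn, 'geni_status': 'ready',       'geni_resources': [r1]},
#    {'geni_urn': urn, 'geni_status': 'configuring', 'geni_resources': [r2]}]
#   -> {'geni_urn': urn, 'geni_status': 'configuring', 'geni_resources': [r1, r2]}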
+
def main():
r = RSpec()
r.parseFile(sys.argv[1])