from sfa.util.specdict import *
from sfa.util.faults import *
from sfa.util.record import SfaRecord
+from sfa.rspecs.sfa_rspec import SfaRSpec
from sfa.util.policy import Policy
from sfa.util.prefixTree import prefixTree
from sfa.util.sfaticket import *
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
import sfa.plc.peers as peers
from sfa.util.version import version_core
+from sfa.rspecs.rspec_version import RSpecVersion
from sfa.util.callids import Callids
-# XX FIX ME: should merge result from multiple aggregates instead of
-# calling aggregate implementation
-from sfa.managers.aggregate_manager_pl import slice_status
-
# we have specialized xmlrpclib.ServerProxy to remember the input url
# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
def get_serverproxy_url (server):
server = api.aggregates[aggregate]
threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)
- results = threads.get_results()
- merged_rspec = merge_rspecs(results)
- return merged_rspec
+ results = threads.get_results()
+ rspec = SfaRSpec()
+ for result in results:
+ rspec.merge(result)
+ return rspec
def RenewSliver(api, xrn, creds, expiration_time, call_id):
if Callids().already_handled(call_id): return True
xrn = options.get('geni_slice_urn', '')
(hrn, type) = urn_to_hrn(xrn)
+ # get the rspec's return format from options
+ rspec_version = RSpecVersion(options.get('rspec_version', 'SFA 1'))
+ version_string = "rspec_%s_%s" % (rspec_version.format, rspec_version.version)
+
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
if not origin_hrn:
# look in cache first
if caching and api.cache and not xrn:
- rspec = api.cache.get('nodes')
+ rspec = api.cache.get(version_string)
if rspec:
return rspec
#threads.run(server.get_resources, cred, xrn, origin_hrn)
results = threads.get_results()
- merged_rspec = merge_rspecs(results)
+ rspec = SfaRSpec()
+ for result in results:
+ rspec.merge(result)
# cache the result
if caching and api.cache and not xrn:
- api.cache.add('nodes', merged_rspec)
+ api.cache.add(version_string, rspec)
- return merged_rspec
+ return rspec.toxml()
+
# first draft at a merging SliverStatus
def SliverStatus(api, slice_xrn, creds, call_id):
    """Fan SliverStatus out to every known aggregate and merge the answers.

    Returns {} when this call_id was already handled, or when no aggregate
    produced a usable result. Otherwise returns a single status dict:
    - 'geni_urn'       : taken from the first result (all are expected equal)
    - 'geni_status'    : the worst status reported, on the total order
                         'ready' < 'configuring' < 'failed' < 'unknown'
    - 'geni_resources' : concatenation of every aggregate's resource list
    """
    if Callids().already_handled(call_id): return {}
    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        server = api.aggregates[aggregate]
        threads.run (server.SliverStatus, slice_xrn, credential, call_id)
    results = threads.get_results()

    # get rid of any void result - e.g. when call_id was hit where by convention we return {}
    # use .get() so a result dict lacking 'geni_resources' is dropped instead
    # of raising KeyError
    results = [ result for result in results if result and result.get('geni_resources')]

    # do not try to combine if there's no result
    if not results : return {}

    # otherwise let's merge stuff
    overall = {}

    # mmh, it is expected that all results carry the same urn
    overall['geni_urn'] = results[0]['geni_urn']

    # consolidate geni_status - simple model using max on a total order
    states = [ 'ready', 'configuring', 'failed', 'unknown' ]
    # hash name to index: {'ready':0,'configuring':1,'failed':2,'unknown':3}
    shash = dict ( zip ( states, range(len(states)) ) )
    def combine_status (x,y):
        # BUG FIX: the original did shash(x) (a dict is subscripted, not
        # called) and then indexed shash by an int where the state *name*
        # list is needed.  Unrecognized state strings rank as 'unknown'.
        worst = len(states) - 1
        return states [ max (shash.get(x, worst), shash.get(y, worst)) ]
    overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )

    # append all geni_resources
    overall['geni_resources'] = \
        reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])

    return overall
def main():
r = RSpec()