diff --git a/sfa/managers/slice_manager_pl.py b/sfa/managers/slice_manager_pl.py
index d8977e3c..c6531d29 100644
--- a/sfa/managers/slice_manager_pl.py
+++ b/sfa/managers/slice_manager_pl.py
@@ -23,22 +23,41 @@ from sfa.util.threadmanager import ThreadManager
 import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 import sfa.plc.peers as peers
 from sfa.util.version import version_core
+from sfa.util.callids import Callids
 
 # XX FIX ME: should merge result from multiple aggregates instead of
 # calling aggregate implementation
 from sfa.managers.aggregate_manager_pl import slice_status
 
+# we have specialized xmlrpclib.ServerProxy to remember the input url
+# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+def get_serverproxy_url (server):
+    try:
+        return server.url
+    except:
+        sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+        return server._ServerProxy__host + server._ServerProxy__handler
+
 def GetVersion(api):
-    peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.aggregates.items()
+    # peers explicitly in aggregates.xml
+    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
                    if peername != api.hrn])
     xrn=Xrn (api.hrn)
-    return version_core({'interface':'slicemgr',
-                         'hrn' : xrn.get_hrn(),
-                         'urn' : xrn.get_urn(),
-                         'peers': peers,
-                         })
+    sm_version=version_core({'interface':'slicemgr',
+                             'hrn' : xrn.get_hrn(),
+                             'urn' : xrn.get_urn(),
+                             'peers': peers,
+                             })
+    # local aggregate if present needs to have localhost resolved
+    if api.hrn in api.aggregates:
+        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+    return sm_version
+
+def CreateSliver(api, xrn, creds, rspec, users, call_id):
+
+    if Callids().already_handled(call_id): return ""
 
-def create_slice(api, xrn, creds, rspec, users):
     hrn, type = urn_to_hrn(xrn)
 
     # Validate the RSpec against PlanetLab's schema --disabled for now
@@ -77,7 +96,7 @@ def create_slice(api, xrn, creds, rspec, users):
 
         # Just send entire RSpec to each aggregate
         server = api.aggregates[aggregate]
-        threads.run(server.CreateSliver, xrn, credential, rspec, users)
+        threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)
 
     results = threads.get_results()
     merged_rspec = merge_rspecs(results)
@@ -125,7 +144,7 @@ def get_ticket(api, xrn, creds, rspec, users):
     if not credential:
         credential = api.getCredential()
     threads = ThreadManager()
-    for aggregate, aggregate_rspec in aggregate_rspecs.items():
+    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
         # prevent infinite loop. Dont send request back to caller
         # unless the caller is the aggregate's SM
         if caller_hrn == aggregate and aggregate != api.hrn:
@@ -305,12 +324,20 @@ def get_slices(api, creds):
         api.cache.add('slices', slices)
 
     return slices
-
-def get_rspec(api, creds, options):
-
+
+
+# Thierry : caching at the slicemgr level makes sense to some extent
+caching=True
+#caching=False
+def ListResources(api, creds, options, call_id):
+
+    if Callids().already_handled(call_id):
+        api.logger.info("%d received ListResources with known call_id %s"%(api.interface,call_id))
+        return ""
+
     # get slice's hrn from options
     xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
+    (hrn, type) = urn_to_hrn(xrn)
 
     # get hrn of the original caller
     origin_hrn = options.get('origin_hrn', None)
@@ -321,13 +348,11 @@ def get_rspec(api, creds, options):
         origin_hrn = Credential(string=creds).get_gid_caller().get_hrn()
 
     # look in cache first
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         rspec = api.cache.get('nodes')
         if rspec:
             return rspec
 
-    hrn, type = urn_to_hrn(xrn)
-
     # get the callers hrn
     valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
     caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
@@ -346,14 +371,14 @@ def get_rspec(api, creds, options):
         server = api.aggregates[aggregate]
         my_opts = copy(options)
         my_opts['geni_compressed'] = False
-        threads.run(server.ListResources, credential, my_opts)
+        threads.run(server.ListResources, credential, my_opts, call_id)
         #threads.run(server.get_resources, cred, xrn, origin_hrn)
 
     results = threads.get_results()
     merged_rspec = merge_rspecs(results)
 
     # cache the result
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         api.cache.add('nodes', merged_rspec)
 
     return merged_rspec
@@ -362,7 +387,7 @@ def main():
     r = RSpec()
     r.parseFile(sys.argv[1])
     rspec = r.toDict()
-    create_slice(None,'plc.princeton.tmacktestslice',rspec)
+    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
 
 if __name__ == "__main__":
     main()
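
The call_id argument threaded through CreateSliver and ListResources lets the slice manager recognize a request it has already served and return early via Callids().already_handled(call_id). As a rough illustration of that guard only, the sketch below assumes a plain in-memory set; it is not the actual sfa.util.callids implementation, and the ListResources stub merely restates the early return visible in the diff.

import threading

class Callids:
    """Hypothetical stand-in: remember call_ids already seen so that a request
    forwarded between interfaces is only acted upon once."""
    _seen = set()
    _lock = threading.Lock()

    def already_handled(self, call_id):
        # an empty call_id disables the check (callers may not provide one)
        if not call_id:
            return False
        with self._lock:
            if call_id in self._seen:
                return True
            self._seen.add(call_id)
            return False

def ListResources(api, creds, options, call_id):
    # same early-return pattern as the version added in the diff
    if Callids().already_handled(call_id):
        return ""
    # ... fan out to the aggregates and merge their RSpecs here ...
    return "<RSpec/>"

# first call with a given call_id does the work, a repeat is a no-op
print(ListResources(None, [], {}, "call-0001"))   # -> "<RSpec/>"
print(ListResources(None, [], {}, "call-0001"))   # -> ""

Keeping the seen set as a class attribute means the guard still works even though a fresh Callids() object is instantiated on every call, which mirrors how the guard is invoked in the code above.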