X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fmanagers%2Fslice_manager_pl.py;h=5bb923b9291ecf8836c709f78ae10138841e7536;hb=5307e4e09dd0a7982f0c6dfdd13065425c802005;hp=0cee6523d6b139da13072b812613f636d5bf7cdf;hpb=f2242f087769c36c20c443b92350cdded3a7d314;p=sfa.git

diff --git a/sfa/managers/slice_manager_pl.py b/sfa/managers/slice_manager_pl.py
index 0cee6523..5bb923b9 100644
--- a/sfa/managers/slice_manager_pl.py
+++ b/sfa/managers/slice_manager_pl.py
@@ -23,58 +23,41 @@ from sfa.util.threadmanager import ThreadManager
 import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 import sfa.plc.peers as peers
 from sfa.util.version import version_core
+from sfa.util.callids import Callids
+
+# XX FIX ME: should merge result from multiple aggregates instead of
+# calling aggregate implementation
+from sfa.managers.aggregate_manager_pl import slice_status
+
+# we have specialized xmlrpclib.ServerProxy to remember the input url
+# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+def get_serverproxy_url (server):
+    try:
+        return server.url
+    except:
+        sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+        return server._ServerProxy__host + server._ServerProxy__handler
 
 def GetVersion(api):
-    peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.aggregates.items()
+    # peers explicitly in aggregates.xml
+    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
                    if peername != api.hrn])
     xrn=Xrn (api.hrn)
-    return version_core({'interface':'slicemgr',
-                         'hrn' : xrn.get_hrn(),
-                         'urn' : xrn.get_urn(),
-                         'peers': peers,
-                         })
-
-def slice_status(api, slice_xrn, creds ):
-    hrn, type = urn_to_hrn(slice_xrn)
-    # find out where this slice is currently running
-    api.logger.info(hrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    api.logger.info("Checking status for %s" % slicename)
-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids','person_ids','name','expires'])
-    if len(slices) == 0:
-        raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
-    slice = slices[0]
-
-    nodes = api.plshell.GetNodes(api.plauth, slice['node_ids'],
-                                 ['hostname', 'boot_state', 'last_contact'])
-    api.logger.info(slice)
-    api.logger.info(nodes)
-
-    result = {}
-    result['geni_urn'] = Xrn(slice_xrn, 'slice').get_urn()
-    result['geni_status'] = 'unknown'
-    result['pl_login'] = slice['name']
-    result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
-
-    resources = []
-
-    for node in nodes:
-        res = {}
-        res['pl_hostname'] = node['hostname']
-        res['pl_boot_state'] = node['boot_state']
-        res['pl_last_contact'] = node['last_contact']
-        if not node['last_contact'] is None:
-            res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
-        res['geni_urn'] = ''
-        res['geni_status'] = 'unknown'
-        res['geni_error'] = ''
-
-        resources.append(res)
-
-    result['geni_resources'] = resources
-    return result
+    sm_version=version_core({'interface':'slicemgr',
+                             'hrn' : xrn.get_hrn(),
+                             'urn' : xrn.get_urn(),
+                             'peers': peers,
+                             })
+    # local aggregate if present needs to have localhost resolved
+    if api.hrn in api.aggregates:
+        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+    return sm_version
+
+def CreateSliver(api, xrn, creds, rspec, users, call_id):
+
+    if Callids().already_handled(call_id): return ""
-def create_slice(api, xrn, creds, rspec, users):
     hrn, type = urn_to_hrn(xrn)
 
     # Validate the RSpec against PlanetLab's schema --disabled for now
@@ -113,7 +96,7 @@ def create_slice(api, xrn, creds, rspec, users):
 
         # Just send entire RSpec to each aggregate
         server = api.aggregates[aggregate]
-        threads.run(server.CreateSliver, xrn, credential, rspec, users)
+        threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)
 
     results = threads.get_results()
     merged_rspec = merge_rspecs(results)
@@ -161,7 +144,7 @@ def get_ticket(api, xrn, creds, rspec, users):
     if not credential:
        credential = api.getCredential()
     threads = ThreadManager()
-    for aggregate, aggregate_rspec in aggregate_rspecs.items():
+    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
         # prevent infinite loop. Dont send request back to caller
         # unless the caller is the aggregate's SM
         if caller_hrn == aggregate and aggregate != api.hrn:
@@ -220,9 +203,9 @@ def get_ticket(api, xrn, creds, rspec, users):
 
     return ticket.save_to_string(save_parents=True)
 
-def delete_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-
+def DeleteSliver(api, xrn, creds, call_id):
+    if Callids().already_handled(call_id): return ""
+    (hrn, type) = urn_to_hrn(xrn)
     # get the callers hrn
     valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
     caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
@@ -238,7 +221,7 @@ def delete_slice(api, xrn, creds):
         if caller_hrn == aggregate and aggregate != api.hrn:
             continue
         server = api.aggregates[aggregate]
-        threads.run(server.DeleteSliver, xrn, credential)
+        threads.run(server.DeleteSliver, xrn, credential, call_id)
 
     threads.get_results()
     return 1
@@ -341,12 +324,20 @@ def get_slices(api, creds):
         api.cache.add('slices', slices)
 
     return slices
-
-def get_rspec(api, creds, options):
-
+
+
+# Thierry : caching at the slicemgr level makes sense to some extent
+caching=True
+#caching=False
+def ListResources(api, creds, options, call_id):
+
+    if Callids().already_handled(call_id):
+        api.logger.info("%d received ListResources with known call_id %s"%(api.interface,call_id))
+        return ""
+
     # get slice's hrn from options
     xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
+    (hrn, type) = urn_to_hrn(xrn)
 
     # get hrn of the original caller
     origin_hrn = options.get('origin_hrn', None)
@@ -357,13 +348,11 @@ def get_rspec(api, creds, options):
             origin_hrn = Credential(string=creds).get_gid_caller().get_hrn()
 
     # look in cache first
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         rspec = api.cache.get('nodes')
         if rspec:
             return rspec
 
-    hrn, type = urn_to_hrn(xrn)
-
     # get the callers hrn
     valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
     caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
@@ -382,14 +371,14 @@ def get_rspec(api, creds, options):
         server = api.aggregates[aggregate]
         my_opts = copy(options)
         my_opts['geni_compressed'] = False
-        threads.run(server.ListResources, credential, my_opts)
+        threads.run(server.ListResources, credential, my_opts, call_id)
         #threads.run(server.get_resources, cred, xrn, origin_hrn)
 
     results = threads.get_results()
     merged_rspec = merge_rspecs(results)
 
     # cache the result
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         api.cache.add('nodes', merged_rspec)
 
     return merged_rspec
@@ -398,7 +387,7 @@ def main():
     r = RSpec()
     r.parseFile(sys.argv[1])
     rspec = r.toDict()
-    create_slice(None,'plc.princeton.tmacktestslice',rspec)
+    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
 
 if __name__ == "__main__":
     main()
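
Note: the central change in this patch is that CreateSliver, DeleteSliver and ListResources now take a call_id argument and return early when Callids().already_handled(call_id) is true, so a request that bounces between the slice manager and its aggregates is acted on only once. The sfa.util.callids module itself is not part of this diff; the sketch below is only an illustration of that guard pattern, and every name in it (CallIdCache, ttl, call_ids) is hypothetical rather than the real implementation.

# Illustrative sketch only -- the real guard lives in sfa.util.callids,
# whose internals are not shown in this diff.  All names below are assumed.
import threading
import time

class CallIdCache(object):
    """Hypothetical in-memory record of recently seen call ids."""

    def __init__(self, ttl=600):
        self.ttl = ttl              # seconds a call_id is remembered (assumed value)
        self.seen = {}              # call_id -> timestamp of first sighting
        self.lock = threading.Lock()

    def already_handled(self, call_id):
        # An empty call_id means the caller did not ask for de-duplication.
        if not call_id:
            return False
        now = time.time()
        with self.lock:
            # forget entries older than the ttl so the table cannot grow forever
            for cid, stamp in list(self.seen.items()):
                if now - stamp > self.ttl:
                    del self.seen[cid]
            if call_id in self.seen:
                return True
            self.seen[call_id] = now
            return False

call_ids = CallIdCache()

# Usage mirroring the pattern introduced by the patch:
#   def CreateSliver(api, xrn, creds, rspec, users, call_id):
#       if call_ids.already_handled(call_id): return ""
#       ...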