X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fmanagers%2Fslice_manager_pl.py;h=c6531d2975bf2f6ce93f5128f608622ed94b7ad2;hb=4ff67c801ceeb1d0c7ca2863c2b7bf8152182b8f;hp=0a2f48e0be5191a7cb8c6a85c47cd8a994f66dab;hpb=9fff01a53f0b24eddb18d830be4ee70cd5479b42;p=sfa.git diff --git a/sfa/managers/slice_manager_pl.py b/sfa/managers/slice_manager_pl.py index 0a2f48e0..c6531d29 100644 --- a/sfa/managers/slice_manager_pl.py +++ b/sfa/managers/slice_manager_pl.py @@ -1,287 +1,393 @@ -### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $ -### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $ - -import datetime -import time -import traceback +# import sys - +import time,datetime +from StringIO import StringIO from types import StringTypes -from sfa.util.misc import * +from copy import deepcopy +from copy import copy +from lxml import etree + +from sfa.util.sfalogging import sfa_logger +from sfa.util.rspecHelper import merge_rspecs +from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn +from sfa.util.plxrn import hrn_to_pl_slicename from sfa.util.rspec import * from sfa.util.specdict import * from sfa.util.faults import * -from sfa.util.record import GeniRecord +from sfa.util.record import SfaRecord from sfa.util.policy import Policy from sfa.util.prefixTree import prefixTree -from sfa.util.rspec import * from sfa.util.sfaticket import * -from sfa.util.debug import log -from sfa.server.registry import Registries -from sfa.server.aggregate import Aggregates +from sfa.trust.credential import Credential +from sfa.util.threadmanager import ThreadManager +import sfa.util.xmlrpcprotocol as xmlrpcprotocol import sfa.plc.peers as peers +from sfa.util.version import version_core +from sfa.util.callids import Callids -def delete_slice(api, hrn, gid_origin_caller=None): - credential = api.getCredential() - credential.set_gid_origin_caller(gid_origin_caller) - aggregates = Aggregates(api) - for aggregate in aggregates: - success = False - # request hash is optional so lets try the call without it - try: - request_hash=None - aggregates[aggregate].delete_slice(credential, hrn, request_hash, origin_hrn) - success = True - except: - print >> log, "%s" % (traceback.format_exc()) - print >> log, "Error calling delete slice at aggregate %s" % aggregate - - # try sending the request hash if the previous call failed - if not success: - try: - arg_list = [credential, hrn] - request_hash = api.key.compute_hash(arg_list) - aggregates[aggregate].delete_slice(credential, hrn, request_hash) - success = True - except: - print >> log, "%s" % (traceback.format_exc()) - print >> log, "Error calling list nodes at aggregate %s" % aggregate - return 1 +# XX FIX ME: should merge result from multiple aggregates instead of +# calling aggregate implementation +from sfa.managers.aggregate_manager_pl import slice_status + +# we have specialized xmlrpclib.ServerProxy to remember the input url +# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances +def get_serverproxy_url (server): + try: + return server.url + except: + sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals") + return server._ServerProxy__host + server._ServerProxy__handler + +def GetVersion(api): + # peers explicitly in aggregates.xml + peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems() + if peername != api.hrn]) + xrn=Xrn (api.hrn) + sm_version=version_core({'interface':'slicemgr', + 'hrn' : xrn.get_hrn(), + 'urn' : xrn.get_urn(), + 'peers': peers, + }) + # 
local aggregate if present needs to have localhost resolved + if api.hrn in api.aggregates: + local_am_url=get_serverproxy_url(api.aggregates[api.hrn]) + sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname']) + return sm_version -def create_slice(api, hrn, rspec, gid_origin_caller=None): - spec = RSpec() - tempspec = RSpec() - spec.parseString(rspec) - slicename = hrn_to_pl_slicename(hrn) - specDict = spec.toDict() - if specDict.has_key('RSpec'): specDict = specDict['RSpec'] - if specDict.has_key('start_time'): start_time = specDict['start_time'] - else: start_time = 0 - if specDict.has_key('end_time'): end_time = specDict['end_time'] - else: end_time = 0 - - rspecs = {} - aggregates = Aggregates(api) - credential = api.getCredential() - credential.set_gid_origin_caller(gid_origin_caller) - # split the netspecs into individual rspecs - netspecs = spec.getDictsByTagName('NetSpec') - for netspec in netspecs: - net_hrn = netspec['name'] - resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec} - resourceDict = {'RSpec': resources} - tempspec.parseDict(resourceDict) - rspecs[net_hrn] = tempspec.toxml() - - #print "rspecs:", rspecs.keys() - #print "aggregates:", aggregates.keys() - # send each rspec to the appropriate aggregate/sm - for net_hrn in rspecs: +def CreateSliver(api, xrn, creds, rspec, users, call_id): + + if Callids().already_handled(call_id): return "" + + hrn, type = urn_to_hrn(xrn) + + # Validate the RSpec against PlanetLab's schema --disabled for now + # The schema used here needs to aggregate the PL and VINI schemas + # schema = "/var/www/html/schemas/pl.rng" + schema = None + if schema: try: - # if we are directly connected to the aggregate then we can just - # send them the rspec. 
if not, then we may be connected to an sm - # thats connected to the aggregate - if net_hrn in aggregates: - # send the whloe rspec to the local aggregate - if net_hrn in [api.hrn]: - try: - request_hash = None - aggregates[net_hrn].create_slice(credential, hrn, \ - rspec, request_hash) - except: - arg_list = [credential,hrn,rspec] - request_hash = api.key.compute_hash(arg_list) - aggregates[net_hrn].create_slice(credential, hrn, \ - rspec, request_hash) - else: - try: - request_hash = None - aggregates[net_hrn].create_slice(credential, hrn, \ - rspecs[net_hrn], request_hash) - except: - arg_list = [credential,hrn,rspecs[net_hrn]] - request_hash = api.key.compute_hash(arg_list) - aggregates[net_hrn].create_slice(credential, hrn, \ - rspecs[net_hrn], request_hash) - else: - # lets forward this rspec to a sm that knows about the network - arg_list = [credential, net_hrn] - request_hash = api.key.compute_hash(arg_list) - for aggregate in aggregates: - try: - network_found = aggregates[aggregate].get_aggregates(credential, net_hrn) - except: - network_found = aggregates[aggregate].get_aggregates(credential, net_hrn, request_hash) - if network_found: - try: - request_hash = None - aggregates[aggregate].create_slice(credential, hrn, \ - rspecs[net_hrn], request_hash, origin_hrn) - except: - arg_list = [credential, hrn, rspecs[net_hrn]] - request_hash = api.key.compute_hash(arg_list) - aggregates[aggregate].create_slice(credential, hrn, \ - rspecs[net_hrn], request_hash, origin_hrn) - - except: - print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % \ - locals() - traceback.print_exc() + tree = etree.parse(StringIO(rspec)) + except etree.XMLSyntaxError: + message = str(sys.exc_info()[1]) + raise InvalidRSpec(message) + + relaxng_doc = etree.parse(schema) + relaxng = etree.RelaxNG(relaxng_doc) + + if not relaxng(tree): + error = relaxng.error_log.last_error + message = "%s (line %s)" % (error.message, error.line) + raise InvalidRSpec(message) + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + + # Just send entire RSpec to each aggregate + server = api.aggregates[aggregate] + threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id) + + results = threads.get_results() + merged_rspec = merge_rspecs(results) + return merged_rspec + +def renew_slice(api, xrn, creds, expiration_time): + hrn, type = urn_to_hrn(xrn) + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. 
Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + + server = api.aggregates[aggregate] + threads.run(server.RenewSliver, xrn, [credential], expiration_time) + threads.get_results() return 1 -def get_ticket(api, slice_hrn, rspec, gid_origin_caller=None): - +def get_ticket(api, xrn, creds, rspec, users): + slice_hrn, type = urn_to_hrn(xrn) # get the netspecs contained within the clients rspec - client_rspec = RSpec(xml=rspec) - netspecs = client_rspec.getDictsByTagName('NetSpec') + aggregate_rspecs = {} + tree= etree.parse(StringIO(rspec)) + elements = tree.findall('./network') + for element in elements: + aggregate_hrn = element.values()[0] + aggregate_rspecs[aggregate_hrn] = rspec + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems(): + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + server = None + if aggregate in api.aggregates: + server = api.aggregates[aggregate] + else: + net_urn = hrn_to_urn(aggregate, 'authority') + # we may have a peer that knows about this aggregate + for agg in api.aggregates: + target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn) + if not target_aggs or not 'hrn' in target_aggs[0]: + continue + # send the request to this address + url = target_aggs[0]['url'] + server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file) + # aggregate found, no need to keep looping + break + if server is None: + continue + threads.run(server.GetTicket, xrn, credential, aggregate_rspec, users) + + results = threads.get_results() - # create an rspec for each individual rspec - rspecs = {} - temp_rspec = RSpec() - for netspec in netspecs: - net_hrn = netspec['name'] - resources = {'start_time': 0, 'end_time': 0 , - 'network': netspec} - resourceDict = {'RSpec': resources} - temp_rspec.parseDict(resourceDict) - rspecs[net_hrn] = temp_rspec.toxml() + # gather information from each ticket + rspecs = [] + initscripts = [] + slivers = [] + object_gid = None + for result in results: + agg_ticket = SfaTicket(string=result) + attrs = agg_ticket.get_attributes() + if not object_gid: + object_gid = agg_ticket.get_gid_object() + rspecs.append(agg_ticket.get_rspec()) + initscripts.extend(attrs.get('initscripts', [])) + slivers.extend(attrs.get('slivers', [])) - # send the rspec to the appropiate aggregate/sm - aggregates = Aggregates(api) - credential = api.getCredential() - credential.set_gid_origin_caller(gid_origin_caller) - tickets = {} - for net_hrn in rspecs: - try: - # if we are directly connected to the aggregate then we can just - # send them the request. 
if not, then we may be connected to an sm - # thats connected to the aggregate - if net_hrn in aggregates: - try: - ticket = aggregates[net_hrn].get_ticket(credential, slice_hrn, \ - rspecs[net_hrn], None) - tickets[net_hrn] = ticket - except: - arg_list = [credential,hrn,rspecs[net_hrn]] - request_hash = api.key.compute_hash(arg_list) - ticket = aggregates[net_hrn].get_ticket(credential, slice_hrn, \ - rspecs[net_hrn], request_hash) - tickets[net_hrn] = ticket - else: - # lets forward this rspec to a sm that knows about the network - arg_list = [credential, net_hrn] - request_hash = api.key.compute_hash(arg_list) - for agg in aggregates: - try: - network_found = aggregates[agg].get_aggregates(credential, \ - net_hrn) - except: - network_found = aggregates[agg].get_aggregates(credential, \ - net_hrn, request_hash) - if network_found: - try: - ticket = aggregates[aggregate].get_ticket(credential, \ - slice_hrn, rspecs[net_hrn], None) - tickets[aggregate] = ticket - except: - arg_list = [credential, hrn, rspecs[net_hrn]] - request_hash = api.key.compute_hash(arg_list) - aggregates[aggregate].get_ticket(credential, slice_hrn, \ - rspecs[net_hrn], request_hash) - tickets[aggregate] = ticket - except: - print >> log, "Error getting ticket for %(slice_hrn)s at aggregate %(net_hrn)s" % \ - locals() - + # merge info + attributes = {'initscripts': initscripts, + 'slivers': slivers} + merged_rspec = merge_rspecs(rspecs) + # create a new ticket - new_ticket = SfaTicket(subject = slice_hrn) - new_ticket.set_gid_caller(api.auth.client_gid) - - tmp_rspec = RSpec() - networks = [] - valid_data = {} - # merge data from aggregate ticket into new ticket - for agg_ticket in tickets.values(): - agg_ticket = SfaTicket(string=agg_ticket) - object_gid = agg_ticket.get_gid_object() - new_ticket.set_gid_object(object_gid) - new_ticket.set_issuer(key=api.key, subject=api.hrn) - new_ticket.set_pubkey(object_gid.get_pubkey()) - - #new_ticket.set_attributes(data) - tmp_rspec.parseString(agg_ticket.get_rspec()) - networks.extend([{'NetSpec': tmp_rspec.getDictsByTagName('NetSpec')}]) - + ticket = SfaTicket(subject = slice_hrn) + ticket.set_gid_caller(api.auth.client_gid) + ticket.set_issuer(key=api.key, subject=api.hrn) + ticket.set_gid_object(object_gid) + ticket.set_pubkey(object_gid.get_pubkey()) #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn)) - resources = {'networks': networks, 'start_time': 0, 'duration': 0} - resourceDict = {'RSpec': resources} - tmp_rspec.parseDict(resourceDict) - new_ticket.set_rspec(tmp_rspec.toxml()) - - new_ticket.encode() - new_ticket.sign() - return new_ticket.save_to_string(save_parents=True) - -def start_slice(api, hrn): - slicename = hrn_to_pl_slicename(hrn) - slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0] - attributes = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id']) - attribute_id = attreibutes[0]['slice_attribute_id'] - api.plshell.UpdateSliceTag(api.plauth, attribute_id, "1" ) + ticket.set_attributes(attributes) + ticket.set_rspec(merged_rspec) + ticket.encode() + ticket.sign() + return ticket.save_to_string(save_parents=True) + + +def delete_slice(api, xrn, creds): + hrn, type = urn_to_hrn(xrn) + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first 
+ credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + server = api.aggregates[aggregate] + threads.run(server.DeleteSliver, xrn, credential) + threads.get_results() + return 1 + +def start_slice(api, xrn, creds): + hrn, type = urn_to_hrn(xrn) + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + server = api.aggregates[aggregate] + threads.run(server.Start, xrn, credential) + threads.get_results() return 1 -def stop_slice(api, hrn): - slicename = hrn_to_pl_slicename(hrn) - slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0]['slice_id'] - attributes = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id']) - attribute_id = attributes[0]['slice_attribute_id'] - api.plshell.UpdateSliceTag(api.plauth, attribute_id, "0") +def stop_slice(api, xrn, creds): + hrn, type = urn_to_hrn(xrn) + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + server = api.aggregates[aggregate] + threads.run(server.Stop, xrn, credential) + threads.get_results() + return 1 + +def reset_slice(api, xrn): + """ + Not implemented + """ return 1 -def reset_slice(api, hrn): - # XX not implemented at this interface +def shutdown(api, xrn, creds): + """ + Not implemented + """ return 1 -def get_slices(api): - # XX just import the legacy module and excute that until - # we transition the code to this module - from sfa.plc.slices import Slices - slices = Slices(api) - slices.refresh() - return slices['hrn'] - -def get_rspec(api, hrn=None, origin_gid_caller=None): - from sfa.plc.nodes import Nodes - nodes = Nodes(api, origin_gid_caller=origin_gid_caller) - if hrn: - rspec = nodes.get_rspec(hrn) - else: - nodes.refresh() - rspec = nodes['rspec'] - - return rspec - -""" -Returns the request context required by sfatables. At some point, this mechanism should be changed -to refer to "contexts", which is the information that sfatables is requesting. But for now, we just -return the basic information needed in a dict. 
-""" -def fetch_context(slice_hrn, user_hrn, contexts): - base_context = {'sfa':{'user':{'hrn':user_hrn}}} - return base_context +def status(api, xrn, creds): + """ + Not implemented + """ + return 1 + +def get_slices(api, creds): + + # look in cache first + if api.cache: + slices = api.cache.get('slices') + if slices: + return slices + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + # fetch from aggregates + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + server = api.aggregates[aggregate] + threads.run(server.ListSlices, credential) + + # combime results + results = threads.get_results() + slices = [] + for result in results: + slices.extend(result) + + # cache the result + if api.cache: + api.cache.add('slices', slices) + + return slices + + +# Thierry : caching at the slicemgr level makes sense to some extent +caching=True +#caching=False +def ListResources(api, creds, options, call_id): + + if Callids().already_handled(call_id): + api.logger.info("%d received ListResources with known call_id %s"%(api.interface,call_id)) + return "" + + # get slice's hrn from options + xrn = options.get('geni_slice_urn', '') + (hrn, type) = urn_to_hrn(xrn) + + # get hrn of the original caller + origin_hrn = options.get('origin_hrn', None) + if not origin_hrn: + if isinstance(creds, list): + origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn() + else: + origin_hrn = Credential(string=creds).get_gid_caller().get_hrn() + + # look in cache first + if caching and api.cache and not xrn: + rspec = api.cache.get('nodes') + if rspec: + return rspec + + # get the callers hrn + valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0] + caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn() + + # attempt to use delegated credential first + credential = api.getDelegatedCredential(creds) + if not credential: + credential = api.getCredential() + threads = ThreadManager() + for aggregate in api.aggregates: + # prevent infinite loop. Dont send request back to caller + # unless the caller is the aggregate's SM + if caller_hrn == aggregate and aggregate != api.hrn: + continue + # get the rspec from the aggregate + server = api.aggregates[aggregate] + my_opts = copy(options) + my_opts['geni_compressed'] = False + threads.run(server.ListResources, credential, my_opts, call_id) + #threads.run(server.get_resources, cred, xrn, origin_hrn) + + results = threads.get_results() + merged_rspec = merge_rspecs(results) + + # cache the result + if caching and api.cache and not xrn: + api.cache.add('nodes', merged_rspec) + + return merged_rspec def main(): r = RSpec() r.parseFile(sys.argv[1]) rspec = r.toDict() - create_slice(None,'plc.princeton.tmacktestslice',rspec) + CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice') if __name__ == "__main__": main()