updates for MAX aggregate
author    Tony Mack <tmack@paris.CS.Princeton.EDU>
          Mon, 16 May 2011 19:52:01 +0000 (15:52 -0400)
committer Tony Mack <tmack@paris.CS.Princeton.EDU>
          Mon, 16 May 2011 19:52:01 +0000 (15:52 -0400)
sfa/managers/aggregate_manager_max.py
sfa/plc/api.py
sfa/util/config.py

diff --git a/sfa/managers/aggregate_manager_max.py b/sfa/managers/aggregate_manager_max.py
index 9f99bcf..828e3d2 100644
-#!/usr/bin/python
-
-import sys
-import pdb
-import xml.dom.minidom
-
-from sfa.util.rspec import RSpec
-from sfa.util.xrn import urn_to_hrn, hrn_to_urn, get_authority
-from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.storage import *
-from sfa.util.policy import Policy
-from sfa.server.aggregate import Aggregates
-from sfa.server.registry import Registries
-from sfa.util.faults import *
-from sfa.util.callids import Callids
-
-
-SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
-SFA_MAX_DEFAULT_RSPEC = '/etc/sfa/max_physical.xml'
-SFA_MAX_CANNED_RSPEC = '/etc/sfa/max_physical_canned.xml'
-
-topology = {}
-
-class SfaOutOfResource(SfaFault):
-    def __init__(self, interface):
-        faultString = "Interface " + interface + " not available"
-        SfaFault.__init__(self, 100, faultString, '')
-
-class SfaNoPairRSpec(SfaFault):
-    def __init__(self, interface, interface2):
-        faultString = "Interface " + interface + " should be paired with " + interface2
-        SfaFault.__init__(self, 100, faultString, '')
-
-# Returns a mapping from interfaces to the nodes they lie on and their peer interfaces
-# i -> node,i_peer
-
-def get_interface_map():
-    r = RSpec()
-    r.parseFile(SFA_MAX_DEFAULT_RSPEC)
-    rspec = r.toDict()
-    capacity = rspec['rspec']['capacity']
-    netspec = capacity[0]['netspec'][0]
-    linkdefs = {}
-    for n in netspec['nodespec']:
-        ifspecs = n['ifspec']
-        nodename = n['node']
-        for i in ifspecs:
-            ifname = i['name']
-            linkid = i['linkid']
-
-            if (linkdefs.has_key(linkid)):
-                linkdefs[linkid].extend([(nodename,ifname)])
-            else:
-                linkdefs[linkid]=[(nodename,ifname)]
-    
-    # topology maps each interface -> (node it sits on, peer interface)
-    topology={}
-
-    for k in linkdefs.keys():
-        (n1,i1) = linkdefs[k][0]
-        (n2,i2) = linkdefs[k][1]
-
-        topology[i1] = (n1, i2)
-        topology[i2] = (n2, i1)
-        
-
-    return topology    
-
-    
-def allocations_to_rspec(allocations):
-    rspec = xml.dom.minidom.parse(SFA_MAX_DEFAULT_RSPEC)
-    req = rspec.firstChild.appendChild(rspec.createElement("request"))
-    for (iname,ip) in allocations:
-        ifspec = req.appendChild(rspec.createElement("ifspec"))
-        ifspec.setAttribute("name","tns:"+iname)
-        ifspec.setAttribute("ip",ip)
-
-    return rspec.toxml()
-        
-    
-def if_endpoints(ifs):
-    nodes=[]
-    for l in ifs:
-        nodes.extend(topology[l][0])
-    return nodes
-
-def lock_state_file():
-    # Noop for demo
-    return True
-
-def unlock_state_file():
-    return True
-    # Noop for demo
-
-def read_alloc_dict():
-    alloc_dict={}
-    rows = open(SFA_MAX_CONF_FILE).read().split('\n')
-    for r in rows:
-        columns = r.split(' ')
-        if (len(columns)==2):
-            hrn = columns[0]
-            allocs = columns[1].split(',')
-            ipallocs = map(lambda alloc:alloc.split('/'), allocs)
-            alloc_dict[hrn]=ipallocs
-    return alloc_dict
-
-def commit_alloc_dict(d):
-    f = open(SFA_MAX_CONF_FILE, 'w')
-    for hrn in d.keys():
-        columns = d[hrn]
-        ipcolumns = map(lambda x:"/".join(x), columns)
-        row = hrn+' '+','.join(ipcolumns)+'\n'
-        f.write(row)
-    f.close()
-
-def collapse_alloc_dict(d):
-    ret = []
-    for k in d.keys():
-        ret.extend(d[k])
-    return ret
-
-
-def alloc_links(api, hrn, links_to_add, links_to_drop):
-    slicename=hrn_to_pl_slicename(hrn)
-    for (iface,ip) in links_to_add:
-        node = topology[iface][0][0]
-        try:
-            api.plshell.AddSliceTag(api.plauth, slicename, "ip_addresses", ip, node)
-            api.plshell.AddSliceTag(api.plauth, slicename, "vsys", "getvlan", node)
-        except Exception: 
-            # Probably a duplicate tag. XXX July 21
-            pass
-    return True
-
-def alloc_nodes(api,hrn, requested_ifs):
-    requested_nodes = if_endpoints(requested_ifs)
-    create_slice_max_aggregate(api, hrn, requested_nodes)
-
-# Taken from slices.py
-
-def create_slice_max_aggregate(api, hrn, nodes):
-    # Get the slice record 
-    global topology
-    topology = get_interface_map()
-    slice = {}
-    registries = Registries(api)
-    registry = registries[api.hrn]
-    credential = api.getCredential()
-    urn = hrn_to_urn(hrn, 'slice')
-    records = registry.Resolve(urn, credential)
-    for record in records:
-        if record.get_type() in ['slice']:
-            slice = record.as_dict()
-    if not slice:
-        raise RecordNotFound(hrn)   
-
-    # Make sure slice exists at plc, if it doesnt add it
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
-    if not slices:
-        parts = slicename.split("_")
-        login_base = parts[0]
-        # if site doesnt exist add it
-        sites = api.plshell.GetSites(api.plauth, [login_base])
-        if not sites:
-            authority = get_authority(hrn)
-            authority_urn = hrn_to_urn(authority, 'authority')
-            site_records = registry.Resolve(authority_urn, credential)
-            site_record = {}
-            if not site_records:
-                raise RecordNotFound(authority)
-            site_record = site_records[0]
-            site = site_record.as_dict()
-                
-            # add the site
-            site.pop('site_id')
-            site_id = api.plshell.AddSite(api.plauth, site)
-        else:
-            site = sites[0]
-            
-        slice_fields = {}
-        slice_keys = ['name', 'url', 'description']
-        for key in slice_keys:
-            if key in slice and slice[key]:
-                slice_fields[key] = slice[key]  
-        api.plshell.AddSlice(api.plauth, slice_fields)
-        slice = slice_fields
-        slice['node_ids'] = 0
-    else:
-        slice = slices[0]    
-
-    # get the list of valid slice users from the registry and make sure
-    # they are added to the slice
-    researchers = record.get('researcher', [])
-    for researcher in researchers:
-        person_record = {}
-        researcher_urn = hrn_to_urn(researcher, 'user')
-        person_records = registry.Resolve(researcher_urn, credential)
-        for record in person_records:
-            if record.get_type() in ['user']:
-                person_record = record
-        if not person_record:
-            pass
-        person_dict = person_record.as_dict()
-        persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
-                                         ['person_id', 'key_ids'])
-
-        # Create the person record 
-        if not persons:
-            person_id=api.plshell.AddPerson(api.plauth, person_dict)
-
-            # The line below enables the user account on the remote aggregate
-            # soon after it is created.
-            # Without this the user key is not transferred to the slice
-            # (as GetSlivers returns the keys of enabled users only),
-            # which prevents the user from logging in to the slice.
-            # We may do additional checks before enabling the user.
-
-            api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
-            key_ids = []
-        else:
-            key_ids = persons[0]['key_ids']
-
-        api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
-                                     slicename)        
-
-        # Get this users local keys
-        keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
-        keys = [key['key'] for key in keylist]
-
-        # add keys that arent already there 
-        for personkey in person_dict['keys']:
-            if personkey not in keys:
-                key = {'key_type': 'ssh', 'key': personkey}
-                api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
-
-    # find out where this slice is currently running
-    nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
-                                    ['hostname'])
-    hostnames = [node['hostname'] for node in nodelist]
-
-    # remove nodes not in rspec
-    deleted_nodes = list(set(hostnames).difference(nodes))
-    # add nodes from rspec
-    added_nodes = list(set(nodes).difference(hostnames))
-
-    api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes) 
-    api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
-
-    return 1
-
-
-def ListResources(api, creds, options, call_id):
-    if Callids().already_handled(call_id): return ""
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
-    # Eg. config line:
-    # plc.princeton.sapan vlan23,vlan45
-
-    allocations = read_alloc_dict()
-    if (hrn and allocations.has_key(hrn)):
-            ret_rspec = allocations_to_rspec(allocations[hrn])
-    else:
-        ret_rspec = open(SFA_MAX_CANNED_RSPEC).read()
-
-    return (ret_rspec)
-
-
-def CreateSliver(api, xrn, creds, rspec_xml, users, call_id):
-    if Callids().already_handled(call_id): return ""
-
-    global topology
-    hrn = urn_to_hrn(xrn)[0]
-    topology = get_interface_map()
-
-    # Check if everything in rspec is either allocated by hrn
-    # or not allocated at all.
-    r = RSpec()
-    r.parseString(rspec_xml)
-    rspec = r.toDict()
-
-    lock_state_file()
-
-    allocations = read_alloc_dict()
-    requested_allocations = rspec_to_allocations (rspec)
-    current_allocations = collapse_alloc_dict(allocations)
-    try:
-        current_hrn_allocations=allocations[hrn]
-    except KeyError:
-        current_hrn_allocations=[]
-
-    # Check request against current allocations
-    requested_interfaces = map(lambda(elt):elt[0], requested_allocations)
-    current_interfaces = map(lambda(elt):elt[0], current_allocations)
-    current_hrn_interfaces = map(lambda(elt):elt[0], current_hrn_allocations)
-
-    for a in requested_interfaces:
-        if (a not in current_hrn_interfaces and a in current_interfaces):
-            raise SfaOutOfResource(a)
-        if (topology[a][1] not in requested_interfaces):
-            raise SfaNoPairRSpec(a,topology[a][1])
-    # Request OK
-
-    # Allocations to delete
-    allocations_to_delete = []
-    for a in current_hrn_allocations:
-        if (a not in requested_allocations):
-            allocations_to_delete.extend([a])
-
-    # Ok, let's do our thing
-    alloc_nodes(api, hrn, requested_interfaces)
-    alloc_links(api, hrn, requested_allocations, allocations_to_delete)
-    allocations[hrn] = requested_allocations
-    commit_alloc_dict(allocations)
-
-    unlock_state_file()
-
-    # xxx - should return altered rspec 
-    # with enough data for the client to understand what's happened
-    return rspec_xml
-
-def rspec_to_allocations(rspec):
-    ifs = []
-    try:
-        ifspecs = rspec['rspec']['request'][0]['ifspec']
-        for l in ifspecs:
-            ifs.extend([(l['name'].replace('tns:',''),l['ip'])])
-    except KeyError:
-        # Bad RSpec
-        pass
-    return ifs
-
-def main():
-    t = get_interface_map()
-    r = RSpec()
-    rspec_xml = open(sys.argv[1]).read()
-    #ListResources(None,'foo')
-    CreateSliver(None, "plc.princeton.sap0", rspec_xml, 'call-id-sap0')
-    
-if __name__ == "__main__":
-    main()
+-from sfa.util.rspec import RSpec
+-import sys
+-import pdb
+-from sfa.util.namespace import *
+-from sfa.util.rspec import *
+-from sfa.util.specdict import *
+-from sfa.util.faults import *
+-from sfa.util.storage import *
+-from sfa.util.policy import Policy
+-from sfa.util.debug import log
+-from sfa.server.aggregate import Aggregates
++from sfa.util.xrn import urn_to_hrn, hrn_to_urn, get_authority
++from sfa.util.plxrn import hrn_to_pl_slicename
+ from sfa.server.registry import Registries
++from sfa.util.rspec import RSpec
++from sfa.util.sfalogging import sfa_logger
+ from sfa.util.faults import *
+-
+-import xml.dom.minidom
+-
+-SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
+-SFA_MAX_DEFAULT_RSPEC = '/etc/sfa/max_physical.xml'
+-SFA_MAX_CANNED_RSPEC = '/etc/sfa/max_physical_canned.xml'
+-
+-topology = {}
+-
+-class SfaOutOfResource(SfaFault):
+-    def __init__(self, interface):
+-        faultString = "Interface " + interface + " not available"
+-        SfaFault.__init__(self, 100, faultString, '')
+-
+-class SfaNoPairRSpec(SfaFault):
+-    def __init__(self, interface, interface2):
+-        faultString = "Interface " + interface + " should be paired with " + interface2
+-        SfaFault.__init__(self, 100, faultString, '')
+-
+-# Returns a mapping from interfaces to the nodes they lie on and their peer interfaces
+-# i -> node,i_peer
+-
+-def get_interface_map():
+-    r = RSpec()
+-    r.parseFile(SFA_MAX_DEFAULT_RSPEC)
+-    rspec = r.toDict()
+-    capacity = rspec['rspec']['capacity']
+-    netspec = capacity[0]['netspec'][0]
+-    linkdefs = {}
+-    for n in netspec['nodespec']:
+-        ifspecs = n['ifspec']
+-        nodename = n['node']
+-        for i in ifspecs:
+-            ifname = i['name']
+-            linkid = i['linkid']
+-
+-            if (linkdefs.has_key(linkid)):
+-                linkdefs[linkid].extend([(nodename,ifname)])
+-            else:
+-                linkdefs[linkid]=[(nodename,ifname)]
+-    
+-    # topology maps each interface -> (node it sits on, peer interface)
+-    topology={}
+-
+-    for k in linkdefs.keys():
+-        (n1,i1) = linkdefs[k][0]
+-        (n2,i2) = linkdefs[k][1]
+-
+-        topology[i1] = (n1, i2)
+-        topology[i2] = (n2, i1)
+-        
+-
+-    return topology    
+-
+-    
+-def allocations_to_rspec(allocations):
+-    rspec = xml.dom.minidom.parse(SFA_MAX_DEFAULT_RSPEC)
+-    req = rspec.firstChild.appendChild(rspec.createElement("request"))
+-    for (iname,ip) in allocations:
+-        ifspec = req.appendChild(rspec.createElement("ifspec"))
+-        ifspec.setAttribute("name","tns:"+iname)
+-        ifspec.setAttribute("ip",ip)
+-
+-    return rspec.toxml()
+-        
+-    
+-def if_endpoints(ifs):
+-    nodes=[]
+-    for l in ifs:
+-        nodes.extend(topology[l][0])
+-    return nodes
+-
+-def lock_state_file():
+-    # Noop for demo
+-    return True
+-
+-def unlock_state_file():
+-    return True
+-    # Noop for demo
+-
+-def read_alloc_dict():
+-    alloc_dict={}
+-    rows = open(SFA_MAX_CONF_FILE).read().split('\n')
+-    for r in rows:
+-        columns = r.split(' ')
+-        if (len(columns)==2):
+-            hrn = columns[0]
+-            allocs = columns[1].split(',')
+-            ipallocs = map(lambda alloc:alloc.split('/'), allocs)
+-            alloc_dict[hrn]=ipallocs
+-    return alloc_dict
+-
+-def commit_alloc_dict(d):
+-    f = open(SFA_MAX_CONF_FILE, 'w')
+-    for hrn in d.keys():
+-        columns = d[hrn]
+-        ipcolumns = map(lambda x:"/".join(x), columns)
+-        row = hrn+' '+','.join(ipcolumns)+'\n'
+-        f.write(row)
+-    f.close()
+-
+-def collapse_alloc_dict(d):
+-    ret = []
+-    for k in d.keys():
+-        ret.extend(d[k])
++from sfa.util.config import Config
++from sfa.managers.aggregate_manager_pl import GetVersion
++import os
++import time
++import sys
++
++RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"
++
++# execute shell command and return both exit code and text output
++def shell_execute(cmd, timeout):
++    # group the command so stderr from every stage is captured in the pipe
++    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
++    text = ''
++    while timeout:
++        line = pipe.read()
++        text += line
++        time.sleep(1)
++        timeout = timeout-1
++    code = pipe.close()
++    if code is None: code = 0
++    if text[-1:] == '\n': text = text[:-1]
++    return code, text
++
++"""
++ call AM API client with command like in the following example:
++ cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \
++      net.geni.aggregate.client.examples.CreateSliceNetworkClient \
++      ./repo https://geni:8443/axis2/services/AggregateGENI \
++      ... params ...
++"""
++
++def call_am_apiclient(client_app, params, timeout):
++    (client_path, am_url) = Config().get_max_aggrMgr_info()
++    sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + client_app + " ./repo " + am_url + " " + ' '.join(params)
++    ret = shell_execute(sys_cmd, timeout)
++    sfa_logger().debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))
+     return ret
+-
+-def alloc_links(api, hrn, links_to_add, links_to_drop):
+-    slicename=hrn_to_pl_slicename(hrn)
+-    for (iface,ip) in links_to_add:
+-        node = topology[iface][0][0]
+-        try:
+-            api.plshell.AddSliceTag(api.plauth, slicename, "ip_addresses", ip, node)
+-            api.plshell.AddSliceTag(api.plauth, slicename, "vsys", "getvlan", node)
+-        except Exception: 
+-            # Probably a duplicate tag. XXX July 21
+-            pass
++# save request RSpec xml content to a tmp file
++def save_rspec_to_file(rspec):
++    path = RSPEC_TMP_FILE_PREFIX + "_" + time.strftime('%Y%m%dT%H:%M:%S', time.gmtime(time.time())) +".xml"
++    file = open(path, "w")
++    file.write(rspec)
++    file.close()
++    return path
++
++# get stripped-down slice id/name, e.g. plc:maxpl:xi_slice1 --> xi_slice1
++def get_short_slice_id(cred, hrn):
++    if hrn == None:
++        return None
++    # strip everything up to the last '+' (URN form) or ':' (HRN form)
++    if '+' in hrn:
++        return str(hrn[hrn.rfind('+')+1:])
++    if ':' in hrn:
++        return str(hrn[hrn.rfind(':')+1:])
++    return hrn
++
++# extract the first <tag> ... </tag> element from a blob of text
++def get_xml_by_tag(text, tag):
++    indx1 = text.find('<'+tag)
++    indx2 = text.find('/'+tag+'>')
++    xml = None
++    if indx1!=-1 and indx2>indx1:
++        xml = text[indx1:indx2+len(tag)+2]
++    return xml
++
++def create_slice(api, xrn, cred, rspec, users):
++    indx1 = rspec.find("<RSpec")
++    indx2 = rspec.find("</RSpec>")
++    if indx1 > -1 and indx2 > indx1:
++        rspec = rspec[indx1+len("<RSpec type=\"SFA\">"):indx2-1]
++    rspec_path = save_rspec_to_file(rspec)
++    (ret, output) = call_am_apiclient("CreateSliceNetworkClient", [rspec_path,], 3)
++    # parse output ?
++    rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"
+     return True
+-def alloc_nodes(api,hrn, requested_ifs):
+-    requested_nodes = if_endpoints(requested_ifs)
+-    create_slice_max_aggregate(api, hrn, requested_nodes)
+-
+-# Taken from slices.py
+-
+-def create_slice_max_aggregate(api, hrn, nodes):
+-    # Get the slice record 
+-    global topology
+-    topology = get_interface_map()
+-    slice = {}
+-    registries = Registries(api)
+-    registry = registries[api.hrn]
+-    credential = api.getCredential()
+-    records = registry.resolve(credential, hrn)
+-    for record in records:
+-        if record.get_type() in ['slice']:
+-            slice = record.as_dict()
+-    if not slice:
+-        raise RecordNotFound(hrn)   
+-
+-    # Make sure slice exists at plc, if it doesnt add it
+-    slicename = hrn_to_pl_slicename(hrn)
+-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
+-    if not slices:
+-        parts = slicename.split("_")
+-        login_base = parts[0]
+-        # if site doesnt exist add it
+-        sites = api.plshell.GetSites(api.plauth, [login_base])
+-        if not sites:
+-            authority = get_authority(hrn)
+-            site_records = registry.resolve(credential, authority)
+-            site_record = {}
+-            if not site_records:
+-                raise RecordNotFound(authority)
+-            site_record = site_records[0]
+-            site = site_record.as_dict()
+-                
+-            # add the site
+-            site.pop('site_id')
+-            site_id = api.plshell.AddSite(api.plauth, site)
+-        else:
+-            site = sites[0]
+-            
+-        slice_fields = {}
+-        slice_keys = ['name', 'url', 'description']
+-        for key in slice_keys:
+-            if key in slice and slice[key]:
+-                slice_fields[key] = slice[key]  
+-        api.plshell.AddSlice(api.plauth, slice_fields)
+-        slice = slice_fields
+-        slice['node_ids'] = 0
+-    else:
+-        slice = slices[0]    
+-
+-    # get the list of valid slice users from the registry and make sure
+-    # they are added to the slice
+-    researchers = record.get('researcher', [])
+-    for researcher in researchers:
+-        person_record = {}
+-        person_records = registry.resolve(credential, researcher)
+-        for record in person_records:
+-            if record.get_type() in ['user']:
+-                person_record = record
+-        if not person_record:
+-            pass
+-        person_dict = person_record.as_dict()
+-        persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
+-                                         ['person_id', 'key_ids'])
+-
+-        # Create the person record 
+-        if not persons:
+-            person_id=api.plshell.AddPerson(api.plauth, person_dict)
+-
+-            # The line below enables the user account on the remote aggregate
+-            # soon after it is created.
+-            # Without this the user key is not transferred to the slice
+-            # (as GetSlivers returns the keys of enabled users only),
+-            # which prevents the user from logging in to the slice.
+-            # We may do additional checks before enabling the user.
+-
+-            api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
+-            key_ids = []
+-        else:
+-            key_ids = persons[0]['key_ids']
+-
+-        api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
+-                                     slicename)        
+-
+-        # Get this users local keys
+-        keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
+-        keys = [key['key'] for key in keylist]
+-
+-        # add keys that arent already there 
+-        for personkey in person_dict['keys']:
+-            if personkey not in keys:
+-                key = {'key_type': 'ssh', 'key': personkey}
+-                api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
+-
+-    # find out where this slice is currently running
+-    nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
+-                                    ['hostname'])
+-    hostnames = [node['hostname'] for node in nodelist]
+-
+-    # remove nodes not in rspec
+-    deleted_nodes = list(set(hostnames).difference(nodes))
+-    # add nodes from rspec
+-    added_nodes = list(set(nodes).difference(hostnames))
+-
+-    api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes) 
+-    api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
+-
++def delete_slice(api, xrn, cred):
++    slice_id = get_short_slice_id(cred, xrn)
++    (ret, output) = call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)
++    # parse output ?
+     return 1
+-
+-def get_rspec(api, creds, options):
+-    # get slice's hrn from options
+-    xrn = options.get('geni_slice_urn', None)
+-    hrn, type = urn_to_hrn(xrn)
+-    # Eg. config line:
+-    # plc.princeton.sapan vlan23,vlan45
+-
+-    allocations = read_alloc_dict()
+-    if (hrn and allocations.has_key(hrn)):
+-            ret_rspec = allocations_to_rspec(allocations[hrn])
++def get_rspec(api, cred, options):
++    #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
++    urn = options.get('geni_slice_urn')
++    slice_id = get_short_slice_id(cred, urn)
++    if slice_id == None:
++        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)
+     else:
+-        ret_rspec = open(SFA_MAX_CANNED_RSPEC).read()
+-
+-    return (ret_rspec)
+-
+-
+-def create_slice(api, xrn, creds, rspec_xml, users):
+-    global topology
+-    hrn = urn_to_hrn(xrn)[0]
+-    topology = get_interface_map()
+-
+-    # Check if everything in rspec is either allocated by hrn
+-    # or not allocated at all.
+-    r = RSpec()
+-    r.parseString(rspec_xml)
+-    rspec = r.toDict()
+-
+-    lock_state_file()
+-
+-    allocations = read_alloc_dict()
+-    requested_allocations = rspec_to_allocations (rspec)
+-    current_allocations = collapse_alloc_dict(allocations)
+-    try:
+-        current_hrn_allocations=allocations[hrn]
+-    except KeyError:
+-        current_hrn_allocations=[]
+-
+-    # Check request against current allocations
+-    requested_interfaces = map(lambda(elt):elt[0], requested_allocations)
+-    current_interfaces = map(lambda(elt):elt[0], current_allocations)
+-    current_hrn_interfaces = map(lambda(elt):elt[0], current_hrn_allocations)
+-
+-    for a in requested_interfaces:
+-        if (a not in current_hrn_interfaces and a in current_interfaces):
+-            raise SfaOutOfResource(a)
+-        if (topology[a][1] not in requested_interfaces):
+-            raise SfaNoPairRSpec(a,topology[a][1])
+-    # Request OK
+-
+-    # Allocations to delete
+-    allocations_to_delete = []
+-    for a in current_hrn_allocations:
+-        if (a not in requested_allocations):
+-            allocations_to_delete.extend([a])
+-
+-    # Ok, let's do our thing
+-    alloc_nodes(api, hrn, requested_interfaces)
+-    alloc_links(api, hrn, requested_allocations, allocations_to_delete)
+-    allocations[hrn] = requested_allocations
+-    commit_alloc_dict(allocations)
+-
+-    unlock_state_file()
+-
+-    return True
+-
+-def rspec_to_allocations(rspec):
+-    ifs = []
+-    try:
+-        ifspecs = rspec['rspec']['request'][0]['ifspec']
+-        for l in ifspecs:
+-            ifs.extend([(l['name'].replace('tns:',''),l['ip'])])
+-    except KeyError:
+-        # Bad RSpec
+-        pass
+-    return ifs
++        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)
++    # parse output into rspec XML
++    if output.find("No resouce found") > 0:
++        rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
++    else:
++        comp_rspec = get_xml_by_tag(output, 'computeResource')
++        sfa_logger().debug("#### computeResource %s" % comp_rspec)
++        topo_rspec = get_xml_by_tag(output, 'topology')
++        sfa_logger().debug("#### topology %s" % topo_rspec)
++        rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">"
++        if comp_rspec != None:
++            rspec = rspec + get_xml_by_tag(output, 'computeResource')
++        if topo_rspec != None:
++            rspec = rspec + get_xml_by_tag(output, 'topology')
++        rspec = rspec + "</network> </RSpec>"
++
++    return (rspec)
++
++def start_slice(api, xrn, cred):
++    # service not supported
++    return None
++
++def stop_slice(api, xrn, cred):
++    # service not supported
++    return None
++
++def reset_slices(api, xrn):
++    # service not supported
++    return None
++
++"""
++Returns the request context required by sfatables. At some point, this mechanism should be changed
++to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
++return the basic information needed in a dict.
++"""
++def fetch_context(slice_hrn, user_hrn, contexts):
++    base_context = {'sfa':{'user':{'hrn':user_hrn}}}
++    return base_context
+ def main():
+-    t = get_interface_map()
++    # SfaAPI is defined in sfa/plc/api.py; imported here for this standalone test driver
++    from sfa.plc.api import SfaAPI
++    api = SfaAPI()
+     r = RSpec()
+     rspec_xml = open(sys.argv[1]).read()
+-    #get_rspec(None,'foo')
+-    create_slice(None, "plc.princeton.sap0", rspec_xml)
+-    
++    create_slice(api, "plc.maxpl.test000", None, rspec_xml, None)
++
+ if __name__ == "__main__":
+     main()
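
The rewritten manager stops provisioning interfaces through PLCAPI; instead it saves the request RSpec to a temporary file and drives the MAX aggregate's Java API client (CreateSliceNetworkClient, DeleteSliceNetworkClient, GetResourceTopology) through the shell. A rough driver sketch for the new entry points follows; it is illustrative only: the request file path is made up, the slice URN is the one quoted in get_rspec's comment, and the Java client located by Config().get_max_aggrMgr_info() must be installed for the calls to do anything useful.

    # illustrative sketch: exercising the new MAX aggregate manager by hand
    from sfa.managers import aggregate_manager_max as max_am

    # advertisement RSpec: no geni_slice_urn in options -> GetResourceTopology 'all'
    ad_rspec = max_am.get_rspec(None, None, {})

    # create a slice from a request RSpec (hypothetical file path)
    request_xml = open('/tmp/max_request.xml').read()
    slice_urn = 'urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1'
    max_am.create_slice(None, slice_urn, None, request_xml, None)

    # tear the slice down again
    max_am.delete_slice(None, slice_urn, None)
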
diff --git a/sfa/plc/api.py b/sfa/plc/api.py
index d4781f3..a5d5f2a 100644
@@ -99,7 +99,8 @@ class SfaAPI(BaseAPI):
         self.credential = None
         # Initialize the PLC shell only if SFA wraps a myPLC
         rspec_type = self.config.get_aggregate_type()
-        if (rspec_type == 'pl' or rspec_type == 'vini' or rspec_type == 'eucalyptus'):
+        if (rspec_type == 'pl' or rspec_type == 'vini' or \
+            rspec_type == 'eucalyptus' or rspec_type == 'max'):
             self.plshell = self.getPLCShell()
             self.plshell_version = "4.3"
 
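
The api.py change simply adds 'max' to the aggregate types that are given a PLC shell. For readability the same guard could be written as a membership test; an equivalent sketch, not part of the commit:

    if rspec_type in ('pl', 'vini', 'eucalyptus', 'max'):
        self.plshell = self.getPLCShell()
        self.plshell_version = "4.3"
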
diff --git a/sfa/util/config.py b/sfa/util/config.py
index a3fe098..0f513ec 100644
@@ -98,6 +98,12 @@ class Config:
         else:
             return "pl"
 
+    def get_interface_hrn(self):
+        if (hasattr(self,'SFA_INTERFACE_HRN')):
+            return self.SFA_INTERFACE_HRN
+        else:
+            return "plc"
+
     def get_plc_dbinfo(self):
         return {
             'dbname' : self.SFA_PLC_DB_NAME,
@@ -107,6 +113,18 @@ class Config:
             'password' : self.SFA_PLC_DB_PASSWORD
             }
 
+    # TODO: find a better place to put this method
+    def get_max_aggrMgr_info(self):
+        am_apiclient_path = '/usr/local/MAXGENI_AM_APIClient'
+        if (hasattr(self,'MAXGENI_AM_APICLIENT_PATH')):
+            am_apiclient_path = self.MAXGENI_AM_APICLIENT_PATH
+
+        am_url = 'https://geni.dragon.maxgigapop.net:8443/axis2/services/AggregateGENI'
+        if (hasattr(self,'MAXGENI_AM_URL')):
+            am_url = self.MAXGENI_AM_URL
+
+        return (am_apiclient_path,am_url)
+
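
Both new Config helpers fall back to hard-coded defaults when the corresponding settings are absent, so a stock install points at the public MAX aggregate out of the box. A minimal read-side sketch using only the accessors added above (the commented values are the defaults from the code; how the settings end up in the local SFA configuration is deployment-specific and not shown):

    from sfa.util.config import Config

    config = Config()
    # ('/usr/local/MAXGENI_AM_APIClient',
    #  'https://geni.dragon.maxgigapop.net:8443/axis2/services/AggregateGENI')
    client_path, am_url = config.get_max_aggrMgr_info()
    # 'plc' unless SFA_INTERFACE_HRN is set
    interface_hrn = config.get_interface_hrn()
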
     ##
     # SFA uses a PLCAPI connection to perform operations on the registry,
     # such as creating and deleting slices. This connection requires an account