These files have moved to sfa.managers.vini
author    Andy Bavier <acb@cs.princeton.edu>
Thu, 28 Jan 2010 16:28:35 +0000 (16:28 +0000)
committer Andy Bavier <acb@cs.princeton.edu>
Thu, 28 Jan 2010 16:28:35 +0000 (16:28 +0000)
sfa/rspecs/aggregates/rspec_manager_vini.py [deleted file]
sfa/rspecs/aggregates/vini/__init__.py [deleted file]
sfa/rspecs/aggregates/vini/request.xml [deleted file]
sfa/rspecs/aggregates/vini/rspec.py [deleted file]
sfa/rspecs/aggregates/vini/topology.py [deleted file]
sfa/rspecs/aggregates/vini/utils.py [deleted file]
sfa/rspecs/aggregates/vini/vini.rnc [deleted file]
sfa/rspecs/aggregates/vini/vini.xml [deleted file]
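
Per the commit message, this code now lives under sfa.managers.vini. Assuming the relocated module keeps the entry points defined in the deleted manager below (get_rspec, create_slice, fetch_context), which this commit alone does not confirm, callers would adjust their imports along these lines:

    # assumed new import path; hook names taken from the deleted rspec_manager_vini.py below
    from sfa.managers.vini import get_rspec, create_slice, fetch_context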

diff --git a/sfa/rspecs/aggregates/rspec_manager_vini.py b/sfa/rspecs/aggregates/rspec_manager_vini.py
deleted file mode 100644 (file)
index 20dc7fb..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-from sfa.util.faults import *
-from sfa.util.namespace import *
-from sfa.util.rspec import RSpec
-from sfa.server.registry import Registries
-from sfa.plc.nodes import *
-from sfa.rspecs.aggregates.vini.utils import *
-from sfa.rspecs.aggregates.vini.rspec import *
-import sys
-
-SFA_VINI_WHITELIST = '/etc/sfa/vini.whitelist'
-
-"""
-Copied from create_slice_aggregate() in sfa.plc.slices
-"""
-def create_slice_vini_aggregate(api, hrn, nodes):
-    # Get the slice record from SFA
-    slice = {}
-    registries = Registries(api)
-    registry = registries[api.hrn]
-    credential = api.getCredential()
-    records = registry.resolve(credential, hrn)
-    for record in records:
-        if record['type'] in ['slice']:
-            slice = record
-    if not slice:
-        raise RecordNotFound(hrn)   
-
-    # Make sure slice exists at plc, if it doesnt add it
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
-    if not slices:
-        parts = slicename.split("_")
-        login_base = parts[0]
-        # if site doesnt exist add it
-        sites = api.plshell.GetSites(api.plauth, [login_base])
-        if not sites:
-            authority = get_authority(hrn)
-            site_records = registry.resolve(credential, authority)
-            site_record = {}
-            if not site_records:
-                raise RecordNotFound(authority)
-            site = site_records[0]
-                
-            # add the site
-            site.pop('site_id')
-            site_id = api.plshell.AddSite(api.plauth, site)
-        else:
-            site = sites[0]
-            
-        slice_fields = {}
-        slice_keys = ['name', 'url', 'description']
-        for key in slice_keys:
-            if key in slice and slice[key]:
-                slice_fields[key] = slice[key]  
-        api.plshell.AddSlice(api.plauth, slice_fields)
-        slice = slice_fields
-        slice['node_ids'] = 0
-    else:
-        slice = slices[0]    
-
-    # get the list of valid slice users from the registry and make 
-    # they are added to the slice 
-    researchers = record.get('researcher', [])
-    for researcher in researchers:
-        person_record = {}
-        person_records = registry.resolve(credential, researcher)
-        for record in person_records:
-            if record['type'] in ['user']:
-                person_record = record
-        if not person_record:
-            pass
-        person_dict = person_record
-        persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
-                                         ['person_id', 'key_ids'])
-
-        # Create the person record 
-        if not persons:
-            person_id=api.plshell.AddPerson(api.plauth, person_dict)
-
-            # The line below enables the user account on the remote aggregate
-            # soon after it is created.
-            # without this the user key is not transfered to the slice
-            # (as GetSlivers returns key of only enabled users),
-            # which prevents the user from login to the slice.
-            # We may do additional checks before enabling the user.
-
-            api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
-            key_ids = []
-        else:
-            key_ids = persons[0]['key_ids']
-
-        api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
-                                     slicename)        
-
-        # Get this users local keys
-        keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
-        keys = [key['key'] for key in keylist]
-
-        # add keys that arent already there 
-        for personkey in person_dict['keys']:
-            if personkey not in keys:
-                key = {'key_type': 'ssh', 'key': personkey}
-                api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
-
-    # find out where this slice is currently running
-    nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
-                                    ['hostname'])
-    hostnames = [node['hostname'] for node in nodelist]
-
-    # remove nodes not in rspec
-    deleted_nodes = list(set(hostnames).difference(nodes))
-    # add nodes from rspec
-    added_nodes = list(set(nodes).difference(hostnames))
-
-    """
-    print >> sys.stderr, "Slice on nodes:"
-    for n in hostnames:
-        print >> sys.stderr, n
-    print >> sys.stderr, "Wants nodes:"
-    for n in nodes:
-        print >> sys.stderr, n
-    print >> sys.stderr, "Deleting nodes:"
-    for n in deleted_nodes:
-        print >> sys.stderr, n
-    print >> sys.stderr, "Adding nodes:"
-    for n in added_nodes:
-        print >> sys.stderr, n
-    """
-
-    api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes) 
-    api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
-
-    return 1
-
-def get_rspec(api, hrn):
-    topo = Topology(api)      
-    if (hrn):
-        slicename = hrn_to_pl_slicename(hrn)
-        slice = get_slice(api, slicename)
-        if slice:
-            slice.hrn = hrn
-            topo.nodeTopoFromSliceTags(slice)
-        else:
-            # call the default sfa.plc.nodes.get_rspec() method
-            return Nodes(api).get_rspec(hrn)     
-
-    return topo.toxml(hrn)
-
-
-
-"""
-Hook called via 'sfi.py create'
-"""
-def create_slice(api, hrn, xml):
-    ### Check the whitelist
-    ### It consists of lines of the form: <slice hrn> <bw>
-    whitelist = {}
-    f = open(SFA_VINI_WHITELIST)
-    for line in f.readlines():
-        (slice, maxbw) = line.split()
-        whitelist[slice] = maxbw
-        
-    if hrn in whitelist:
-        maxbw = whitelist[hrn]
-    else:
-        raise PermissionError("%s not in VINI whitelist" % hrn)
-        
-    rspec = RSpec(xml)
-    topo = Topology(api)
-    
-    topo.nodeTopoFromRSpec(rspec)
-
-    # Check request against current allocations
-    topo.verifyNodeTopo(hrn, topo, maxbw)
-    
-    nodes = topo.nodesInTopo()
-    hostnames = []
-    for node in nodes:
-        hostnames.append(node.hostname)
-    create_slice_vini_aggregate(api, hrn, hostnames)
-
-    slicename = hrn_to_pl_slicename(hrn)
-    slice = get_slice(api, slicename)
-    if slice:
-        topo.updateSliceTags(slice)    
-
-    return True
-
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
-    base_context = {'sfa':{'user':{'hrn':user_hrn},
-                           'slice':{'hrn':slice_hrn}}}
-    return base_context
-
-def main():
-    r = RSpec()
-    r.parseFile(sys.argv[1])
-    rspec = r.toDict()
-    create_slice(None,'plc',rspec)
-
-if __name__ == "__main__":
-    main()
diff --git a/sfa/rspecs/aggregates/vini/__init__.py b/sfa/rspecs/aggregates/vini/__init__.py
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/sfa/rspecs/aggregates/vini/request.xml b/sfa/rspecs/aggregates/vini/request.xml
deleted file mode 100644 (file)
index ffe034c..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<RSpec type="VINI">
-  <request>
-    <sliver nodeid="n18"/>
-    <sliver nodeid="n20"/>
-    <sliver nodeid="n22"/>
-    <sliver nodeid="n26"/>
-    <sliver nodeid="n28"/>
-    <sliver nodeid="n30"/>
-    <sliver nodeid="n32"/>
-    <sliver nodeid="n34"/>
-    <sliver nodeid="n36"/>
-    <vlink endpoints="n18 n22"/>
-    <vlink endpoints="n18 n26"/>
-    <vlink endpoints="n18 n28"/>
-    <vlink endpoints="n20 n22"/>
-    <vlink endpoints="n22 n26"/>
-    <vlink endpoints="n26 n30"/>
-    <vlink endpoints="n28 n30"/>
-    <vlink endpoints="n28 n32"/>
-    <vlink endpoints="n30 n36"/>
-    <vlink endpoints="n34 n36"/>
-    <vlink endpoints="n32 n36"/>
-    <vlink endpoints="n32 n34"/>
-  </request>
-</RSpec>
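
This sample request uses the "virtual resources only" form that nodeTopoFromRSpec() in utils.py accepts: slivers keyed by nodeid and vlinks keyed by endpoints, where the IDs refer to nodes in the advertisement. A small illustrative sketch of producing such a request with lxml (the deleted code only consumes this format; it does not generate it this way):

    from lxml import etree

    rspec = etree.Element("RSpec", type="VINI")
    request = etree.SubElement(rspec, "request")
    etree.SubElement(request, "sliver", nodeid="n18")
    etree.SubElement(request, "sliver", nodeid="n22")
    etree.SubElement(request, "vlink", endpoints="n18 n22")
    print etree.tostring(rspec, pretty_print=True)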
diff --git a/sfa/rspecs/aggregates/vini/rspec.py b/sfa/rspecs/aggregates/vini/rspec.py
deleted file mode 100644 (file)
index d391a71..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-from sfa.util.rspec import RSpec
-from sfa.rspecs.aggregates.vini.utils import *
-import sys
-
-SFA_VINI_DEFAULT_RSPEC = '/etc/sfa/vini.rspec'
-
-class ViniRSpec(RSpec):
-    def __init__(self, xml = None, xsd = None, NSURL = None):
-        RSpec.__init__(self, xml, xsd, NSURL)
-        if not xml:
-            self.parseFile(SFA_VINI_DEFAULT_RSPEC)
-        
-    def updateCapacity(self, topo):
-        d = self.toDict()
-        sitespecs = []
-        sitelinkspecs = []
-        for site in topo.getSites():
-            if not site.public:
-                continue
-            sdict = {}
-            nodespecs = []
-            for node in site.get_sitenodes(topo.nodes):
-                if not node.tag:
-                    continue
-                ndict = {}
-                ndict['hostname'] = [node.hostname]
-                ndict['name'] = node.tag
-                ndict['kbps'] = [int(node.bps/1000)] 
-                nodespecs.append(ndict)
-            sdict['NodeSpec'] = nodespecs
-            sdict['name'] = site.name
-            sitespecs.append(sdict)
-            
-            for sl in site.links:
-                if sl.end1 == site:
-                    sldict = {}
-                    sldict['endpoint'] = [sl.end1.name, sl.end2.name]
-                    sldict['kbps'] = [int(sl.bps/1000)]
-                    sitelinkspecs.append(sldict)
-                    
-        d['RSpec']['Capacity'][0]['NetSpec'][0]['SiteSpec'] = sitespecs
-        d['RSpec']['Capacity'][0]['NetSpec'][0]['SiteLinkSpec'] = sitelinkspecs
-        self.parseDict(d)
-
-
-    def updateRequest(self, slice, topo):
-        linkspecs = []
-        for link in topo.nodelinks:
-            edict = {}
-            edict['endpoint'] = [link.end1.tag, link.end2.tag]
-            edict['kbps'] = [int(link.bps/1000)]
-            linkspecs.append(edict)
-
-        d = self.toDict()
-        d['RSpec']['Request'][0]['NetSpec'][0]['LinkSpec'] = linkspecs
-        d['RSpec']['Request'][0]['NetSpec'][0]['name'] = slice.hrn
-        self.parseDict(d)
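
ViniRSpec edits the parsed document by round-tripping it through a dictionary: toDict(), mutate the nested Capacity or Request entries, then parseDict(). A sketch of that pattern, assuming an already-constructed ViniRSpec instance named rspec (the slice HRN here is a placeholder):

    d = rspec.toDict()
    d['RSpec']['Request'][0]['NetSpec'][0]['name'] = 'plc.example.slice'  # placeholder HRN
    rspec.parseDict(d)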
diff --git a/sfa/rspecs/aggregates/vini/topology.py b/sfa/rspecs/aggregates/vini/topology.py
deleted file mode 100755 (executable)
index cb65fb5..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/python
-
-# $Id: topology.py 14181 2009-07-01 19:46:07Z acb $
-# $URL: https://svn.planet-lab.org/svn/NodeManager-topo/trunk/topology.py $
-
-#
-# Links in the physical topology, gleaned from looking at the Internet2
-# and NLR topology maps.  Link (a, b) connects sites with IDs a and b.
-#
-PhysicalLinks = [(2, 12),  # I2 Princeton - New York 
-         (4, 5),   # NLR Chicago - Houston
-         (4, 6),   # NLR Chicago - Atlanta
-         (4, 7),   # NLR Chicago - Seattle
-         (4, 9),   # NLR Chicago - New York
-         (4, 10),  # NLR Chicago - Wash DC
-         (5, 6),   # NLR Houston - Atlanta
-         (5, 8),   # NLR Houston - Los Angeles
-         (6, 10),  # NLR Atlanta - Wash DC
-         (6, 14),  # NLR Atlanta - Ga Tech
-         (7, 8),   # NLR Seattle - Los Angeles
-         (9, 10),  # NLR New York - Wash DC
-         (11, 13), # I2 Chicago - Wash DC
-         (11, 15), # I2 Chicago - Atlanta
-         (11, 16), # I2 Chicago - CESNET
-         (11, 17), # I2 Chicago - Kansas City
-         (12, 13), # I2 New York - Wash DC
-         (13, 15), # I2 Wash DC - Atlanta
-         (14, 15), # Ga Tech - I2 Atlanta
-         (15, 19), # I2 Atlanta - Houston
-         (17, 19), # I2 Kansas City - Houston
-         (17, 22), # I2 Kansas City - Salt Lake City
-         (17, 24), # I2 Kansas City - UMKC
-         (19, 20), # I2 Houston - Los Angeles
-         (20, 21), # I2 Los Angeles - Seattle
-         (20, 22), # I2 Los Angeles - Salt Lake City
-         (21, 22)] # I2 Seattle - Salt Lake City
-
-
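
PhysicalLinks is consumed by Topology.__init__() in utils.py, which wraps each (a, b) site-ID pair in a Link object between Site objects. A small sketch of the adjacency check this enables, equivalent to what lookupSiteLink() does by scanning those Link objects:

    def sites_adjacent(s1, s2, links=PhysicalLinks):
        # links are undirected, so accept either ordering
        return (s1, s2) in links or (s2, s1) in links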
diff --git a/sfa/rspecs/aggregates/vini/utils.py b/sfa/rspecs/aggregates/vini/utils.py
deleted file mode 100644 (file)
index 6078125..0000000
+++ /dev/null
@@ -1,719 +0,0 @@
-from __future__ import with_statement
-import re
-import socket
-from sfa.util.faults import *
-from sfa.rspecs.aggregates.vini.topology import *
-from xmlbuilder import XMLBuilder
-from lxml import etree
-import sys
-from StringIO import StringIO
-
-VINI_RELAXNG_SCHEMA = "/var/www/html/schemas/vini.rng"
-
-# Taken from bwlimit.py
-#
-# See tc_util.c and http://physics.nist.gov/cuu/Units/binary.html. Be
-# warned that older versions of tc interpret "kbps", "mbps", "mbit",
-# and "kbit" to mean (in this system) "kibps", "mibps", "mibit", and
-# "kibit" and that if an older version is installed, all rates will
-# be off by a small fraction.
-suffixes = {
-    "":         1,
-    "bit":     1,
-    "kibit":   1024,
-    "kbit":    1000,
-    "mibit":   1024*1024,
-    "mbit":    1000000,
-    "gibit":   1024*1024*1024,
-    "gbit":    1000000000,
-    "tibit":   1024*1024*1024*1024,
-    "tbit":    1000000000000,
-    "bps":     8,
-    "kibps":   8*1024,
-    "kbps":    8000,
-    "mibps":   8*1024*1024,
-    "mbps":    8000000,
-    "gibps":   8*1024*1024*1024,
-    "gbps":    8000000000,
-    "tibps":   8*1024*1024*1024*1024,
-    "tbps":    8000000000000
-}
-
-
-def get_tc_rate(s):
-    """
-    Parses an integer or a tc rate string (e.g., 1.5mbit) into bits/second
-    """
-
-    if type(s) == int:
-        return s
-    m = re.match(r"([0-9.]+)(\D*)", s)
-    if m is None:
-        return -1
-    suffix = m.group(2).lower()
-    if suffixes.has_key(suffix):
-        return int(float(m.group(1)) * suffixes[suffix])
-    else:
-        return -1
-
-def format_tc_rate(rate):
-    """
-    Formats a bits/second rate into a tc rate string
-    """
-
-    if rate >= 1000000000 and (rate % 1000000000) == 0:
-        return "%.0fgbit" % (rate / 1000000000.)
-    elif rate >= 1000000 and (rate % 1000000) == 0:
-        return "%.0fmbit" % (rate / 1000000.)
-    elif rate >= 1000:
-        return "%.0fkbit" % (rate / 1000.)
-    else:
-        return "%.0fbit" % rate
-
-
-class Node:
-    def __init__(self, node, bps = 1000 * 1000000):
-        self.id = node['node_id']
-        self.idtag = "n%s" % self.id
-        self.hostname = node['hostname']
-        self.name = self.shortname = self.hostname.replace('.vini-veritas.net', '')
-        self.site_id = node['site_id']
-        self.ipaddr = socket.gethostbyname(self.hostname)
-        self.bps = bps
-        self.links = set()
-        self.sliver = False
-
-    def get_link_id(self, remote):
-        if self.id < remote.id:
-            link = (self.id<<7) + remote.id
-        else:
-            link = (remote.id<<7) + self.id
-        return link
-        
-    def get_iface_id(self, remote):
-        if self.id < remote.id:
-            iface = 1
-        else:
-            iface = 2
-        return iface
-    
-    def get_virt_ip(self, remote):
-        link = self.get_link_id(remote)
-        iface = self.get_iface_id(remote)
-        first = link >> 6
-        second = ((link & 0x3f)<<2) + iface
-        return "192.168.%d.%d" % (first, second)
-
-    def get_virt_net(self, remote):
-        link = self.get_link_id(remote)
-        first = link >> 6
-        second = (link & 0x3f)<<2
-        return "192.168.%d.%d/30" % (first, second)
-        
-    def get_site(self, sites):
-        return sites[self.site_id]
-    
-    def get_topo_rspec(self, link):
-        if link.end1 == self:
-            remote = link.end2
-        elif link.end2 == self:
-            remote = link.end1
-        else:
-            raise Error("Link does not connect to Node")
-            
-        my_ip = self.get_virt_ip(remote)
-        remote_ip = remote.get_virt_ip(self)
-        net = self.get_virt_net(remote)
-        bw = format_tc_rate(link.bps)
-        return (remote.id, remote.ipaddr, bw, my_ip, remote_ip, net)
-        
-    def add_link(self, link):
-        self.links.add(link)
-        
-    def add_tag(self, sites):
-        s = self.get_site(sites)
-        words = self.hostname.split(".")
-        index = words[0].replace("node", "")
-        if index.isdigit():
-            self.tag = s.tag + index
-        else:
-            self.tag = None
-
-    # Assumes there is at most one Link between two sites
-    def get_sitelink(self, node, sites):
-        site1 = sites[self.site_id]
-        site2 = sites[node.site_id]
-        sl = site1.links.intersection(site2.links)
-        if len(sl):
-            return sl.pop()
-        return None
-
-    def add_sliver(self):
-        self.sliver = True
-
-    def toxml(self, xml, hrn):
-        if not self.tag:
-            return
-        with xml.node(id = self.idtag):
-            with xml.hostname:
-                xml << self.hostname
-            with xml.kbps:
-                xml << str(int(self.bps/1000))
-            if self.sliver:
-                with xml.sliver:
-                    pass
-    
-
-class Link:
-    def __init__(self, end1, end2, bps = 1000 * 1000000, parent = None):
-        self.end1 = end1
-        self.end2 = end2
-        self.bps = bps
-        self.parent = parent
-        self.children = []
-
-        end1.add_link(self)
-        end2.add_link(self)
-        
-        if self.parent:
-            self.parent.children.append(self)
-            
-    def toxml(self, xml):
-        end_ids = "%s %s" % (self.end1.idtag, self.end2.idtag)
-
-        if self.parent:
-            element = xml.vlink(endpoints=end_ids)
-        else:
-            element = xml.link(endpoints=end_ids)
-
-        with element:
-            with xml.description:
-                xml << "%s -- %s" % (self.end1.name, self.end2.name)
-            with xml.kbps:
-                xml << str(int(self.bps/1000))
-            for child in self.children:
-                child.toxml(xml)
-        
-
-class Site:
-    def __init__(self, site):
-        self.id = site['site_id']
-        self.idtag = "s%s" % self.id
-        self.node_ids = site['node_ids']
-        self.name = site['abbreviated_name'].replace(" ", "_")
-        self.tag = site['login_base']
-        self.public = site['is_public']
-        self.enabled = site['enabled']
-        self.links = set()
-
-    def get_sitenodes(self, nodes):
-        n = []
-        for i in self.node_ids:
-            n.append(nodes[i])
-        return n
-    
-    def add_link(self, link):
-        self.links.add(link)
-
-    def toxml(self, xml, hrn, nodes):
-        if not (self.public and self.enabled and self.node_ids):
-            return
-        with xml.site(id = self.idtag):
-            with xml.name:
-                xml << self.name
-                
-            for node in self.get_sitenodes(nodes):
-                node.toxml(xml, hrn)
-   
-    
-class Slice:
-    def __init__(self, slice):
-        self.id = slice['slice_id']
-        self.name = slice['name']
-        self.node_ids = set(slice['node_ids'])
-        self.slice_tag_ids = slice['slice_tag_ids']
-    
-    def get_tag(self, tagname, slicetags, node = None):
-        for i in self.slice_tag_ids:
-            tag = slicetags[i]
-            if tag.tagname == tagname:
-                if (not node) or (node.id == tag.node_id):
-                    return tag
-        else:
-            return None
-        
-    def get_nodes(self, nodes):
-        n = []
-        for id in self.node_ids:
-            n.append(nodes[id])
-        return n
-             
-    
-    # Add a new slice tag   
-    def add_tag(self, tagname, value, slicetags, node = None):
-        record = {'slice_tag_id':None, 'slice_id':self.id, 'tagname':tagname, 'value':value}
-        if node:
-            record['node_id'] = node.id
-        else:
-            record['node_id'] = None
-        tag = Slicetag(record)
-        slicetags[tag.id] = tag
-        self.slice_tag_ids.append(tag.id)
-        tag.changed = True       
-        tag.updated = True
-        return tag
-    
-    # Update a slice tag if it exists, else add it             
-    def update_tag(self, tagname, value, slicetags, node = None):
-        tag = self.get_tag(tagname, slicetags, node)
-        if tag and tag.value == value:
-            value = "no change"
-        elif tag:
-            tag.value = value
-            tag.changed = True
-        else:
-            tag = self.add_tag(tagname, value, slicetags, node)
-        tag.updated = True
-            
-    def assign_egre_key(self, slicetags):
-        if not self.get_tag('egre_key', slicetags):
-            try:
-                key = free_egre_key(slicetags)
-                self.update_tag('egre_key', key, slicetags)
-            except:
-                # Should handle this case...
-                pass
-        return
-            
-    def turn_on_netns(self, slicetags):
-        tag = self.get_tag('netns', slicetags)
-        if (not tag) or (tag.value != '1'):
-            self.update_tag('netns', '1', slicetags)
-        return
-   
-    def turn_off_netns(self, slicetags):
-        tag = self.get_tag('netns', slicetags)
-        if tag and (tag.value != '0'):
-            tag.delete()
-        return
-    
-    def add_cap_net_admin(self, slicetags):
-        tag = self.get_tag('capabilities', slicetags)
-        if tag:
-            caps = tag.value.split(',')
-            for cap in caps:
-                if cap == "CAP_NET_ADMIN":
-                    return
-            else:
-                newcaps = "CAP_NET_ADMIN," + tag.value
-                self.update_tag('capabilities', newcaps, slicetags)
-        else:
-            self.add_tag('capabilities', 'CAP_NET_ADMIN', slicetags)
-        return
-    
-    def remove_cap_net_admin(self, slicetags):
-        tag = self.get_tag('capabilities', slicetags)
-        if tag:
-            caps = tag.value.split(',')
-            newcaps = []
-            for cap in caps:
-                if cap != "CAP_NET_ADMIN":
-                    newcaps.append(cap)
-            if newcaps:
-                value = ','.join(newcaps)
-                self.update_tag('capabilities', value, slicetags)
-            else:
-                tag.delete()
-        return
-
-    # Update the vsys/setup-link and vsys/setup-nat slice tags.
-    def add_vsys_tags(self, slicetags):
-        link = nat = False
-        for i in self.slice_tag_ids:
-            tag = slicetags[i]
-            if tag.tagname == 'vsys':
-                if tag.value == 'setup-link':
-                    link = True
-                elif tag.value == 'setup-nat':
-                    nat = True
-        if not link:
-            self.add_tag('vsys', 'setup-link', slicetags)
-        if not nat:
-            self.add_tag('vsys', 'setup-nat', slicetags)
-        return
-
-
-class Slicetag:
-    newid = -1 
-    def __init__(self, tag):
-        self.id = tag['slice_tag_id']
-        if not self.id:
-            # Make one up for the time being...
-            self.id = Slicetag.newid
-            Slicetag.newid -= 1
-        self.slice_id = tag['slice_id']
-        self.tagname = tag['tagname']
-        self.value = tag['value']
-        self.node_id = tag['node_id']
-        self.updated = False
-        self.changed = False
-        self.deleted = False
-    
-    # Mark a tag as deleted
-    def delete(self):
-        self.deleted = True
-        self.updated = True
-    
-    def write(self, api):
-        if self.changed:
-            if int(self.id) > 0:
-                api.plshell.UpdateSliceTag(api.plauth, self.id, self.value)
-            else:
-                api.plshell.AddSliceTag(api.plauth, self.slice_id, 
-                                        self.tagname, self.value, self.node_id)
-        elif self.deleted and int(self.id) > 0:
-            api.plshell.DeleteSliceTag(api.plauth, self.id)
-
-
-"""
-A topology is a compound object consisting of:
-* a dictionary mapping site IDs to Site objects
-* a dictionary mapping node IDs to Node objects
-* the Site objects are connected via SiteLink objects representing
-  the physical topology and available bandwidth
-* the Node objects are connected via Link objects representing
-  the requested or assigned virtual topology of a slice
-"""
-class Topology:
-    def __init__(self, api):
-        self.api = api
-        self.sites = get_sites(api)
-        self.nodes = get_nodes(api)
-        self.tags = get_slice_tags(api)
-        self.sitelinks = []
-        self.nodelinks = []
-    
-        for (s1, s2) in PhysicalLinks:
-            self.sitelinks.append(Link(self.sites[s1], self.sites[s2]))
-        
-        for id in self.nodes:
-            self.nodes[id].add_tag(self.sites)
-        
-        for t in self.tags:
-            tag = self.tags[t]
-            if tag.tagname == 'topo_rspec':
-                node1 = self.nodes[tag.node_id]
-                l = eval(tag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    allocbps = get_tc_rate(bw)
-                    node1.bps -= allocbps
-                    try:
-                        node2 = self.nodes[id]
-                        if node1.id < node2.id:
-                            sl = node1.get_sitelink(node2, self.sites)
-                            sl.bps -= allocbps
-                    except:
-                        pass
-
-    
-    """ Lookup site based on id or idtag value """
-    def lookupSite(self, id):
-        val = None
-        if isinstance(id, basestring):
-            id = int(id.lstrip('s'))
-        try:
-            val = self.sites[id]
-        except:
-            raise KeyError("site ID %s not found" % id)
-        return val
-    
-    def getSites(self):
-        sites = []
-        for s in self.sites:
-            sites.append(self.sites[s])
-        return sites
-        
-    """ Lookup node based on id or idtag value """
-    def lookupNode(self, id):
-        val = None
-        if isinstance(id, basestring):
-            id = int(id.lstrip('n'))
-        try:
-            val = self.nodes[id]
-        except:
-            raise KeyError("node ID %s not found" % id)
-        return val
-    
-    def getNodes(self):
-        nodes = []
-        for n in self.nodes:
-            nodes.append(self.nodes[n])
-        return nodes
-    
-    def nodesInTopo(self):
-        nodes = []
-        for n in self.nodes:
-            node = self.nodes[n]
-            if node.sliver:
-                nodes.append(node)
-        return nodes
-            
-    def lookupSliceTag(self, id):
-        val = None
-        try:
-            val = self.tags[id]
-        except:
-            raise KeyError("slicetag ID %s not found" % id)
-        return val
-    
-    def getSliceTags(self):
-        tags = []
-        for t in self.tags:
-            tags.append(self.tags[t])
-        return tags
-    
-    def lookupSiteLink(self, node1, node2):
-        site1 = self.sites[node1.site_id]
-        site2 = self.sites[node2.site_id]
-        for link in self.sitelinks:
-            if site1 == link.end1 and site2 == link.end2:
-                return link
-            if site2 == link.end1 and site1 == link.end2:
-                return link
-        return None
-    
-
-    def __add_vlink(self, vlink, slicenodes, parent = None):
-        n1 = n2 = None
-        endpoints = vlink.get("endpoints")
-        if endpoints:
-            (end1, end2) = endpoints.split()
-            n1 = self.lookupNode(end1)
-            n2 = self.lookupNode(end2)
-        elif parent:
-            """ Try to infer the endpoints for the virtual link """
-            site_endpoints = parent.get("endpoints")
-            (n1, n2) = self.__infer_endpoints(site_endpoints, slicenodes)
-        else:
-            raise Error("no endpoints given")
-
-        #print "Added virtual link: %s -- %s" % (n1.tag, n2.tag)
-        bps = int(vlink.findtext("kbps")) * 1000
-        sitelink = self.lookupSiteLink(n1, n2)
-        if not sitelink:
-            raise PermissionError("nodes %s and %s not adjacent" % 
-                                  (n1.idtag, n2.idtag))
-        self.nodelinks.append(Link(n1, n2, bps, sitelink))
-        return
-
-    """ 
-    Infer the endpoints of the virtual link.  If the slice exists on 
-    only a single node at each end of the physical link, we'll assume that
-    the user wants the virtual link to terminate at these nodes.
-    """
-    def __infer_endpoints(self, endpoints, slicenodes):
-        n = []
-        ends = endpoints.split()
-        for end in ends:
-            found = 0
-            site = self.lookupSite(end)
-            for id in site.node_ids:
-                if id in slicenodes:
-                    n.append(slicenodes[id])
-                    found += 1
-            if found != 1:
-                raise Error("could not infer endpoint for site %s" % site.id)
-        #print "Inferred endpoints: %s %s" % (n[0].idtag, n[1].idtag)
-        return n
-        
-    def nodeTopoFromRSpec(self, xml):
-        if self.nodelinks:
-            raise Error("virtual topology already present")
-            
-        nodedict = {}
-        for node in self.getNodes():
-            nodedict[node.idtag] = node
-            
-        slicenodes = {}
-
-        tree = etree.parse(StringIO(xml))
-
-        # Validate the incoming request against the RelaxNG schema
-        relaxng_doc = etree.parse(VINI_RELAXNG_SCHEMA)
-        relaxng = etree.RelaxNG(relaxng_doc)
-        
-        if not relaxng(tree):
-            error = relaxng.error_log.last_error
-            message = "%s (line %s)" % (error.message, error.line)
-            raise InvalidRSpec(message)
-
-        rspec = tree.getroot()
-
-        """
-        Handle requests where the user has annotated a description of the
-        physical resources (nodes and links) with virtual ones (slivers
-        and vlinks).
-        """
-        # Find slivers under node elements
-        for sliver in rspec.iterfind("./network/site/node/sliver"):
-            elem = sliver.getparent()
-            node = nodedict[elem.get("id")]
-            slicenodes[node.id] = node
-            node.add_sliver()
-
-        # Find vlinks under link elements
-        for vlink in rspec.iterfind("./network/link/vlink"):
-            link = vlink.getparent()
-            self.__add_vlink(vlink, slicenodes, link)
-
-        """
-        Handle requests where the user has listed the virtual resources only
-        """
-        # Find slivers that specify nodeid
-        for sliver in rspec.iterfind("./request/sliver[@nodeid]"):
-            node = nodedict[sliver.get("nodeid")]
-            slicenodes[node.id] = node
-            node.add_sliver()
-
-        # Find vlinks that specify endpoints
-        for vlink in rspec.iterfind("./request/vlink[@endpoints]"):
-            self.__add_vlink(vlink, slicenodes)
-
-        return
-
-    def nodeTopoFromSliceTags(self, slice):
-        if self.nodelinks:
-            raise Error("virtual topology already present")
-            
-        for node in slice.get_nodes(self.nodes):
-            node.sliver = True
-            linktag = slice.get_tag('topo_rspec', self.tags, node)
-            if linktag:
-                l = eval(linktag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    if node.id < id:
-                        bps = get_tc_rate(bw)
-                        remote = self.lookupNode(id)
-                        sitelink = self.lookupSiteLink(node, remote)
-                        self.nodelinks.append(Link(node,remote,bps,sitelink))
-
-    def updateSliceTags(self, slice):
-        if not self.nodelinks:
-            return
-        slice.update_tag('vini_topo', 'manual', self.tags)
-        slice.assign_egre_key(self.tags)
-        slice.turn_on_netns(self.tags)
-        slice.add_cap_net_admin(self.tags)
-
-        for node in slice.get_nodes(self.nodes):
-            linkdesc = []
-            for link in node.links:
-                linkdesc.append(node.get_topo_rspec(link))
-            if linkdesc:
-                topo_str = "%s" % linkdesc
-                slice.update_tag('topo_rspec', topo_str, self.tags, node)
-
-        # Update slice tags in database
-        for tag in self.getSliceTags():
-            if tag.slice_id == slice.id:
-                if tag.tagname == 'topo_rspec' and not tag.updated:
-                    tag.delete()
-                tag.write(self.api)
-                
-    """
-    Check the requested topology against the available topology and capacity
-    """
-    def verifyNodeTopo(self, hrn, topo):
-        for link in self.nodelinks:
-            if link.bps <= 0:
-                raise GeniInvalidArgument(bw, "BW")
-                
-            n1 = link.end1
-            n2 = link.end2
-            sitelink = self.lookupSiteLink(n1, n2)
-            if not sitelink:
-                raise PermissionError("%s: nodes %s and %s not adjacent" % (hrn, n1.tag, n2.tag))
-            if sitelink.bps < link.bps:
-                raise PermissionError("%s: insufficient capacity between %s and %s" % (hrn, n1.tag, n2.tag))
-                
-    """
-    Produce XML directly from the topology specification.
-    """
-    def toxml(self, hrn = None):
-        xml = XMLBuilder(format = True, tab_step = "  ")
-        with xml.RSpec(type="VINI"):
-            if hrn:
-                element = xml.network(name="Public_VINI", slice=hrn)
-            else:
-                element = xml.network(name="Public_VINI")
-                
-            with element:
-                for site in self.getSites():
-                    site.toxml(xml, hrn, self.nodes)
-                for link in self.sitelinks:
-                    link.toxml(xml)
-
-        header = '<?xml version="1.0"?>\n'
-        return header + str(xml)
-
-"""
-Create a dictionary of site objects keyed by site ID
-"""
-def get_sites(api):
-    tmp = []
-    for site in api.plshell.GetSites(api.plauth):
-        t = site['site_id'], Site(site)
-        tmp.append(t)
-    return dict(tmp)
-
-
-"""
-Create a dictionary of node objects keyed by node ID
-"""
-def get_nodes(api):
-    tmp = []
-    for node in api.plshell.GetNodes(api.plauth):
-        t = node['node_id'], Node(node)
-        tmp.append(t)
-    return dict(tmp)
-
-"""
-Create a dictionary of slice objects keyed by slice ID
-"""
-def get_slice(api, slicename):
-    slice = api.plshell.GetSlices(api.plauth, [slicename])
-    if slice:
-        return Slice(slice[0])
-    else:
-        return None
-
-"""
-Create a dictionary of slicetag objects keyed by slice tag ID
-"""
-def get_slice_tags(api):
-    tmp = []
-    for tag in api.plshell.GetSliceTags(api.plauth):
-        t = tag['slice_tag_id'], Slicetag(tag)
-        tmp.append(t)
-    return dict(tmp)
-    
-"""
-Find a free EGRE key
-"""
-def free_egre_key(slicetags):
-    used = set()
-    for i in slicetags:
-        tag = slicetags[i]
-        if tag.tagname == 'egre_key':
-            used.add(int(tag.value))
-                
-    for i in range(1, 256):
-        if i not in used:
-            key = i
-            break
-    else:
-        raise KeyError("No more EGRE keys available")
-        
-    return "%s" % key
-   
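
Node.get_virt_ip() and get_virt_net() above carve a private /30 out of 192.168.0.0/16 for each virtual link: the smaller node ID fills the high bits of a link ID, and the two endpoints take hosts .1 and .2 of the subnet. A standalone sketch of that derivation:

    def virt_addrs(id1, id2):
        lo, hi = sorted((id1, id2))
        link = (lo << 7) + hi              # same encoding as Node.get_link_id()
        first = link >> 6
        second = (link & 0x3f) << 2
        net = "192.168.%d.%d/30" % (first, second)
        ip_lo = "192.168.%d.%d" % (first, second + 1)   # lower-id endpoint (iface 1)
        ip_hi = "192.168.%d.%d" % (first, second + 2)   # higher-id endpoint (iface 2)
        return net, ip_lo, ip_hi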
diff --git a/sfa/rspecs/aggregates/vini/vini.rnc b/sfa/rspecs/aggregates/vini/vini.rnc
deleted file mode 100644 (file)
index bcd986d..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-start = RSpec
-RSpec = element RSpec { 
-   attribute type { xsd:NMTOKEN },
-   ( network | request )
-}
-network = element network {
-   attribute name { xsd:NMTOKEN },
-   attribute slice { xsd:NMTOKEN }?,
-   ( site | link )+
-}
-site = element site { 
-   attribute id { xsd:ID },
-   element name { text },
-   node+ 
-}
-node = element node {
-   attribute id { xsd:ID },
-   hostname,
-   kbps,
-   sliver*
-}
-link = element link { 
-   attribute endpoints { xsd:IDREFS },
-   element description { text }?,
-   kbps?,
-   vlink*
-}
-request = element request {
-   ( sliver | vlink )+
-}
-sliver = element sliver { 
-   attribute nodeid { xsd:ID }?,
-   empty
-}
-vlink = element vlink { 
-   attribute endpoints { xsd:IDREFS }?,
-   element description { text }?,
-   kbps? 
-}
-hostname = element hostname { text }
-kbps = element kbps { text }
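
This compact-syntax schema is the source of the XML (.rng) form that nodeTopoFromRSpec() validates incoming requests against. A sketch of that validation step, assuming request_xml holds an RSpec string and the .rng file sits at the path the deleted utils.py used:

    from lxml import etree
    from StringIO import StringIO

    relaxng = etree.RelaxNG(etree.parse("/var/www/html/schemas/vini.rng"))
    tree = etree.parse(StringIO(request_xml))
    if not relaxng(tree):
        error = relaxng.error_log.last_error
        print "%s (line %s)" % (error.message, error.line)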
diff --git a/sfa/rspecs/aggregates/vini/vini.xml b/sfa/rspecs/aggregates/vini/vini.xml
deleted file mode 100644 (file)
index 6c67eea..0000000
+++ /dev/null
@@ -1,336 +0,0 @@
-<?xml version="1.0"?>
-<RSpec type="VINI">
-  <network name="Public_VINI">
-    <site id="s2">
-      <name>Princeton</name>
-      <node id="n1">
-        <hostname>node1.princeton.vini-veritas.net</hostname>
-        <kbps>999000</kbps>
-      </node>
-    </site>
-    <site id="s3">
-      <name>PSG</name>
-      <node id="n25">
-        <hostname>node2.psg.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n6">
-        <hostname>node1.psg.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s4">
-      <name>NLR_Chicago</name>
-      <node id="n3">
-        <hostname>node2.chic.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n2">
-        <hostname>node1.chic.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s5">
-      <name>NLR_Houston</name>
-      <node id="n4">
-        <hostname>node1.hous.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n5">
-        <hostname>node2.hous.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s6">
-      <name>NLR_Atlanta</name>
-      <node id="n9">
-        <hostname>node2.atla.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n8">
-        <hostname>node1.atla.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s7">
-      <name>NLR_Seattle</name>
-      <node id="n11">
-        <hostname>node2.seat.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n10">
-        <hostname>node1.seat.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s8">
-      <name>NLR_Los_Angeles</name>
-      <node id="n13">
-        <hostname>node2.losa.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n12">
-        <hostname>node1.losa.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s9">
-      <name>NLR_New_York</name>
-      <node id="n15">
-        <hostname>node2.newy.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n14">
-        <hostname>node1.newy.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s10">
-      <name>NLR_Wash_DC</name>
-      <node id="n17">
-        <hostname>node2.wash.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n16">
-        <hostname>node1.wash.nlr.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s11">
-      <name>I2_Chicago</name>
-      <node id="n18">
-        <hostname>node1.chic.internet2.vini-veritas.net</hostname>
-        <kbps>963000</kbps>
-      </node>
-      <node id="n19">
-        <hostname>node2.chic.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s12">
-      <name>I2_New_York</name>
-      <node id="n21">
-        <hostname>node2.newy.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n20">
-        <hostname>node1.newy.internet2.vini-veritas.net</hostname>
-        <kbps>988000</kbps>
-      </node>
-    </site>
-    <site id="s13">
-      <name>I2_Wash_DC</name>
-      <node id="n23">
-        <hostname>node2.wash.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n22">
-        <hostname>node1.wash.internet2.vini-veritas.net</hostname>
-        <kbps>964000</kbps>
-      </node>
-    </site>
-    <site id="s14">
-      <name>Georgia_Tech</name>
-      <node id="n45">
-        <hostname>node1.gatech.vini-veritas.net</hostname>
-        <kbps>999000</kbps>
-      </node>
-    </site>
-    <site id="s15">
-      <name>I2_Atlanta</name>
-      <node id="n26">
-        <hostname>node1.atla.internet2.vini-veritas.net</hostname>
-        <kbps>964000</kbps>
-      </node>
-      <node id="n27">
-        <hostname>node2.atla.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s16">
-      <name>CESNET</name>
-      <node id="n43">
-        <hostname>node2.cesnet.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n42">
-        <hostname>node1.cesnet.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s17">
-      <name>I2_Kansas_City</name>
-      <node id="n29">
-        <hostname>node2.kans.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n28">
-        <hostname>node1.kans.internet2.vini-veritas.net</hostname>
-        <kbps>961000</kbps>
-      </node>
-    </site>
-    <site id="s19">
-      <name>I2_Houston</name>
-      <node id="n30">
-        <hostname>node1.hous.internet2.vini-veritas.net</hostname>
-        <kbps>964000</kbps>
-      </node>
-      <node id="n31">
-        <hostname>node2.hous.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s20">
-      <name>I2_Los_Angeles</name>
-      <node id="n36">
-        <hostname>node1.losa.internet2.vini-veritas.net</hostname>
-        <kbps>964000</kbps>
-      </node>
-      <node id="n37">
-        <hostname>node2.losa.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s21">
-      <name>I2_Seattle</name>
-      <node id="n34">
-        <hostname>node1.seat.internet2.vini-veritas.net</hostname>
-        <kbps>975000</kbps>
-      </node>
-      <node id="n35">
-        <hostname>node2.seat.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <site id="s22">
-      <name>I2_Salt_Lake</name>
-      <node id="n33">
-        <hostname>node2.salt.internet2.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-      <node id="n32">
-        <hostname>node1.salt.internet2.vini-veritas.net</hostname>
-        <kbps>962000</kbps>
-      </node>
-    </site>
-    <site id="s24">
-      <name>UMKC</name>
-      <node id="n48">
-        <hostname>node1.umkc.vini-veritas.net</hostname>
-        <kbps>999000</kbps>
-      </node>
-      <node id="n50">
-        <hostname>node2.umkc.vini-veritas.net</hostname>
-        <kbps>1000000</kbps>
-      </node>
-    </site>
-    <link endpoints="s2 s12">
-      <description>Princeton -- I2_New_York</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s4 s5">
-      <description>NLR_Chicago -- NLR_Houston</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s4 s6">
-      <description>NLR_Chicago -- NLR_Atlanta</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s4 s7">
-      <description>NLR_Chicago -- NLR_Seattle</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s4 s9">
-      <description>NLR_Chicago -- NLR_New_York</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s4 s10">
-      <description>NLR_Chicago -- NLR_Wash_DC</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s5 s6">
-      <description>NLR_Houston -- NLR_Atlanta</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s5 s8">
-      <description>NLR_Houston -- NLR_Los_Angeles</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s6 s10">
-      <description>NLR_Atlanta -- NLR_Wash_DC</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s6 s14">
-      <description>NLR_Atlanta -- Georgia_Tech</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s7 s8">
-      <description>NLR_Seattle -- NLR_Los_Angeles</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s9 s10">
-      <description>NLR_New_York -- NLR_Wash_DC</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s11 s13">
-      <description>I2_Chicago -- I2_Wash_DC</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s11 s15">
-      <description>I2_Chicago -- I2_Atlanta</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s11 s16">
-      <description>I2_Chicago -- CESNET</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s11 s17">
-      <description>I2_Chicago -- I2_Kansas_City</description>
-      <kbps>987000</kbps>
-    </link>
-    <link endpoints="s12 s13">
-      <description>I2_New_York -- I2_Wash_DC</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s13 s15">
-      <description>I2_Wash_DC -- I2_Atlanta</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s14 s15">
-      <description>Georgia_Tech -- I2_Atlanta</description>
-      <kbps>1000000</kbps>
-    </link>
-    <link endpoints="s15 s19">
-      <description>I2_Atlanta -- I2_Houston</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s17 s19">
-      <description>I2_Kansas_City -- I2_Houston</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s17 s22">
-      <description>I2_Kansas_City -- I2_Salt_Lake</description>
-      <kbps>987000</kbps>
-    </link>
-    <link endpoints="s17 s24">
-      <description>I2_Kansas_City -- UMKC</description>
-      <kbps>999000</kbps>
-    </link>
-    <link endpoints="s19 s20">
-      <description>I2_Houston -- I2_Los_Angeles</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s20 s21">
-      <description>I2_Los_Angeles -- I2_Seattle</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s20 s22">
-      <description>I2_Los_Angeles -- I2_Salt_Lake</description>
-      <kbps>988000</kbps>
-    </link>
-    <link endpoints="s21 s22">
-      <description>I2_Seattle -- I2_Salt_Lake</description>
-      <kbps>987000</kbps>
-    </link>
-  </network>
-</RSpec>
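
The kbps figures in this advertisement are the internal bits-per-second values divided by 1000 (see Node.toxml() and Link.toxml() above), with capacity already reduced by existing allocations that Topology.__init__() extracts from topo_rspec tags via get_tc_rate(). For example, with the helpers from the deleted utils.py:

    get_tc_rate("1.5mbit")     # -> 1500000 bits/s
    format_tc_rate(1500000)    # -> "1500kbit"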