-from sfa.util.rspec import Rspec
+#!/usr/bin/python
+
+from sfa.util.rspec import RSpec
import sys
import pdb
+from sfa.util.xrn import get_authority
+from sfa.util.plxrn import hrn_to_pl_slicename
+from sfa.util.rspec import *
+from sfa.util.specdict import *
+from sfa.util.faults import *
+from sfa.util.storage import *
+from sfa.util.policy import Policy
+from sfa.server.aggregate import Aggregates
+from sfa.server.registry import Registries
+from sfa.util.faults import *
+
+import xml.dom.minidom
SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
+SFA_MAX_DEFAULT_RSPEC = '/etc/sfa/max_physical.xml'
+SFA_MAX_CANNED_RSPEC = '/etc/sfa/max_physical_canned.xml'
+
+topology = {}
+
+class SfaOutOfResource(SfaFault):
+ def __init__(self, interface):
+ faultString = "Interface " + interface + " not available"
+ SfaFault.__init__(self, 100, faultString, '')
+
+class SfaNoPairRSpec(SfaFault):
+ def __init__(self, interface, interface2):
+ faultString = "Interface " + interface + " should be paired with " + interface2
+ SfaFault.__init__(self, 100, faultString, '')
-# Topology
+# Returns a mapping from interfaces to the nodes they lie on and their peer interfaces
+# i -> node,i_peer
+
+def get_interface_map():
+ r = RSpec()
+ r.parseFile(SFA_MAX_DEFAULT_RSPEC)
+ rspec = r.toDict()
+ capacity = rspec['rspec']['capacity']
+ netspec = capacity[0]['netspec'][0]
+ linkdefs = {}
+ for n in netspec['nodespec']:
+ ifspecs = n['ifspec']
+ nodename = n['node']
+ for i in ifspecs:
+ ifname = i['name']
+ linkid = i['linkid']
+
+ if (linkdefs.has_key(linkid)):
+ linkdefs[linkid].extend([(nodename,ifname)])
+ else:
+ linkdefs[linkid]=[(nodename,ifname)]
+
+ # topology maps interface x interface -> link,node1,node2
+ topology={}
-topology = {'pl23':('planetlab2.dragon.maxgigapop.net','planetlab3.dragon.maxgigapop.net'),
- 'pl24':('planetlab2.dragon.maxgigapop.net','planetlab4.dragon.maxgigapop.net'),
- 'pl25':('planetlab2.dragon.maxgigapop.net','planetlab5.dragon.maxgigapop.net'),
- 'pl34':('planetlab3.dragon.maxgigapop.net','planetlab4.dragon.maxgigapop.net'),
- 'pl35':('planetlab3.dragon.maxgigapop.net','planetlab5.dragon.maxgigapop.net'),
- 'pl45':('planetlab4.dragon.maxgigapop.net','planetlab5.dragon.maxgigapop.net')
- }
+ for k in linkdefs.keys():
+ (n1,i1) = linkdefs[k][0]
+ (n2,i2) = linkdefs[k][1]
-def link_endpoints(links):
+ topology[i1] = (n1, i2)
+ topology[i2] = (n2, i1)
+
+
+ return topology
+
+
+def allocations_to_rspec(allocations):
+ rspec = xml.dom.minidom.parse(SFA_MAX_DEFAULT_RSPEC)
+ req = rspec.firstChild.appendChild(rspec.createElement("request"))
+ for (iname,ip) in allocations:
+ ifspec = req.appendChild(rspec.createElement("ifspec"))
+ ifspec.setAttribute("name","tns:"+iname)
+ ifspec.setAttribute("ip",ip)
+
+ return rspec.toxml()
+
+
+def if_endpoints(ifs):
nodes=[]
- for l in links:
- nodes.extend(topology[l])
+ for l in ifs:
+ nodes.append(topology[l][0])
return nodes
def lock_state_file():
rows = open(SFA_MAX_CONF_FILE).read().split('\n')
for r in rows:
columns = r.split(' ')
- if (len(columns)>2):
+ if (len(columns)==2):
hrn = columns[0]
allocs = columns[1].split(',')
- alloc_dict[hrn]=allocs
+ ipallocs = map(lambda alloc:alloc.split('/'), allocs)
+ alloc_dict[hrn]=ipallocs
return alloc_dict
def commit_alloc_dict(d):
f = open(SFA_MAX_CONF_FILE, 'w')
for hrn in d.keys():
columns = d[hrn]
- row = hrn+' '+','.join(columns)+'\n'
+ ipcolumns = map(lambda x:"/".join(x), columns)
+ row = hrn+' '+','.join(ipcolumns)+'\n'
f.write(row)
f.close()
ret.extend(d[k])
return ret
-def bootstrap_slice(api, hrn, added_nodes, deleted_nodes):
- # This code is taken from slices.py
- # To clean up after 21 July
- # Get the slice record from geni
- slice = {}
- registries = Registries(api)
- registry = registries[self.api.hrn]
- credential = self.api.getCredential()
- records = registry.resolve(credential, hrn)
- for record in records:
- if record.get_type() in ['slice']:
- slice = record.as_dict()
- if not slice:
- raise RecordNotFound(hrn)
-
- # Make sure slice exists at plc, if it doesnt add it
- slicename = hrn_to_pl_slicename(hrn)
- slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
- if not slices:
- parts = slicename.split("_")
- login_base = parts[0]
- # if site doesnt exist add it
- sites = api.plshell.GetSites(api.plauth, [login_base])
- if not sites:
- authority = get_authority(hrn)
- site_records = registry.resolve(credential, authority)
- site_record = {}
- if not site_records:
- raise RecordNotFound(authority)
- site_record = site_records[0]
- site = site_record.as_dict()
+
+def alloc_links(api, hrn, links_to_add, links_to_drop):
+ slicename=hrn_to_pl_slicename(hrn)
+ for (iface,ip) in links_to_add:
+ node = topology[iface][0]
+ try:
+ api.plshell.AddSliceTag(api.plauth, slicename, "ip_addresses", ip, node)
+ api.plshell.AddSliceTag(api.plauth, slicename, "vsys", "getvlan", node)
+ except Exception:
+ # Probably a duplicate tag. XXX July 21
+ pass
+ return True
+
+def alloc_nodes(api,hrn, requested_ifs):
+ requested_nodes = if_endpoints(requested_ifs)
+ create_slice_max_aggregate(api, hrn, requested_nodes)
+
+# Taken from slices.py
+
+def create_slice_max_aggregate(api, hrn, nodes):
+ # Get the slice record from SFA
+ global topology
+ topology = get_interface_map()
+ slice = {}
+ registries = Registries(api)
+ registry = registries[api.hrn]
+ credential = api.getCredential()
+ records = registry.resolve(credential, hrn)
+ for record in records:
+ if record.get_type() in ['slice']:
+ slice = record.as_dict()
+ if not slice:
+ raise RecordNotFound(hrn)
+
+ # Make sure slice exists at plc, if it doesnt add it
+ slicename = hrn_to_pl_slicename(hrn)
+ slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
+ if not slices:
+ parts = slicename.split("_")
+ login_base = parts[0]
+ # if site doesnt exist add it
+ sites = api.plshell.GetSites(api.plauth, [login_base])
+ if not sites:
+ authority = get_authority(hrn)
+ site_records = registry.resolve(credential, authority)
+ site_record = {}
+ if not site_records:
+ raise RecordNotFound(authority)
+ site_record = site_records[0]
+ site = site_record.as_dict()
- # add the site
- site.pop('site_id')
- site_id = api.plshell.AddSite(api.plauth, site)
- else:
- site = sites[0]
+ # add the site
+ site.pop('site_id')
+ site_id = api.plshell.AddSite(api.plauth, site)
+ else:
+ site = sites[0]
- slice_fields = {}
- slice_keys = ['name', 'url', 'description']
- for key in slice_keys:
- if key in slice and slice[key]:
- slice_fields[key] = slice[key]
- api.plshell.AddSlice(api.plauth, slice_fields)
- slice = slice_fields
- slice['node_ids'] = 0
+ slice_fields = {}
+ slice_keys = ['name', 'url', 'description']
+ for key in slice_keys:
+ if key in slice and slice[key]:
+ slice_fields[key] = slice[key]
+ api.plshell.AddSlice(api.plauth, slice_fields)
+ slice = slice_fields
+ slice['node_ids'] = 0
+ else:
+ slice = slices[0]
+
+ # get the list of valid slice users from the registry and make
+ # sure they are added to the slice
+ researchers = record.get('researcher', [])
+ for researcher in researchers:
+ person_record = {}
+ person_records = registry.resolve(credential, researcher)
+ for record in person_records:
+ if record.get_type() in ['user']:
+ person_record = record
+ if not person_record:
+ continue
+ person_dict = person_record.as_dict()
+ persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
+ ['person_id', 'key_ids'])
+
+ # Create the person record
+ if not persons:
+ person_id=api.plshell.AddPerson(api.plauth, person_dict)
+
+ # The line below enables the user account on the remote aggregate
+ # soon after it is created.
+ # without this the user key is not transfered to the slice
+ # (as GetSlivers returns key of only enabled users),
+ # which prevents the user from login to the slice.
+ # We may do additional checks before enabling the user.
+
+ api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
+ key_ids = []
else:
- slice = slices[0]
- # get the list of valid slice users from the registry and make
- # they are added to the slice
- researchers = record.get('researcher', [])
- for researcher in researchers:
- person_record = {}
- person_records = registry.resolve(credential, researcher)
- for record in person_records:
- if record.get_type() in ['user']:
- person_record = record
- if not person_record:
- pass
- person_dict = person_record.as_dict()
- persons = api.plshell.GetPersons(api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
-
- # Create the person record
- if not persons:
- person_id=api.plshell.AddPerson(api.plauth, person_dict)
-
- # The line below enables the user account on the remote aggregate soon after it is created.
- # without this the user key is not transfered to the slice (as GetSlivers returns key of only enabled users),
- # which prevents the user from login to the slice. We may do additional checks before enabling the user.
-
- api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
- key_ids = []
- else:
- key_ids = persons[0]['key_ids']
+ key_ids = persons[0]['key_ids']
- api.plshell.AddPersonToSlice(api.plauth, person_dict['email'], slicename)
+ api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
+ slicename)
- # Get this users local keys
- keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
- keys = [key['key'] for key in keylist]
+ # Get this users local keys
+ keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
+ keys = [key['key'] for key in keylist]
- # add keys that arent already there
- for personkey in person_dict['keys']:
- if personkey not in keys:
- key = {'key_type': 'ssh', 'key': personkey}
- api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
+ # add keys that arent already there
+ for personkey in person_dict['keys']:
+ if personkey not in keys:
+ key = {'key_type': 'ssh', 'key': personkey}
+ api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
- # find out where this slice is currently running
- nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'], ['hostname'])
- hostnames = [node['hostname'] for node in nodelist]
+ # find out where this slice is currently running
+ nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
+ ['hostname'])
+ hostnames = [node['hostname'] for node in nodelist]
- api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes)
- api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
+ # remove nodes not in rspec
+ deleted_nodes = list(set(hostnames).difference(nodes))
+ # add nodes from rspec
+ added_nodes = list(set(nodes).difference(hostnames))
- return 1
+ api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes)
+ api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
-def alloc_links(api, links_to_add, links_to_drop, foo):
-
-def alloc_nodes(api,hrn, links_to_add, links_to_delete):
-
- nodes_to_add = link_endpoints(links_to_add)
- nodes_to_delete = link_endpoints(links_to_delete)
+ return 1
- pdb.set_trace()
- bootstrap_slice(api, hrn, nodes_to_add, nodes_to_delete)
-
-def get_rspec(hrn):
+def ListResources(api, hrn):
# Eg. config line:
# plc.princeton.sapan vlan23,vlan45
allocations = read_alloc_dict()
- if (hrn):
- current_allocations = allocations[hrn]
+ if (hrn and allocations.has_key(hrn)):
+ ret_rspec = allocations_to_rspec(allocations[hrn])
else:
- current_allocations = collapse_alloc_dict(allocations)
+ ret_rspec = open(SFA_MAX_CANNED_RSPEC).read()
+
+ return (ret_rspec)
- return (allocations_to_rspec_dict(current_allocations))
+def CreateSliver(api, hrn, rspec_xml):
+ global topology
+ topology = get_interface_map()
-def create_slice(api, hrn, rspec):
# Check if everything in rspec is either allocated by hrn
# or not allocated at all.
+ r = RSpec()
+ r.parseString(rspec_xml)
+ rspec = r.toDict()
lock_state_file()
current_hrn_allocations=[]
# Check request against current allocations
- for a in requested_allocations:
- if (a not in current_hrn_allocations and a in current_allocations):
- return False
+ requested_interfaces = map(lambda(elt):elt[0], requested_allocations)
+ current_interfaces = map(lambda(elt):elt[0], current_allocations)
+ current_hrn_interfaces = map(lambda(elt):elt[0], current_hrn_allocations)
+
+ for a in requested_interfaces:
+ if (a not in current_hrn_interfaces and a in current_interfaces):
+ raise SfaOutOfResource(a)
+ if (topology[a][1] not in requested_interfaces):
+ raise SfaNoPairRSpec(a,topology[a][1])
# Request OK
# Allocations to delete
allocations_to_delete.extend([a])
# Ok, let's do our thing
- alloc_nodes(api, hrn, requested_allocations, allocations_to_delete)
+ alloc_nodes(api, hrn, requested_interfaces)
alloc_links(api, hrn, requested_allocations, allocations_to_delete)
allocations[hrn] = requested_allocations
commit_alloc_dict(allocations)
return True
def rspec_to_allocations(rspec):
- links = []
+ ifs = []
try:
- linkspecs = rspec['rspec']['request'][0]['netspec'][0]['linkspec']
- for l in linkspecs:
- links.extend([l['name'].replace('tns:','')])
-
+ ifspecs = rspec['rspec']['request'][0]['ifspec']
+ for l in ifspecs:
+ ifs.extend([(l['name'].replace('tns:',''),l['ip'])])
except KeyError:
- # Bad Rspec
+ # Bad RSpec
pass
- return links
+ return ifs
def main():
- r = Rspec()
+ t = get_interface_map()
+ r = RSpec()
rspec_xml = open(sys.argv[1]).read()
- r.parseString(rspec_xml)
- rspec = r.toDict()
- create_slice(None,'plc',rspec)
+ #ListResources(None,'foo')
+ CreateSliver(None, "plc.princeton.sap0", rspec_xml)
if __name__ == "__main__":
main()