(no commit message)
authorSapan Bhatia <sapanb@cs.princeton.edu>
Wed, 15 Jul 2009 23:21:30 +0000 (23:21 +0000)
committerSapan Bhatia <sapanb@cs.princeton.edu>
Wed, 15 Jul 2009 23:21:30 +0000 (23:21 +0000)
sfa/methods/create_slice.py
sfa/methods/get_resources.py
sfa/rspecs/aggregates/max.xml [new file with mode: 0644]
sfa/rspecs/aggregates/rspec_manager_max.py [new file with mode: 0644]

index 122da36..47a3ff1 100644 (file)
@@ -33,13 +33,13 @@ class create_slice(Method):
     
     def call(self, cred, hrn, rspec):
         sfa_aggregate_type = Config().get_aggregate_rspec_type()
+        self.api.auth.check(cred, 'createslice')
         if (sfa_aggregate_type == 'pl'):
-            self.api.auth.check(cred, 'createslice')
             slices = Slices(self.api)
             slices.create_slice(hrn, rspec)    
         else:
             # To clean up after July 21 - SB    
             rspec_manager = __import__("sfa.rspecs.aggregates.rspec_manager_"+sfa_aggregate_type)
-            rspec = rspec_manager.create_slice(hrn, rspec)
+            rspec = rspec_manager.create_slice(self.api, hrn, rspec)
         
         return 1 
index 2d84656..a001b35 100644 (file)
@@ -32,8 +32,9 @@ class get_resources(Method):
     
     def call(self, cred, hrn=None):
         sfa_aggregate_type = Config().get_aggregate_rspec_type()
+
+        self.api.auth.check(cred, 'listnodes')
         if (sfa_aggregate_type == 'pl'):
-            self.api.auth.check(cred, 'listnodes')
             nodes = Nodes(self.api)
             if hrn:
                 rspec = nodes.get_rspec(hrn)
diff --git a/sfa/rspecs/aggregates/max.xml b/sfa/rspecs/aggregates/max.xml
new file mode 100644 (file)
index 0000000..f08558c
--- /dev/null
@@ -0,0 +1,38 @@
+<rspec name="max" targetNamespace="http://www.maxgigapop.net/sfa/rspec/vlanspec.rspec"
+                  xmlns:tns="http://www.maxgigapop.net/sfa/rspec/vlanspec.rspec"
+                  xmlns="http://www.planet-lab.org/sfa/rspec/">
+    <capacity>
+        <netspec name="predefined_physical_topology">
+            <nodespec name="planetlab2">
+                <node>planetlab2.dragon.maxgigapop.net</node>
+                <ifspec name="pl23" linkid="pl23"/>
+                <ifspec name="pl24" linkid="pl24"/>
+                <ifspec name="pl25" linkid="pl25"/>
+            </nodespec>
+            <nodespec name="planetlab3">
+                <node>planetlab3.dragon.maxgigapop.net</node>
+                <ifspec name="pl32" linkid="pl23"/>
+                <ifspec name="pl34" linkid="pl34"/>
+                <ifspec name="pl35" linkid="pl35"/>
+            </nodespec>
+            <nodespec name="planetlab4">
+                <node>planetlab4.dragon.maxgigapop.net</node>
+                <ifspec name="pl42" linkid="pl24"/>
+                <ifspec name="pl43" linkid="pl34"/>
+                <ifspec name="pl45" linkid="pl45"/>
+            </nodespec>
+            <nodespec name="planetlab5">
+                <node>planetlab5.dragon.maxgigapop.net</node> 
+                <ifspec name="pl52" linkid="pl25"/>
+                <ifspec name="pl53" linkid="pl35"/>
+                <ifspec name="pl54" linkid="pl45"/>
+            </nodespec>
+        </netspec>
+    </capacity>
+    <request>
+        <netspec name="default_mesh">
+            <linkspec name="tns:pl23"/>
+            <linkspec name="tns:pl34"/>
+        </netspec>
+    </request>
+</rspec>
diff --git a/sfa/rspecs/aggregates/rspec_manager_max.py b/sfa/rspecs/aggregates/rspec_manager_max.py
new file mode 100644 (file)
index 0000000..382a63d
--- /dev/null
@@ -0,0 +1,232 @@
+from sfa.util.rspec import Rspec
+import sys
+import pdb
+
+SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
+
# Static demo topology: link id -> (endpoint A hostname, endpoint B hostname).
# Link id 'plXY' joins planetlabX and planetlabY.
topology = {
    'pl23': ('planetlab2.dragon.maxgigapop.net', 'planetlab3.dragon.maxgigapop.net'),
    'pl24': ('planetlab2.dragon.maxgigapop.net', 'planetlab4.dragon.maxgigapop.net'),
    'pl25': ('planetlab2.dragon.maxgigapop.net', 'planetlab5.dragon.maxgigapop.net'),
    'pl34': ('planetlab3.dragon.maxgigapop.net', 'planetlab4.dragon.maxgigapop.net'),
    'pl35': ('planetlab3.dragon.maxgigapop.net', 'planetlab5.dragon.maxgigapop.net'),
    'pl45': ('planetlab4.dragon.maxgigapop.net', 'planetlab5.dragon.maxgigapop.net'),
}
+
def link_endpoints(links):
    """Return the node hostnames at the endpoints of the given links.

    Each link id is looked up in the module-level ``topology`` table and the
    two endpoint hostnames of every link are accumulated, in order.
    Duplicates are kept (a node shared by two links appears twice).

    BUG FIX: the original returned ``l`` -- the last link id inspected --
    instead of the accumulated ``nodes`` list, so callers never saw any
    endpoint hostnames.
    """
    nodes = []
    for link in links:
        nodes.extend(topology[link])
    return nodes
+
+
def lock_state_file():
    """Acquire the allocation-state file lock.

    No-op placeholder for the demo: always reports success.
    """
    return True
+
def unlock_state_file():
    """Release the allocation-state file lock.

    No-op placeholder for the demo: always reports success.
    """
    return True
+
def read_alloc_dict():
    """Parse SFA_MAX_CONF_FILE into ``{hrn: [allocation, ...]}``.

    Each non-empty line has the form documented in get_rspec, e.g.::

        plc.princeton.sapan vlan23,vlan45

    i.e. an hrn followed by a single space and a comma-separated
    allocation list.

    BUG FIX: the original required ``len(columns) > 2`` and therefore
    skipped every two-field row -- exactly the format that
    ``commit_alloc_dict`` writes -- so allocations never survived a
    write/read round trip.  Also closes the file deterministically.
    """
    alloc_dict = {}
    with open(SFA_MAX_CONF_FILE) as f:
        for row in f.read().split('\n'):
            columns = row.split(' ')
            if len(columns) >= 2:
                hrn = columns[0]
                alloc_dict[hrn] = columns[1].split(',')
    return alloc_dict
+
def commit_alloc_dict(d):
    """Write the allocation dict back to SFA_MAX_CONF_FILE.

    One line per hrn, in the same format read_alloc_dict parses:
    ``<hrn> <alloc1>,<alloc2>,...``.

    Improvement: use a ``with`` block so the file handle is closed even if
    a write raises (the original leaked the handle on error).
    """
    with open(SFA_MAX_CONF_FILE, 'w') as f:
        for hrn, columns in d.items():
            f.write(hrn + ' ' + ','.join(columns) + '\n')
+
def collapse_alloc_dict(d):
    """Flatten ``{hrn: [alloc, ...]}`` into one list of every allocation."""
    return [alloc for allocs in d.values() for alloc in allocs]
+
def bootstrap_slice(api, hrn, added_nodes, deleted_nodes):
    """Ensure slice *hrn* (plus its site and researchers) exists at PLC,
    then add it to *added_nodes* and remove it from *deleted_nodes*.

    Code lifted from slices.py (per the original note, to be cleaned up
    after 21 July).  Returns 1 on success; raises RecordNotFound when the
    slice or its authority cannot be resolved in the registry.

    BUG FIXES vs. original:
      * ``self.api`` was referenced throughout even though this is a
        module-level function with no ``self`` -- replaced with *api*;
      * a mis-indented block around UpdatePerson made the module fail to
        parse;
      * ``if not person_record: pass`` fell through to
        ``person_record.as_dict()`` on an empty dict and crashed -- now
        skips that researcher.
    """
    # Get the slice record from the registry.
    slice = {}
    registries = Registries(api)
    registry = registries[api.hrn]
    credential = api.getCredential()
    records = registry.resolve(credential, hrn)
    for record in records:
        if record.get_type() in ['slice']:
            slice = record.as_dict()
    if not slice:
        raise RecordNotFound(hrn)

    # Make sure the slice exists at plc; if it doesn't, add it.
    slicename = hrn_to_pl_slicename(hrn)
    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
    if not slices:
        parts = slicename.split("_")
        login_base = parts[0]
        # If the site doesn't exist, add it first.
        sites = api.plshell.GetSites(api.plauth, [login_base])
        if not sites:
            authority = get_authority(hrn)
            site_records = registry.resolve(credential, authority)
            if not site_records:
                raise RecordNotFound(authority)
            site_record = site_records[0]
            site = site_record.as_dict()

            # Add the site (PLC assigns a fresh site_id).
            site.pop('site_id')
            site_id = api.plshell.AddSite(api.plauth, site)
        else:
            site = sites[0]

        slice_fields = {}
        slice_keys = ['name', 'url', 'description']
        for key in slice_keys:
            if key in slice and slice[key]:
                slice_fields[key] = slice[key]
        api.plshell.AddSlice(api.plauth, slice_fields)
        slice = slice_fields
        # NOTE(review): 0 looks wrong here -- GetNodes below expects a list
        # of ids, so [] is probably intended.  Kept as-is; confirm upstream.
        slice['node_ids'] = 0
    else:
        slice = slices[0]

    # Get the list of valid slice users from the registry and make sure
    # they are added to the slice.
    researchers = record.get('researcher', [])
    for researcher in researchers:
        person_record = {}
        person_records = registry.resolve(credential, researcher)
        for record in person_records:
            if record.get_type() in ['user']:
                person_record = record
        if not person_record:
            # Researcher did not resolve to a user record; skip it
            # (the original fell through and crashed on as_dict()).
            continue
        person_dict = person_record.as_dict()
        persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
                                         ['person_id', 'key_ids'])

        # Create the person record if it doesn't exist yet.
        if not persons:
            person_id = api.plshell.AddPerson(api.plauth, person_dict)

            # Enable the account right after creation: GetSlivers only
            # returns keys of enabled users, so without this the user's key
            # never reaches the slice and login fails.  Additional checks
            # could be done before enabling.
            api.plshell.UpdatePerson(api.plauth, person_id, {'enabled': True})
            key_ids = []
        else:
            key_ids = persons[0]['key_ids']

        api.plshell.AddPersonToSlice(api.plauth, person_dict['email'], slicename)

        # Get this user's local keys.
        keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
        keys = [key['key'] for key in keylist]

        # Add keys that aren't already there.
        for personkey in person_dict['keys']:
            if personkey not in keys:
                key = {'key_type': 'ssh', 'key': personkey}
                api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)

    # Find out where this slice is currently running (lookup kept for
    # parity with the original; hostnames is otherwise unused here).
    nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'], ['hostname'])
    hostnames = [node['hostname'] for node in nodelist]

    api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes)
    api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)

    return 1
+
def alloc_nodes(api, hrn, links_to_add, links_to_delete):
    """Allocate/deallocate the PlanetLab nodes at the endpoints of the
    given link lists on behalf of slice *hrn*.

    BUG FIXES vs. original:
      * the signature lacked *api* even though the only call site
        (create_slice) passes it first -- every call raised TypeError;
      * the print loop iterated an undefined name ``requested_allocations``
        -- it now reports the links actually being added.
    """
    nodes_to_add = link_endpoints(links_to_add)
    nodes_to_delete = link_endpoints(links_to_delete)

    # Slice bootstrap is disabled for the demo:
    # bootstrap_slice(api, hrn, nodes_to_add, nodes_to_delete)

    for link in links_to_add:
        print("Requesting " + link)
+
def get_rspec(hrn):
    """Return the rspec dict describing *hrn*'s current allocations, or all
    allocations when *hrn* is falsy.

    Example config line in SFA_MAX_CONF_FILE::

        plc.princeton.sapan vlan23,vlan45
    """
    allocations = read_alloc_dict()
    if hrn:
        # BUG FIX: the original did allocations[hrn] and raised KeyError for
        # an unknown hrn; use the same tolerant lookup as create_slice and
        # report an empty allocation list instead.
        current_allocations = allocations.get(hrn, [])
    else:
        current_allocations = collapse_alloc_dict(allocations)

    # NOTE(review): allocations_to_rspec_dict is not defined anywhere in
    # this module -- presumably provided elsewhere; confirm before use.
    return allocations_to_rspec_dict(current_allocations)
+
+
def create_slice(api, hrn, rspec):
    """Grant *hrn* the link allocations requested in *rspec*.

    The request is rejected (returns False) if it claims any link that is
    currently allocated to a different hrn; otherwise node and link
    allocations are applied, the state file is rewritten, and True is
    returned.
    """
    lock_state_file()

    allocations = read_alloc_dict()
    requested = rspec_to_allocations(rspec)
    taken = collapse_alloc_dict(allocations)
    mine = allocations.get(hrn, [])

    # Every requested link must be either free or already ours.
    for link in requested:
        if link in taken and link not in mine:
            return False

    # Links we currently hold but no longer request must be released.
    to_delete = [link for link in mine if link not in requested]

    # Apply the allocation.
    alloc_nodes(api, hrn, requested, to_delete)
    # NOTE(review): alloc_links is not defined in this module -- presumably
    # supplied elsewhere; confirm before deployment.
    alloc_links(api, hrn, requested, to_delete)
    allocations[hrn] = requested
    commit_alloc_dict(allocations)

    unlock_state_file()

    return True
+
def rspec_to_allocations(rspec):
    """Extract the requested link ids from an rspec dict.

    Walks ``rspec['rspec']['request'][0]['netspec'][0]['linkspec']`` and
    strips the ``tns:`` namespace prefix from each link name.  A malformed
    rspec yields whatever links were collected before the structure broke
    (usually an empty list).

    BUG FIX: the original caught only KeyError, so an rspec with an empty
    ``request`` or ``netspec`` list raised IndexError and a non-container
    value raised TypeError; both are now treated as "bad rspec".
    """
    links = []
    try:
        linkspecs = rspec['rspec']['request'][0]['netspec'][0]['linkspec']
        for linkspec in linkspecs:
            links.append(linkspec['name'].replace('tns:', ''))
    except (KeyError, IndexError, TypeError):
        # Bad rspec -- treat as "no links requested".
        pass
    return links
+
def main():
    """Parse the rspec XML file named on the command line and feed it to
    create_slice for the 'plc' hrn (no api handle in this standalone mode).
    """
    parser = Rspec()
    parser.parseString(open(sys.argv[1]).read())
    create_slice(None, 'plc', parser.toDict())
+    
+if __name__ == "__main__":
+    main()