- key_ids = persons[0]['key_ids']
-
- api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
- slicename)
-
- # Get this users local keys
- keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
- keys = [key['key'] for key in keylist]
-
- # add keys that arent already there
- for personkey in person_dict['keys']:
- if personkey not in keys:
- key = {'key_type': 'ssh', 'key': personkey}
- api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
-
- # find out where this slice is currently running
- nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
- ['hostname'])
- hostnames = [node['hostname'] for node in nodelist]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(hostnames).difference(nodes))
- # add nodes from rspec
- added_nodes = list(set(nodes).difference(hostnames))
-
- api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes)
- api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
-
-def delete_slice(api, xrn, cred):
- slice_id = get_short_slice_id(cred, xrn)
- (ret, output) = call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)
- # parse output ?
- return 1
-
-
-def get_rspec(api, creds, options):
- # get slice's hrn from options
- xrn = options.get('geni_slice_urn', None)
- hrn, type = urn_to_hrn(xrn)
- # Eg. config line:
- # plc.princeton.sapan vlan23,vlan45
-
- allocations = read_alloc_dict()
- if (hrn and allocations.has_key(hrn)):
- ret_rspec = allocations_to_rspec(allocations[hrn])
-def get_rspec(api, cred, options):
- #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
- urn = options.get('geni_slice_urn')
- slice_id = get_short_slice_id(cred, urn)
- if slice_id == None:
- (ret, output) = call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)
- else:
- ret_rspec = open(SFA_MAX_CANNED_RSPEC).read()
-
- return (ret_rspec)
-
-
-def create_slice(api, xrn, creds, rspec_xml, users):
- global topology
- hrn = urn_to_hrn(xrn)[0]
- topology = get_interface_map()
-
- # Check if everything in rspec is either allocated by hrn
- # or not allocated at all.
- r = RSpec()
- r.parseString(rspec_xml)
- rspec = r.toDict()
-
- lock_state_file()
-
- allocations = read_alloc_dict()
- requested_allocations = rspec_to_allocations (rspec)
- current_allocations = collapse_alloc_dict(allocations)
- try:
- current_hrn_allocations=allocations[hrn]
- except KeyError:
- current_hrn_allocations=[]
-
- # Check request against current allocations
- requested_interfaces = map(lambda(elt):elt[0], requested_allocations)
- current_interfaces = map(lambda(elt):elt[0], current_allocations)
- current_hrn_interfaces = map(lambda(elt):elt[0], current_hrn_allocations)
-
- for a in requested_interfaces:
- if (a not in current_hrn_interfaces and a in current_interfaces):
- raise SfaOutOfResource(a)
- if (topology[a][1] not in requested_interfaces):
- raise SfaNoPairRSpec(a,topology[a][1])
- # Request OK
-
- # Allocations to delete
- allocations_to_delete = []
- for a in current_hrn_allocations:
- if (a not in requested_allocations):
- allocations_to_delete.extend([a])
-
- # Ok, let's do our thing
- alloc_nodes(api, hrn, requested_interfaces)
- alloc_links(api, hrn, requested_allocations, allocations_to_delete)
- allocations[hrn] = requested_allocations
- commit_alloc_dict(allocations)
-
- unlock_state_file()
-
- return True
-
-def rspec_to_allocations(rspec):
- ifs = []
- try:
- ifspecs = rspec['rspec']['request'][0]['ifspec']
- for l in ifspecs:
- ifs.extend([(l['name'].replace('tns:',''),l['ip'])])
- except KeyError:
- # Bad RSpec
- pass
- return ifs
- # xxx - fixme
- (ret, output) = call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)
- # parse output into rspec XML
- if output.find("No resouce found") > 0:
- rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
- else:
- comp_rspec = get_xml_by_tag(output, 'computeResource')
- sfa_logger().debug("#### computeResource %s" % comp_rspec)
- topo_rspec = get_xml_by_tag(output, 'topology')
- sfa_logger().debug("#### topology %s" % topo_rspec)
- rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">";
- if comp_rspec != None:
- rspec = rspec + get_xml_by_tag(output, 'computeResource')
- if topo_rspec != None:
- rspec = rspec + get_xml_by_tag(output, 'topology')
- rspec = rspec + "</network> </RSpec>"
-
- return (rspec)
-
-def start_slice(api, xrn, cred):
- # service not supported
- return None
-
-def stop_slice(api, xrn, cred):
- # service not supported
- return None
-
-def reset_slices(api, xrn):
- # service not supported
- return None
-
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
- base_context = {'sfa':{'user':{'hrn':user_hrn}}}
- return base_context
-
-def main():
- t = get_interface_map()
- api = SfaAPI()
- r = RSpec()
- rspec_xml = open(sys.argv[1]).read()
- #get_rspec(None,'foo')
- create_slice(None, "plc.princeton.sap0", rspec_xml)
-
- create_slice(api, "plc.maxpl.test000", None, rspec_xml, None)
-
-if __name__ == "__main__":
- main()
+ has_failure = 0
+ all_active = 0
+ if output.find("Status => FAILED") > 0:
+ top_level_status = 'failed'
+ elif (output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0
+ or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0
+ ):
+ top_level_status = 'configuring'
+ else:
+ top_level_status = 'ready'
+ result['geni_resources'] = self.parse_resources(output, slice_xrn)
+ result['geni_urn'] = urn
+ result['geni_status'] = top_level_status
+ return result
+
+ def create_slice(self, api, xrn, cred, rspec, users):
+ indx1 = rspec.find("<RSpec")
+ indx2 = rspec.find("</RSpec>")
+ if indx1 > -1 and indx2 > indx1:
+ rspec = rspec[indx1 + len("<RSpec type=\"SFA\">"):indx2 - 1]
+ rspec_path = self.save_rspec_to_file(rspec)
+ self.prepare_slice(api, xrn, cred, users)
+ slice_id = self.get_plc_slice_id(cred, xrn)
+ sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + slice_id + "/g\" " + \
+ rspec_path + \
+ ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" + \
+ slice_id + "/g\" " + rspec_path
+ ret = self.shell_execute(sys_cmd, 1)
+ sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + \
+ rspec_path + "/g\""
+ ret = self.shell_execute(sys_cmd, 1)
+ (ret, output) = self.call_am_apiclient(
+ "CreateSliceNetworkClient", [rspec_path, ], 3)
+ # parse output ?
+ rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"
+ return True
+
+ def delete_slice(self, api, xrn, cred):
+ slice_id = self.get_plc_slice_id(cred, xrn)
+ (ret, output) = self.call_am_apiclient(
+ "DeleteSliceNetworkClient", [slice_id, ], 3)
+ # parse output ?
+ return 1
+
+ def get_rspec(self, api, cred, slice_urn):
+ logger.debug("#### called max-get_rspec")
+ # geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
+ if slice_urn == None:
+ (ret, output) = self.call_am_apiclient(
+ "GetResourceTopology", ['all', '\"\"'], 5)
+ else:
+ slice_id = self.get_plc_slice_id(cred, slice_urn)
+ (ret, output) = self.call_am_apiclient(
+ "GetResourceTopology", ['all', slice_id, ], 5)
+ # parse output into rspec XML
+ if output.find("No resouce found") > 0:
+ rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
+ else:
+ comp_rspec = self.get_xml_by_tag(output, 'computeResource')
+ logger.debug("#### computeResource %s" % comp_rspec)
+ topo_rspec = self.get_xml_by_tag(output, 'topology')
+ logger.debug("#### topology %s" % topo_rspec)
+ rspec = "<RSpec type=\"SFA\"> <network name=\"" + \
+ Config().get_interface_hrn() + "\">"
+ if comp_rspec != None:
+ rspec = rspec + self.get_xml_by_tag(output, 'computeResource')
+ if topo_rspec != None:
+ rspec = rspec + self.get_xml_by_tag(output, 'topology')
+ rspec = rspec + "</network> </RSpec>"
+ return (rspec)
+
+ def start_slice(self, api, xrn, cred):
+ # service not supported
+ return None
+
+ def stop_slice(self, api, xrn, cred):
+ # service not supported
+ return None
+
+ def reset_slices(self, api, xrn):
+ # service not supported
+ return None
+
+ # GENI AM API Methods
+
+ def SliverStatus(self, api, slice_xrn, creds, options):
+ call_id = options.get('call_id')
+ if Callids().already_handled(call_id):
+ return {}
+ return self.slice_status(api, slice_xrn, creds)
+
+ def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, options):
+ call_id = options.get('call_id')
+ if Callids().already_handled(call_id):
+ return ""
+ # TODO: create real CreateSliver response rspec
+ ret = self.create_slice(api, slice_xrn, creds, rspec_string, users)
+ if ret:
+ return self.get_rspec(api, creds, slice_xrn)
+ else:
+ return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"
+
+ def DeleteSliver(self, api, xrn, creds, options):
+ call_id = options.get('call_id')
+ if Callids().already_handled(call_id):
+ return ""
+ return self.delete_slice(api, xrn, creds)
+
+ # no caching
+ def ListResources(self, api, creds, options):
+ call_id = options.get('call_id')
+ if Callids().already_handled(call_id):
+ return ""
+ # version_string = "rspec_%s" % (rspec_version.get_version_name())
+ slice_urn = options.get('geni_slice_urn')
+ return self.get_rspec(api, creds, slice_urn)
+
+ def fetch_context(self, slice_hrn, user_hrn, contexts):
+ """
+ Returns the request context required by sfatables. At some point, this mechanism should be changed
+ to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
+ return the basic information needed in a dict.
+ """
+ base_context = {'sfa': {'user': {'hrn': user_hrn}}}
+ return base_context