-
- try:
- tree = etree.parse(StringIO(agg_rspec))
- except etree.XMLSyntaxError:
- message = agg + ": " + str(sys.exc_info()[1])
- raise InvalidRSpec(message)
-
- root = tree.getroot()
- if root.get("type") in ["SFA"]:
- if rspec == None:
- rspec = root
- else:
- for network in root.iterfind("./network"):
- rspec.append(deepcopy(network))
- for request in root.iterfind("./request"):
- rspec.append(deepcopy(request))
-
- return etree.tostring(rspec, xml_declaration=True, pretty_print=True)
-
-"""
-Returns the request context required by sfatables. At some point, this
-mechanism should be changed to refer to "contexts", which is the
-information that sfatables is requesting. But for now, we just return
-the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
- #slice_hrn = urn_to_hrn(slice_xrn)[0]
- #user_hrn = urn_to_hrn(user_xrn)[0]
- base_context = {'sfa':{'user':{'hrn':user_hrn}, 'slice':{'hrn':slice_hrn}}}
- return base_context
def status(api, xrn, creds):
    """
    Placeholder -- slice status is not implemented at this level.

    Always returns 1, regardless of the arguments.
    """
    return 1
+
# Thierry : caching at the slicemgr level makes sense to some extent
# Module-level switch: when True (and api.cache is set), ListSlices and
# ListResources read from / write to api.cache.  Flip to the commented
# line below to disable caching without touching the call sites.
caching=True
#caching=False
def ListSlices(api, creds, call_id):
    """
    Return the union of the slice lists reported by all of this slice
    manager's aggregates.

    Duplicate invocations (same call_id) short-circuit to an empty list.
    When module-level caching is enabled and no per-aggregate fan-out is
    needed, the cached value under the 'slices' key is returned instead.
    """
    if Callids().already_handled(call_id):
        return []

    # serve a cached answer when available
    if caching and api.cache:
        cached = api.cache.get('slices')
        if cached:
            return cached

    # identify the caller from the first valid credential
    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # prefer a delegated credential; fall back to our own
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()

    # fan the call out to every aggregate in parallel
    threads = ThreadManager()
    for agg_hrn in api.aggregates:
        # avoid an infinite loop: never call back into the caller,
        # unless that caller happens to be our own aggregate
        if agg_hrn == caller_hrn and agg_hrn != api.hrn:
            continue
        threads.run(api.aggregates[agg_hrn].ListSlices, credential, call_id)

    # combine the per-aggregate results into a flat list
    slices = []
    for partial in threads.get_results():
        slices.extend(partial)

    # remember the answer for next time
    if caching and api.cache:
        api.cache.add('slices', slices)

    return slices
+
+
def ListResources(api, creds, options, call_id):
    """
    Fan ListResources out to every aggregate and merge the per-aggregate
    answers into one SFA RSpec.

    :param api:     slice-manager API object (aggregates, cache, auth, logger)
    :param creds:   caller credentials
    :param options: dict; 'geni_slice_urn' restricts the listing to one
                    slice, 'rspec_version' selects the return format
    :param call_id: duplicate-call guard; an already-seen id returns ""
    :returns: the merged RSpec serialized to an XML string
    """
    if Callids().already_handled(call_id): return ""

    # get slice's hrn from options; empty xrn means a global listing
    xrn = options.get('geni_slice_urn', '')
    # FIX: the second element shadowed the builtin 'type' and was unused
    (hrn, _) = urn_to_hrn(xrn)

    # the requested return format, also used as the cache key
    rspec_version = RSpecVersion(options.get('rspec_version', 'SFA 1'))
    version_string = "rspec_%s" % (rspec_version.get_version_name())

    # look in cache first -- only global (slice-less) listings are cached
    if caching and api.cache and not xrn:
        rspec = api.cache.get(version_string)
        if rspec:
            return rspec

    # get the caller's hrn from the first valid credential
    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop: don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        # ask each aggregate for its (uncompressed) rspec in parallel
        server = api.aggregates[aggregate]
        my_opts = copy(options)
        my_opts['geni_compressed'] = False
        threads.run(server.ListResources, credential, my_opts, call_id)

    results = threads.get_results()
    rspec = SfaRSpec()
    for result in results:
        try:
            tmp_rspec = parse_rspec(result)
            if isinstance(tmp_rspec, SfaRSpec):
                rspec.merge(result)
            elif isinstance(tmp_rspec, PGRSpec):
                rspec.merge(RSpecConverter.to_sfa_rspec(result))
            else:
                api.logger.info("SM.ListResources: invalid aggregate rspec")
        # FIX: was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort merge but only for
        # ordinary exceptions
        except Exception:
            api.logger.info("SM.ListResources: Failed to merge aggregate rspec")

    # cache the result
    if caching and api.cache and not xrn:
        api.cache.add(version_string, rspec.toxml())

    return rspec.toxml()
+
# first draft at a merging SliverStatus
def SliverStatus(api, slice_xrn, creds, call_id):
    """
    Fan SliverStatus out to every aggregate and merge the answers.

    Void results (the {} returned when call_id was already handled, or
    results with no geni_resources) are discarded.  The merged dict
    carries the common geni_urn, a consolidated geni_status (the "worst"
    status over a ready < configuring < failed < unknown total order)
    and the concatenation of all geni_resources lists.

    :returns: merged status dict, or {} when there is nothing to merge
    """
    if Callids().already_handled(call_id): return {}
    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        server = api.aggregates[aggregate]
        threads.run(server.SliverStatus, slice_xrn, credential, call_id)
    results = threads.get_results()

    # get rid of any void result - e.g. when call_id was hit, where by
    # convention we return {}
    results = [result for result in results if result and result['geni_resources']]

    # do not try to combine if there's no result
    if not results:
        return {}

    # otherwise let's merge stuff
    overall = {}

    # mmh, it is expected that all results carry the same urn
    overall['geni_urn'] = results[0]['geni_urn']

    # consolidate geni_status - simple model using max on a total order
    states = ['ready', 'configuring', 'failed', 'unknown']
    # name -> index: {'ready':0,'configuring':1,'failed':2,'unknown':3}
    shash = dict(zip(states, range(len(states))))

    def combine_status(x, y):
        # BUG FIX: the original read 'shash[max(shash(x), shash(y))]',
        # which (a) *called* the dict -- a TypeError at runtime -- and
        # (b) indexed the name->index dict with an int.  Correct form:
        # take the max of the two indices and map it back to its name.
        return states[max(shash[x], shash[y])]

    overall['geni_status'] = reduce(combine_status,
                                    [result['geni_status'] for result in results],
                                    'ready')

    # append all geni_resources
    overall['geni_resources'] = \
        reduce(lambda x, y: x + y,
               [result['geni_resources'] for result in results], [])

    return overall