-### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $
-
-import datetime
-import time
-import traceback
+#
import sys
-from copy import deepcopy
-from lxml import etree
+import time,datetime
from StringIO import StringIO
from types import StringTypes
+from copy import deepcopy
+from copy import copy
+from lxml import etree
+
+from sfa.util.sfalogging import sfa_logger
from sfa.util.rspecHelper import merge_rspecs
-from sfa.util.namespace import *
+from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
+from sfa.util.plxrn import hrn_to_pl_slicename
from sfa.util.rspec import *
from sfa.util.specdict import *
from sfa.util.faults import *
from sfa.util.record import SfaRecord
+from sfa.rspecs.pg_rspec import PGRSpec
+from sfa.rspecs.sfa_rspec import SfaRSpec
+from sfa.rspecs.pg_rspec_converter import PGRSpecConverter
+from sfa.rspecs.rspec_parser import parse_rspec
from sfa.util.policy import Policy
from sfa.util.prefixTree import prefixTree
from sfa.util.sfaticket import *
from sfa.trust.credential import Credential
from sfa.util.threadmanager import ThreadManager
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-from sfa.util.debug import log
import sfa.plc.peers as peers
-from copy import copy
-
-def get_version():
- version = {}
- version['geni_api'] = 1
- version['sfa'] = 1
- return version
+from sfa.util.version import version_core
+from sfa.rspecs.rspec_version import RSpecVersion
+from sfa.util.callids import Callids
+
+# we have specialized xmlrpclib.ServerProxy to remember the input url
+# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+def get_serverproxy_url (server):
+ try:
+ return server.url
+ except:
+ sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+ return server._ServerProxy__host + server._ServerProxy__handler
+
+def GetVersion(api):
+ # peers explicitly in aggregates.xml
+ peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+ if peername != api.hrn])
+ xrn=Xrn (api.hrn)
+ sm_version=version_core({'interface':'slicemgr',
+ 'hrn' : xrn.get_hrn(),
+ 'urn' : xrn.get_urn(),
+ 'peers': peers,
+ })
+ # local aggregate if present needs to have localhost resolved
+ if api.hrn in api.aggregates:
+ local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
+ sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+ return sm_version
+
+def CreateSliver(api, xrn, creds, rspec, users, call_id):
+
+ if Callids().already_handled(call_id): return ""
-def slice_status(api, slice_xrn, creds ):
- result = {}
- result['geni_urn'] = slice_xrn
- result['geni_status'] = 'unknown'
- result['geni_resources'] = {}
- return result
-
-def create_slice(api, xrn, creds, rspec, users):
hrn, type = urn_to_hrn(xrn)
# Validate the RSpec against PlanetLab's schema --disabled for now
# Just send entire RSpec to each aggregate
server = api.aggregates[aggregate]
- threads.run(server.CreateSliver, xrn, credential, rspec, users)
+ threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)
- results = threads.get_results()
- merged_rspec = merge_rspecs(results)
- return merged_rspec
+ results = threads.get_results()
+ rspec = SfaRSpec()
+ for result in results:
+ rspec.merge(result)
+ return rspec
+
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+ if Callids().already_handled(call_id): return True
-def renew_slice(api, xrn, creds, expiration_time):
+ (hrn, type) = urn_to_hrn(xrn)
# get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'renewesliver', hrn)[0]
+ valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
# attempt to use delegated credential first
continue
server = api.aggregates[aggregate]
- threads.run(server.RenewSliver, xrn, credential, expiration_time)
- threads.get_results()
- return 1
+ threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
+ # 'and' the results
+ return reduce (lambda x,y: x and y, threads.get_results() , True)
def get_ticket(api, xrn, creds, rspec, users):
slice_hrn, type = urn_to_hrn(xrn)
aggregate_rspecs[aggregate_hrn] = rspec
# get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'getticket', hrn)[0]
+ valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
# attempt to use delegated credential first
if not credential:
credential = api.getCredential()
threads = ThreadManager()
- for aggregate, aggregate_rspec in aggregate_rspecs.items():
+ for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
# prevent infinite loop. Dont send request back to caller
# unless the caller is the aggregate's SM
if caller_hrn == aggregate and aggregate != api.hrn:
return ticket.save_to_string(save_parents=True)
-def delete_slice(api, xrn, creds):
+def DeleteSliver(api, xrn, creds, call_id):
+ if Callids().already_handled(call_id): return ""
+ (hrn, type) = urn_to_hrn(xrn)
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
if caller_hrn == aggregate and aggregate != api.hrn:
continue
server = api.aggregates[aggregate]
- threads.run(server.DeleteSliver, xrn, credential)
+ threads.run(server.DeleteSliver, xrn, credential, call_id)
threads.get_results()
return 1
def start_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
return 1
def stop_slice(api, xrn, creds):
+ hrn, type = urn_to_hrn(xrn)
+
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
"""
return 1
-def get_slices(api, creds):
+# Thierry : caching at the slicemgr level makes sense to some extent
+caching=True
+#caching=False
+def ListSlices(api, creds, call_id):
+
+ if Callids().already_handled(call_id): return []
# look in cache first
- if api.cache:
+ if caching and api.cache:
slices = api.cache.get('slices')
if slices:
return slices
# get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'listslices', hrn)[0]
+ valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
# attempt to use delegated credential first
if caller_hrn == aggregate and aggregate != api.hrn:
continue
server = api.aggregates[aggregate]
- threads.run(server.ListSlices, credential)
+ threads.run(server.ListSlices, credential, call_id)
# combine results
results = threads.get_results()
slices.extend(result)
# cache the result
- if api.cache:
+ if caching and api.cache:
api.cache.add('slices', slices)
return slices
-
-def get_rspec(api, creds, options):
-
+
+
+def ListResources(api, creds, options, call_id):
+
+ if Callids().already_handled(call_id): return ""
+
# get slice's hrn from options
- xrn = options.get('geni_slice_urn', None)
- hrn, type = urn_to_hrn(xrn)
+ xrn = options.get('geni_slice_urn', '')
+ (hrn, type) = urn_to_hrn(xrn)
+
+ # get the rspec's return format from options
+ rspec_version = RSpecVersion(options.get('rspec_version', 'SFA 1'))
+ version_string = "rspec_%s_%s" % (rspec_version.format, rspec_version.version)
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
if not origin_hrn:
- origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+ if isinstance(creds, list):
+ origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+ else:
+ origin_hrn = Credential(string=creds).get_gid_caller().get_hrn()
# look in cache first
- if api.cache and not xrn:
- rspec = api.cache.get('nodes')
+ if caching and api.cache and not xrn:
+ rspec = api.cache.get(version_string)
if rspec:
return rspec
- hrn, type = urn_to_hrn(xrn)
- rspec = None
-
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
server = api.aggregates[aggregate]
my_opts = copy(options)
my_opts['geni_compressed'] = False
- threads.run(server.ListResources, credential, my_opts)
+ threads.run(server.ListResources, credential, my_opts, call_id)
#threads.run(server.get_resources, cred, xrn, origin_hrn)
results = threads.get_results()
- # combine the rspecs into a single rspec
- for agg_rspec in results:
+ #results.append(open('/root/protogeni.rspec', 'r').read())
+ rspec = SfaRSpec()
+ for result in results:
try:
- tree = etree.parse(StringIO(agg_rspec))
- except etree.XMLSyntaxError:
- message = str(agg_rspec) + ": " + str(sys.exc_info()[1])
- raise InvalidRSpec(message)
-
- root = tree.getroot()
- if root.get("type") in ["SFA"]:
- if rspec == None:
- rspec = root
+ tmp_rspec = parse_rspec(result)
+ if isinstance(tmp_rspec, SfaRSpec):
+ rspec.merge(result)
+ elif isinstance(tmp_rspec, PGRSpec):
+ rspec.merge(PGRSpecConverter.to_sfa_rspec(result))
else:
- for network in root.iterfind("./network"):
- rspec.append(deepcopy(network))
- for request in root.iterfind("./request"):
- rspec.append(deepcopy(request))
-
- rspec = etree.tostring(rspec, xml_declaration=True, pretty_print=True)
+ api.logger.info("SM.ListResources: invalid aggregate rspec")
+ except:
+ api.logger.info("SM.ListResources: Failed to merge aggregate rspec")
+
# cache the result
- if api.cache and not xrn:
- api.cache.add('nodes', rspec)
+ if caching and api.cache and not xrn:
+ api.cache.add(version_string, rspec.toxml())
- return rspec
+ return rspec.toxml()
+
+# first draft at a merging SliverStatus
+def SliverStatus(api, slice_xrn, creds, call_id):
+ if Callids().already_handled(call_id): return {}
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ server = api.aggregates[aggregate]
+ threads.run (server.SliverStatus, slice_xrn, credential, call_id)
+ results = threads.get_results()
+
+    # get rid of any void result - e.g. when the call_id was already handled, in which case by convention we return {}
+ results = [ result for result in results if result and result['geni_resources']]
+
+ # do not try to combine if there's no result
+ if not results : return {}
+
+ # otherwise let's merge stuff
+ overall = {}
+
+ # mmh, it is expected that all results carry the same urn
+ overall['geni_urn'] = results[0]['geni_urn']
+
+ # consolidate geni_status - simple model using max on a total order
+ states = [ 'ready', 'configuring', 'failed', 'unknown' ]
+ # hash name to index
+ shash = dict ( zip ( states, range(len(states)) ) )
+ def combine_status (x,y):
+        return states [ max (shash[x],shash[y]) ]
+ overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )
+
+ # {'ready':0,'configuring':1,'failed':2,'unknown':3}
+ # append all geni_resources
+ overall['geni_resources'] = \
+ reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+
+ return overall
def main():
r = RSpec()
r.parseFile(sys.argv[1])
rspec = r.toDict()
- create_slice(None,'plc.princeton.tmacktestslice',rspec)
+ CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
if __name__ == "__main__":
main()