4 from StringIO import StringIO
5 from types import StringTypes
6 from copy import deepcopy
10 from sfa.util.sfalogging import sfa_logger
11 from sfa.util.rspecHelper import merge_rspecs
12 from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
13 from sfa.util.plxrn import hrn_to_pl_slicename
14 from sfa.util.rspec import *
15 from sfa.util.specdict import *
16 from sfa.util.faults import *
17 from sfa.util.record import SfaRecord
18 from sfa.rspecs.pg_rspec import PGRSpec
19 from sfa.rspecs.sfa_rspec import SfaRSpec
20 from sfa.rspecs.rspec_converter import RSpecConverter
21 from sfa.rspecs.rspec_parser import parse_rspec
22 from sfa.rspecs.rspec_version import RSpecVersion
23 from sfa.rspecs.pl_rspec_version import supported_rspecs
24 from sfa.util.policy import Policy
25 from sfa.util.prefixTree import prefixTree
26 from sfa.util.sfaticket import *
27 from sfa.trust.credential import Credential
28 from sfa.util.threadmanager import ThreadManager
29 import sfa.util.xmlrpcprotocol as xmlrpcprotocol
30 import sfa.plc.peers as peers
31 from sfa.util.version import version_core
32 from sfa.util.callids import Callids
# we have specialized xmlrpclib.ServerProxy to remember the input url
# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
def get_serverproxy_url (server):
    """Return the url that a server proxy was created against.

    Our specialized xmlrpclib.ServerProxy remembers its input url; plain
    xmlrpclib.ServerProxy instances do not, so in that case we fall back
    to poking at their private internals.
    """
    try:
        # NOTE(review): lines are missing from this chunk right after the
        # def; the primary 'url' attribute access is reconstructed from the
        # "falling back" warning below -- confirm against version control.
        return server.url
    except Exception:
        sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
        return server._ServerProxy__host + server._ServerProxy__handler
def GetVersion(api):
    """Return this slice manager's version dict, including its known peers.

    NOTE(review): the def line and small parts of this body were lost in the
    mangled source chunk; header, 'peers' entry and return are reconstructed
    from the visible fragments -- confirm against version control.
    """
    # peers explicitly in aggregates.xml
    peers = dict([(peername, get_serverproxy_url(v))
                  for (peername, v) in api.aggregates.iteritems()
                  if peername != api.hrn])
    xrn = Xrn(api.hrn)
    version_more = {'interface': 'slicemgr',
                    'hrn': xrn.get_hrn(),
                    'urn': xrn.get_urn(),
                    # required by the local-aggregate fixup below
                    'peers': peers,
                    }
    version_more.update(supported_rspecs)
    sm_version = version_core(version_more)
    # local aggregate if present needs to have localhost resolved
    if api.hrn in api.aggregates:
        local_am_url = get_serverproxy_url(api.aggregates[api.hrn])
        sm_version['peers'][api.hrn] = local_am_url.replace('localhost', sm_version['hostname'])
    return sm_version
def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
    """Fan a CreateSliver call out to every known aggregate and merge the results."""

    def _CreateSliver(server, xrn, credential, rspec, users, call_id):
        # BUG FIX: the parameter was misspelled 'credentail' in the original,
        # so the body silently resolved 'credential' from the enclosing scope.
        # should check the cache first
        # get aggregate version
        version = server.GetVersion()
        if 'sfa' not in version and 'geni_api' in version:
            # sfa aggregates support both sfa and pg rspecs, no need to convert
            # if aggregate supports sfa rspecs. otherwise convert to pg rspec
            rspec = RSpecConverter.to_pg_rspec(rspec)
        return server.CreateSliver(xrn, credential, rspec, users, call_id)

    if Callids().already_handled(call_id): return ""

    # Validate the RSpec against PlanetLab's schema --disabled for now
    # The schema used here needs to aggregate the PL and VINI schemas
    # schema = "/var/www/html/schemas/pl.rng"
    rspec = parse_rspec(rspec_str)
    # NOTE(review): reconstructed guard -- validation is disabled above, and
    # without it 'schema' would be an undefined name; confirm against VCS.
    schema = None
    if schema:
        rspec.validate(schema)

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()

    # get the callers hrn
    hrn, type = urn_to_hrn(xrn)
    valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        # Just send entire RSpec to each aggregate
        server = api.aggregates[aggregate]
        threads.run(_CreateSliver, server, xrn, credential, rspec.toxml(), users, call_id)

    results = threads.get_results()
    # NOTE(review): the tail of this function is missing from the chunk; the
    # per-aggregate rspecs are merged the same way ListResources does -- confirm.
    rspec = SfaRSpec()
    for result in results:
        try:
            rspec.merge(result)
        except:
            api.logger.info("SM.CreateSliver: Failed to merge aggregate rspec")
    return rspec.toxml()
def RenewSliver(api, xrn, creds, expiration_time, call_id):
    """Ask every aggregate to renew the sliver; True only if all succeed."""
    if Callids().already_handled(call_id): return True

    (hrn, type) = urn_to_hrn(xrn)
    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
    # 'and' all the per-aggregate results together
    return reduce(lambda x, y: x and y, threads.get_results(), True)
def get_ticket(api, xrn, creds, rspec, users):
    """Collect tickets from each aggregate named in the rspec and merge them
    into a single signed ticket.

    NOTE(review): several interior lines of this function are missing from
    the mangled chunk; guard clauses, the peer-lookup else-branch and the
    accumulator initialisation are reconstructed -- confirm against VCS.
    """
    slice_hrn, type = urn_to_hrn(xrn)
    # get the netspecs contained within the clients rspec
    aggregate_rspecs = {}
    tree = etree.parse(StringIO(rspec))
    elements = tree.findall('./network')
    for element in elements:
        aggregate_hrn = element.values()[0]
        aggregate_rspecs[aggregate_hrn] = rspec

    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = None
        if aggregate in api.aggregates:
            server = api.aggregates[aggregate]
        else:
            net_urn = hrn_to_urn(aggregate, 'authority')
            # we may have a peer that knows about this aggregate
            for agg in api.aggregates:
                target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn)
                if not target_aggs or not 'hrn' in target_aggs[0]:
                    continue
                # send the request to this address
                url = target_aggs[0]['url']
                server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file)
                # aggregate found, no need to keep looping
                break
        if server is None:
            continue
        threads.run(server.GetTicket, xrn, credential, aggregate_rspec, users)

    results = threads.get_results()

    # gather information from each ticket
    rspecs = []
    initscripts = []
    slivers = []
    object_gid = None
    for result in results:
        agg_ticket = SfaTicket(string=result)
        attrs = agg_ticket.get_attributes()
        if not object_gid:
            object_gid = agg_ticket.get_gid_object()
        rspecs.append(agg_ticket.get_rspec())
        initscripts.extend(attrs.get('initscripts', []))
        slivers.extend(attrs.get('slivers', []))

    # merge the info from all the tickets
    attributes = {'initscripts': initscripts,
                  'slivers': slivers}
    merged_rspec = merge_rspecs(rspecs)

    # create a new ticket
    ticket = SfaTicket(subject=slice_hrn)
    ticket.set_gid_caller(api.auth.client_gid)
    ticket.set_issuer(key=api.key, subject=api.hrn)
    ticket.set_gid_object(object_gid)
    ticket.set_pubkey(object_gid.get_pubkey())
    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
    ticket.set_attributes(attributes)
    ticket.set_rspec(merged_rspec)
    # NOTE(review): two lines are missing before the return; encoding and
    # signing the ticket is the presumed content -- confirm against VCS.
    ticket.encode()
    ticket.sign()
    return ticket.save_to_string(save_parents=True)
def DeleteSliver(api, xrn, creds, call_id):
    """Ask every aggregate to delete the sliver identified by xrn."""
    if Callids().already_handled(call_id): return ""
    (hrn, type) = urn_to_hrn(xrn)
    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.DeleteSliver, xrn, credential, call_id)
    threads.get_results()
    # NOTE(review): the return line is missing from the chunk; PLCAPI-style
    # success value reconstructed -- confirm against VCS.
    return 1
def start_slice(api, xrn, creds):
    """Ask every aggregate to start the slice identified by xrn."""
    hrn, type = urn_to_hrn(xrn)

    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.Start, xrn, credential)
    threads.get_results()
    # NOTE(review): the return line is missing from the chunk; PLCAPI-style
    # success value reconstructed -- confirm against VCS.
    return 1
def stop_slice(api, xrn, creds):
    """Ask every aggregate to stop the slice identified by xrn."""
    hrn, type = urn_to_hrn(xrn)

    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.Stop, xrn, credential)
    threads.get_results()
    # NOTE(review): the return line is missing from the chunk; PLCAPI-style
    # success value reconstructed -- confirm against VCS.
    return 1
278 def reset_slice(api, xrn):
284 def shutdown(api, xrn, creds):
290 def status(api, xrn, creds):
296 # Thierry : caching at the slicemgr level makes sense to some extent
def ListSlices(api, creds, call_id):
    """Return the union of the slice lists of all known aggregates."""

    if Callids().already_handled(call_id): return []

    # look in cache first
    # NOTE(review): the module-level 'caching' flag is defined outside this
    # chunk -- confirm it exists upstream.
    if caching and api.cache:
        slices = api.cache.get('slices')
        if slices:
            return slices

    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    # fetch from aggregates
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.ListSlices, credential, call_id)

    # combine the per-aggregate results into one flat list
    results = threads.get_results()
    slices = []
    for result in results:
        slices.extend(result)

    # cache the result
    if caching and api.cache:
        api.cache.add('slices', slices)

    return slices
def ListResources(api, creds, options, call_id):
    """Fetch the rspec of every aggregate and merge them into a single
    SFA rspec, returned as xml.
    """

    if Callids().already_handled(call_id): return ""

    # get slice's hrn from options
    xrn = options.get('geni_slice_urn', '')
    (hrn, type) = urn_to_hrn(xrn)

    # get the rspec's return format from options
    rspec_version = RSpecVersion(options.get('rspec_version', 'SFA 1'))
    version_string = "rspec_%s" % (rspec_version.get_version_name())

    # look in cache first (only for the global, slice-less listing)
    # NOTE(review): the module-level 'caching' flag is defined outside this
    # chunk -- confirm it exists upstream.
    if caching and api.cache and not xrn:
        rspec = api.cache.get(version_string)
        if rspec:
            return rspec

    # get the callers hrn
    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Dont send request back to caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        # get the rspec from the aggregate
        server = api.aggregates[aggregate]
        # NOTE(review): 'copy' is not among the visible imports (only
        # deepcopy is); confirm a 'from copy import copy' exists upstream.
        my_opts = copy(options)
        my_opts['geni_compressed'] = False
        threads.run(server.ListResources, credential, my_opts, call_id)

    results = threads.get_results()
    #results.append(open('/root/protogeni.rspec', 'r').read())
    # merge all aggregate rspecs into one sfa rspec
    rspec = SfaRSpec()
    for result in results:
        try:
            tmp_rspec = parse_rspec(result)
            if isinstance(tmp_rspec, SfaRSpec):
                rspec.merge(result)
            elif isinstance(tmp_rspec, PGRSpec):
                rspec.merge(RSpecConverter.to_sfa_rspec(result))
            else:
                api.logger.info("SM.ListResources: invalid aggregate rspec")
        except:
            api.logger.info("SM.ListResources: Failed to merge aggregate rspec")

    # cache the result
    if caching and api.cache and not xrn:
        api.cache.add(version_string, rspec.toxml())

    return rspec.toxml()
# first draft at a merging SliverStatus
def SliverStatus(api, slice_xrn, creds, call_id):
    """Query every aggregate for sliver status and merge the answers."""
    if Callids().already_handled(call_id): return {}
    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        server = api.aggregates[aggregate]
        threads.run(server.SliverStatus, slice_xrn, credential, call_id)
    results = threads.get_results()

    # get rid of any void result - e.g. when call_id was hit where by convention we return {}
    results = [result for result in results if result and result['geni_resources']]

    # do not try to combine if there's no result
    if not results: return {}

    # otherwise let's merge stuff
    overall = {}

    # mmh, it is expected that all results carry the same urn
    overall['geni_urn'] = results[0]['geni_urn']

    # consolidate geni_status - simple model using max on a total order
    states = ['ready', 'configuring', 'failed', 'unknown']
    # {'ready':0,'configuring':1,'failed':2,'unknown':3}
    shash = dict(zip(states, range(len(states))))
    def combine_status(x, y):
        # BUG FIX: the original did shash[max(shash(x), shash(y))], which
        # both *calls* the dict and then indexes it with an int rank; map
        # the max rank back to its state *name* instead, so the reduce
        # below keeps folding state strings.
        return states[max(shash[x], shash[y])]
    overall['geni_status'] = reduce(combine_status, [result['geni_status'] for result in results], 'ready')

    # append all geni_resources
    overall['geni_resources'] = \
        reduce(lambda x, y: x + y, [result['geni_resources'] for result in results], [])

    return overall
441 r.parseFile(sys.argv[1])
443 CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
445 if __name__ == "__main__":