[sfa.git] / sfa / managers / slice_manager_pl.py
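"""
slice_manager_pl: SFA slice manager implementation for PlanetLab.

Each call below checks the caller's credentials, fans the request out to the
configured aggregates through ThreadManager, and merges the per-aggregate
results (RSpecs, tickets, slice lists, sliver status) into a single reply.
"""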

import sys
import time, datetime
from StringIO import StringIO
from types import StringTypes
from copy import deepcopy
from copy import copy
from lxml import etree

from sfa.util.sfalogging import sfa_logger
from sfa.util.rspecHelper import merge_rspecs
from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
from sfa.util.plxrn import hrn_to_pl_slicename
from sfa.util.rspec import *
from sfa.util.specdict import *
from sfa.util.faults import *
from sfa.util.record import SfaRecord
from sfa.rspecs.sfa_rspec import SfaRSpec
from sfa.util.policy import Policy
from sfa.util.prefixTree import prefixTree
from sfa.util.sfaticket import *
from sfa.trust.credential import Credential
from sfa.util.threadmanager import ThreadManager
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
import sfa.plc.peers as peers
from sfa.util.version import version_core
from sfa.util.callids import Callids

# we have specialized xmlrpclib.ServerProxy to remember the input url
# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
def get_serverproxy_url (server):
    try:
        return server.url
    except AttributeError:
        sfa_logger().warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
        return server._ServerProxy__host + server._ServerProxy__handler

def GetVersion(api):
    # peers explicitly in aggregates.xml
    peers = dict([ (peername, get_serverproxy_url(v)) for (peername, v) in api.aggregates.iteritems()
                   if peername != api.hrn])
    xrn = Xrn(api.hrn)
    sm_version = version_core({'interface':'slicemgr',
                               'hrn' : xrn.get_hrn(),
                               'urn' : xrn.get_urn(),
                               'peers': peers,
                               })
    # local aggregate if present needs to have localhost resolved
    if api.hrn in api.aggregates:
        local_am_url = get_serverproxy_url(api.aggregates[api.hrn])
        sm_version['peers'][api.hrn] = local_am_url.replace('localhost', sm_version['hostname'])
    return sm_version

def CreateSliver(api, xrn, creds, rspec, users, call_id):

    if Callids().already_handled(call_id): return ""

    hrn, type = urn_to_hrn(xrn)

    # Validate the RSpec against PlanetLab's schema -- disabled for now
    # The schema used here needs to aggregate the PL and VINI schemas
    # schema = "/var/www/html/schemas/pl.rng"
    schema = None
    if schema:
        try:
            tree = etree.parse(StringIO(rspec))
        except etree.XMLSyntaxError:
            message = str(sys.exc_info()[1])
            raise InvalidRSpec(message)

        relaxng_doc = etree.parse(schema)
        relaxng = etree.RelaxNG(relaxng_doc)

        if not relaxng(tree):
            error = relaxng.error_log.last_error
            message = "%s (line %s)" % (error.message, error.line)
            raise InvalidRSpec(message)

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue

        # just send the entire RSpec to each aggregate
        server = api.aggregates[aggregate]
        threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)

    results = threads.get_results()
    rspec = SfaRSpec()
    for result in results:
        rspec.merge(result)
    return rspec

def RenewSliver(api, xrn, creds, expiration_time, call_id):
    if Callids().already_handled(call_id): return True

    (hrn, type) = urn_to_hrn(xrn)
    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue

        server = api.aggregates[aggregate]
        threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
    # 'and' the results
    return reduce(lambda x, y: x and y, threads.get_results(), True)

def get_ticket(api, xrn, creds, rspec, users):
    slice_hrn, type = urn_to_hrn(xrn)
    # get the netspecs contained within the client's rspec
    aggregate_rspecs = {}
    tree = etree.parse(StringIO(rspec))
    elements = tree.findall('./network')
    for element in elements:
        aggregate_hrn = element.values()[0]
        aggregate_rspecs[aggregate_hrn] = rspec

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = None
        if aggregate in api.aggregates:
            server = api.aggregates[aggregate]
        else:
            net_urn = hrn_to_urn(aggregate, 'authority')
            # we may have a peer that knows about this aggregate
            for agg in api.aggregates:
                target_aggs = api.aggregates[agg].get_aggregates(credential, net_urn)
                if not target_aggs or not 'hrn' in target_aggs[0]:
                    continue
                # send the request to this address
                url = target_aggs[0]['url']
                server = xmlrpcprotocol.get_server(url, api.key_file, api.cert_file)
                # aggregate found, no need to keep looping
                break
        if server is None:
            continue
        threads.run(server.GetTicket, xrn, credential, aggregate_rspec, users)

    results = threads.get_results()

    # gather information from each ticket
    rspecs = []
    initscripts = []
    slivers = []
    object_gid = None
    for result in results:
        agg_ticket = SfaTicket(string=result)
        attrs = agg_ticket.get_attributes()
        if not object_gid:
            object_gid = agg_ticket.get_gid_object()
        rspecs.append(agg_ticket.get_rspec())
        initscripts.extend(attrs.get('initscripts', []))
        slivers.extend(attrs.get('slivers', []))

    # merge info
    attributes = {'initscripts': initscripts,
                  'slivers': slivers}
    merged_rspec = merge_rspecs(rspecs)

    # create a new ticket
    ticket = SfaTicket(subject=slice_hrn)
    ticket.set_gid_caller(api.auth.client_gid)
    ticket.set_issuer(key=api.key, subject=api.hrn)
    ticket.set_gid_object(object_gid)
    ticket.set_pubkey(object_gid.get_pubkey())
    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
    ticket.set_attributes(attributes)
    ticket.set_rspec(merged_rspec)
    ticket.encode()
    ticket.sign()
    return ticket.save_to_string(save_parents=True)

def DeleteSliver(api, xrn, creds, call_id):
    if Callids().already_handled(call_id): return ""
    (hrn, type) = urn_to_hrn(xrn)
    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.DeleteSliver, xrn, credential, call_id)
    threads.get_results()
    return 1

def start_slice(api, xrn, creds):
    hrn, type = urn_to_hrn(xrn)

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.Start, xrn, credential)
    threads.get_results()
    return 1

def stop_slice(api, xrn, creds):
    hrn, type = urn_to_hrn(xrn)

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.Stop, xrn, credential)
    threads.get_results()
    return 1

def reset_slice(api, xrn):
    """
    Not implemented
    """
    return 1

def shutdown(api, xrn, creds):
    """
    Not implemented
    """
    return 1

def status(api, xrn, creds):
    """
    Not implemented
    """
    return 1

# Thierry : caching at the slicemgr level makes sense to some extent
caching = True
#caching = False
def ListSlices(api, creds, call_id):

    if Callids().already_handled(call_id): return []

    # look in cache first
    if caching and api.cache:
        slices = api.cache.get('slices')
        if slices:
            return slices

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    # fetch from aggregates
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        server = api.aggregates[aggregate]
        threads.run(server.ListSlices, credential, call_id)

    # combine results
    results = threads.get_results()
    slices = []
    for result in results:
        slices.extend(result)

    # cache the result
    if caching and api.cache:
        api.cache.add('slices', slices)

    return slices


def ListResources(api, creds, options, call_id):

    if Callids().already_handled(call_id): return ""

    # get the slice's hrn from options
    xrn = options.get('geni_slice_urn', '')
    (hrn, type) = urn_to_hrn(xrn)

    # get the hrn of the original caller
    origin_hrn = options.get('origin_hrn', None)
    if not origin_hrn:
        if isinstance(creds, list):
            origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
        else:
            origin_hrn = Credential(string=creds).get_gid_caller().get_hrn()

    # look in cache first
    if caching and api.cache and not xrn:
        rspec = api.cache.get('nodes')
        if rspec:
            return rspec

    # get the caller's hrn
    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()

    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        # prevent infinite loop. Don't send the request back to the caller
        # unless the caller is the aggregate's SM
        if caller_hrn == aggregate and aggregate != api.hrn:
            continue
        # get the rspec from the aggregate
        server = api.aggregates[aggregate]
        my_opts = copy(options)
        my_opts['geni_compressed'] = False
        threads.run(server.ListResources, credential, my_opts, call_id)
        #threads.run(server.get_resources, cred, xrn, origin_hrn)

    results = threads.get_results()
    rspec = SfaRSpec()
    for result in results:
        rspec.merge(result)

    # cache the result
    if caching and api.cache and not xrn:
        api.cache.add('nodes', rspec)

    return rspec.toxml()

# first draft at a merging SliverStatus
def SliverStatus(api, slice_xrn, creds, call_id):
    if Callids().already_handled(call_id): return {}
    # attempt to use delegated credential first
    credential = api.getDelegatedCredential(creds)
    if not credential:
        credential = api.getCredential()
    threads = ThreadManager()
    for aggregate in api.aggregates:
        server = api.aggregates[aggregate]
        threads.run(server.SliverStatus, slice_xrn, credential, call_id)
    results = threads.get_results()

    # get rid of any void result - e.g. when the call_id was already handled,
    # in which case by convention we return {}
    results = [result for result in results if result and result['geni_resources']]

    # do not try to combine if there's no result
    if not results: return {}

    # otherwise let's merge stuff
    overall = {}

    # all results are expected to carry the same urn
    overall['geni_urn'] = results[0]['geni_urn']

    # consolidate geni_status - simple model using max on a total order
    states = ['ready', 'configuring', 'failed', 'unknown']
    # hash name to index, i.e. {'ready':0,'configuring':1,'failed':2,'unknown':3}
    shash = dict(zip(states, range(len(states))))
    def combine_status(x, y):
        return states[max(shash[x], shash[y])]
    overall['geni_status'] = reduce(combine_status, [result['geni_status'] for result in results], 'ready')

    # append all geni_resources
    overall['geni_resources'] = \
        reduce(lambda x, y: x + y, [result['geni_resources'] for result in results], [])

    return overall

def main():
    r = RSpec()
    r.parseFile(sys.argv[1])
    rspec = r.toDict()
    # standalone smoke test: creds and users are empty placeholders here
    CreateSliver(None, 'plc.princeton.tmacktestslice', [], rspec, [], 'create-slice-tmacktestslice')

if __name__ == "__main__":
    main()