X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fmanagers%2Faggregate_manager_pl.py;h=5355f97b36fb4cba9587dee79cff4ba57e276fac;hb=725c637b3d6f4e41773b83f7977e2ba962b5b1b7;hp=ae4215a8cee4c06f2ab8cdfb7f9a434c901a2432;hpb=80baeaf1423edbd62b0be8dfb4ca28ff1268166a;p=sfa.git

diff --git a/sfa/managers/aggregate_manager_pl.py b/sfa/managers/aggregate_manager_pl.py
index ae4215a8..5355f97b 100644
--- a/sfa/managers/aggregate_manager_pl.py
+++ b/sfa/managers/aggregate_manager_pl.py
@@ -1,16 +1,15 @@
-### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $
-
 import datetime
 import time
 import traceback
 import sys
 import re
 from types import StringTypes
-from sfa.util.namespace import *
+
+from sfa.util.faults import *
+from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn
+from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename, hostname_to_urn
 from sfa.util.rspec import *
 from sfa.util.specdict import *
-from sfa.util.faults import *
 from sfa.util.record import SfaRecord
 from sfa.util.policy import Policy
 from sfa.util.record import *
@@ -21,7 +20,16 @@ import sfa.plc.peers as peers
 from sfa.plc.network import *
 from sfa.plc.api import SfaAPI
 from sfa.plc.slices import *
+from sfa.util.version import version_core
+from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
 
+def GetVersion(api):
+    xrn=Xrn(api.hrn)
+    return version_core({'interface':'aggregate',
+                         'testbed':'myplc',
+                         'hrn':xrn.get_hrn(),
+                         })
 
 def __get_registry_objects(slice_xrn, creds, users):
     """
@@ -42,7 +50,6 @@ def __get_registry_objects(slice_xrn, creds, users):
     slicename = hrn_to_pl_slicename(hrn)
     login_base = slicename.split('_')[0]
     reg_objects = {}
-
     site = {}
     site['site_id'] = 0
     site['name'] = 'geni.%s' % login_base
@@ -58,7 +65,12 @@ def __get_registry_objects(slice_xrn, creds, users):
     reg_objects['site'] = site
 
     slice = {}
-    slice['expires'] = int(time.mktime(Credential(string=creds[0]).get_lifetime().timetuple()))
+
+    extime = Credential(string=creds[0]).get_expiration()
+    # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+    if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+        extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+    slice['expires'] = int(time.mktime(extime.timetuple()))
     slice['hrn'] = hrn
     slice['name'] = hrn_to_pl_slicename(hrn)
     slice['url'] = hrn
@@ -70,7 +82,7 @@ def __get_registry_objects(slice_xrn, creds, users):
     for user in users:
         user['key_ids'] = []
         hrn, _ = urn_to_hrn(user['urn'])
-        user['email'] = hrn + "@geni.net"
+        user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
         user['first_name'] = hrn
         user['last_name'] = hrn
         reg_objects['users'][user['email']] = user
@@ -83,39 +95,86 @@ def __get_registry_objects(slice_xrn, creds, users):
         hostnames.append(node.hostname)
     return hostnames
 
-def get_version():
-    version = {}
-    version['geni_api'] = 1
-    version['sfa'] = 1
-    return version
-
 def slice_status(api, slice_xrn, creds):
+    hrn, type = urn_to_hrn(slice_xrn)
+    # find out where this slice is currently running
+    api.logger.info(hrn)
+    slicename = hrn_to_pl_slicename(hrn)
+
+    slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids','person_ids','name','expires'])
+    if len(slices) == 0:
+        raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
+    slice = slices[0]
+
+    nodes = api.plshell.GetNodes(api.plauth, slice['node_ids'],
+                                 ['hostname', 'site_id', 'boot_state', 'last_contact'])
+    site_ids = [node['site_id'] for node in nodes]
+    sites = api.plshell.GetSites(api.plauth, site_ids, ['site_id', 'login_base'])
+    sites_dict = {}
+    for site in sites:
+        sites_dict[site['site_id']] = site['login_base']
+
+    # XX remove me
+    #api.logger.info(slice_xrn)
+    #api.logger.info(slice)
+    #api.logger.info(nodes)
+    # XX remove me
+
     result = {}
-    result['geni_urn'] = slice_xrn
-    result['geni_status'] = 'unknown'
-    result['geni_resources'] = {}
+    top_level_status = 'unknown'
+    if nodes:
+        top_level_status = 'ready'
+    result['geni_urn'] = Xrn(slice_xrn, 'slice').get_urn()
+    result['pl_login'] = slice['name']
+    result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+
+    resources = []
+    for node in nodes:
+        res = {}
+        res['pl_hostname'] = node['hostname']
+        res['pl_boot_state'] = node['boot_state']
+        res['pl_last_contact'] = node['last_contact']
+        if not node['last_contact'] is None:
+            res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+        res['geni_urn'] = hostname_to_urn(api.hrn, sites_dict[node['site_id']], node['hostname'])
+        if node['boot_state'] == 'boot':
+            res['geni_status'] = 'ready'
+        else:
+            res['geni_status'] = 'failed'
+            top_level_status = 'failed'
+
+        res['geni_error'] = ''
+
+        resources.append(res)
+
+    result['geni_status'] = top_level_status
+    result['geni_resources'] = resources
+    # XX remove me
+    #api.logger.info(result)
+    # XX remove me
     return result
 
-def create_slice(api, slice_xrn, creds, rspec, users):
+def CreateSliver(api, slice_xrn, creds, rspec, users, call_id):
     """
     Create the sliver[s] (slice) at this aggregate.
     Verify HRN and initialize the slice record in PLC if necessary.
     """
+    if Callids().already_handled(call_id): return ""
     reg_objects = __get_registry_objects(slice_xrn, creds, users)
 
-    hrn, type = urn_to_hrn(slice_xrn)
+    (hrn, type) = urn_to_hrn(slice_xrn)
     peer = None
     slices = Slices(api)
     peer = slices.get_peer(hrn)
     sfa_peer = slices.get_sfa_peer(hrn)
     registry = api.registries[api.hrn]
     credential = api.getCredential()
-    site_id, remote_site_id = slices.verify_site(registry, credential, hrn,
-                                                 peer, sfa_peer, reg_objects)
+    (site_id, remote_site_id) = slices.verify_site(registry, credential, hrn,
+                                                   peer, sfa_peer, reg_objects)
     slice_record = slices.verify_slice(registry, credential, hrn, site_id,
-                                       remote_site_id, peer, sfa_peer, reg_objects)
+                                       remote_site_id, peer, sfa_peer, reg_objects)
 
     network = Network(api)
@@ -146,21 +205,25 @@ def create_slice(api, slice_xrn, creds, rspec, users):
             api.plshell.BindObjectToPeer(api.plauth, 'slice', slice.id, peer,
                                          slice.peer_id)
 
-    # print network.toxml()
+    # xxx - check this holds enough data for the client to understand what's happened
+    return network.toxml()
 
-    return True
-
-def renew_slice(api, xrn, creds, exipration_time):
-    hrn, type = urn_to_hrn(xrn)
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+    if Callids().already_handled(call_id): return True
+    (hrn, type) = urn_to_hrn(xrn)
     slicename = hrn_to_pl_slicename(hrn)
     slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
     if not slices:
         raise RecordNotFound(hrn)
     slice = slices[0]
-    slice['expires'] = expiration_time
-    api.plshell.UpdateSlice(api.plauth, slice['slice_id'], slice)
-    return 1
+    requested_time = utcparse(expiration_time)
+    record = {'expires': int(time.mktime(requested_time.timetuple()))}
+    try:
+        api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
+        return True
+    except:
+        return False
 
 def start_slice(api, xrn, creds):
     hrn, type = urn_to_hrn(xrn)
@@ -195,8 +258,9 @@ def reset_slice(api, xrn):
     # XX not implemented at this interface
     return 1
 
-def delete_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
+def DeleteSliver(api, xrn, creds, call_id):
+    if Callids().already_handled(call_id): return ""
+    (hrn, type) = urn_to_hrn(xrn)
     slicename = hrn_to_pl_slicename(hrn)
     slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
     if not slices:
@@ -214,9 +278,13 @@ def delete_slice(api, xrn, creds):
         api.plshell.BindObjectToPeer(api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
     return 1
 
-def get_slices(api, creds):
+# xxx Thierry : caching at the aggregate level sounds wrong...
+caching=True
+#caching=False
+def ListSlices(api, creds, call_id):
+    if Callids().already_handled(call_id): return []
     # look in cache first
-    if api.cache:
+    if caching and api.cache:
         slices = api.cache.get('slices')
         if slices:
             return slices
@@ -227,20 +295,22 @@ def get_slices(api, creds):
     slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
 
     # cache the result
-    if api.cache:
+    if caching and api.cache:
         api.cache.add('slices', slice_urns)
 
     return slice_urns
 
-def get_rspec(api, creds, options):
+def ListResources(api, creds, options,call_id):
+    if Callids().already_handled(call_id): return ""
     # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', None)
-    hrn, type = urn_to_hrn(xrn)
+    xrn = options.get('geni_slice_urn', '')
+    (hrn, type) = urn_to_hrn(xrn)
 
     # look in cache first
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         rspec = api.cache.get('nodes')
         if rspec:
+            api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
             return rspec
 
     network = Network(api)
@@ -251,7 +321,7 @@ def get_rspec(api, creds, options):
     rspec = network.toxml()
 
     # cache the result
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         api.cache.add('nodes', rspec)
 
     return rspec
@@ -271,7 +341,7 @@ def get_ticket(api, xrn, creds, rspec, users):
     credential = api.getCredential()
     records = registry.Resolve(xrn, credential)
 
-    # similar to create_slice, we must verify that the required records exist
+    # similar to CreateSliver, we must verify that the required records exist
     # at this aggregate before we can issue a ticket
     site_id, remote_site_id = slices.verify_site(registry, credential, slice_hrn,
                                                  peer, sfa_peer, reg_objects)
@@ -320,15 +390,15 @@ def get_ticket(api, xrn, creds, rspec, users):
 def main():
     api = SfaAPI()
     """
-    rspec = get_rspec(api, "plc.princeton.sapan", None)
-    #rspec = get_rspec(api, "plc.princeton.coblitz", None)
-    #rspec = get_rspec(api, "plc.pl.sirius", None)
+    rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
+    #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
+    #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
     print rspec
     """
     f = open(sys.argv[1])
     xml = f.read()
     f.close()
-    create_slice(api, "plc.princeton.sapan", xml)
+    CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
 
 if __name__ == "__main__":
     main()