X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fmanagers%2Faggregate_manager_pl.py;h=9e0aa27f5595b81ae4a105eef4d32c6931a98866;hb=4ff67c801ceeb1d0c7ca2863c2b7bf8152182b8f;hp=0c70ddaeb9ec76cf4f860754e13c7282bd68f7ba;hpb=ba2aaa438f939a4b5c697052e37b1c3218901319;p=sfa.git

diff --git a/sfa/managers/aggregate_manager_pl.py b/sfa/managers/aggregate_manager_pl.py
index 0c70ddae..9e0aa27f 100644
--- a/sfa/managers/aggregate_manager_pl.py
+++ b/sfa/managers/aggregate_manager_pl.py
@@ -4,11 +4,10 @@ import traceback
 import sys
 import re
 from types import StringTypes
-from dateutil.parser import parse
 
 from sfa.util.faults import *
-from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn
-from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename
+from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn
+from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename, hostname_to_urn
 from sfa.util.rspec import *
 from sfa.util.specdict import *
 from sfa.util.record import SfaRecord
@@ -21,7 +20,16 @@ import sfa.plc.peers as peers
 from sfa.plc.network import *
 from sfa.plc.api import SfaAPI
 from sfa.plc.slices import *
+from sfa.util.version import version_core
+from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
 
+def GetVersion(api):
+    xrn=Xrn(api.hrn)
+    return version_core({'interface':'aggregate',
+                         'testbed':'myplc',
+                         'hrn':xrn.get_hrn(),
+                         })
 
 def __get_registry_objects(slice_xrn, creds, users):
     """
@@ -42,7 +50,6 @@ def __get_registry_objects(slice_xrn, creds, users):
     slicename = hrn_to_pl_slicename(hrn)
     login_base = slicename.split('_')[0]
     reg_objects = {}
-
     site = {}
     site['site_id'] = 0
     site['name'] = 'geni.%s' % login_base
@@ -58,7 +65,12 @@ def __get_registry_objects(slice_xrn, creds, users):
     reg_objects['site'] = site
 
     slice = {}
-    slice['expires'] = int(time.mktime(Credential(string=creds[0]).get_expiration().timetuple()))
+
+    extime = Credential(string=creds[0]).get_expiration()
+    # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+    if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+        extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+    slice['expires'] = int(time.mktime(extime.timetuple()))
     slice['hrn'] = hrn
     slice['name'] = hrn_to_pl_slicename(hrn)
     slice['url'] = hrn
@@ -83,12 +95,6 @@ def __get_hostnames(nodes):
         hostnames.append(node.hostname)
     return hostnames
 
-def get_version():
-    version = {}
-    version['geni_api'] = 1
-    version['sfa'] = 1
-    return version
-
 def slice_status(api, slice_xrn, creds):
     hrn, type = urn_to_hrn(slice_xrn)
     # find out where this slice is currently running
@@ -101,37 +107,59 @@ def slice_status(api, slice_xrn, creds):
     slice = slices[0]
 
     nodes = api.plshell.GetNodes(api.plauth, slice['node_ids'],
-                                 ['hostname', 'boot_state', 'last_contact'])
-    api.logger.info(slice)
-    api.logger.info(nodes)
-
+                                 ['hostname', 'site_id', 'boot_state', 'last_contact'])
+    site_ids = [node['site_id'] for node in nodes]
+    sites = api.plshell.GetSites(api.plauth, site_ids, ['site_id', 'login_base'])
+    sites_dict = {}
+    for site in sites:
+        sites_dict[site['site_id']] = site['login_base']
+
+    # XX remove me
+    #api.logger.info(slice_xrn)
+    #api.logger.info(slice)
+    #api.logger.info(nodes)
+    # XX remove me
+
     result = {}
-    result['geni_urn'] = slice_xrn
-    result['geni_status'] = 'unknown'
+    top_level_status = 'unknown'
+    if nodes:
+        top_level_status = 'ready'
+    result['geni_urn'] = Xrn(slice_xrn, 'slice').get_urn()
     result['pl_login'] = slice['name']
-    result['pl_expires'] = slice['expires']
+    result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
 
     resources = []
-
     for node in nodes:
         res = {}
         res['pl_hostname'] = node['hostname']
         res['pl_boot_state'] = node['boot_state']
         res['pl_last_contact'] = node['last_contact']
-        res['geni_urn'] = ''
-        res['geni_status'] = 'unknown'
+        if not node['last_contact'] is None:
+            res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+        res['geni_urn'] = hostname_to_urn(api.hrn, sites_dict[node['site_id']], node['hostname'])
+        if node['boot_state'] == 'boot':
+            res['geni_status'] = 'ready'
+        else:
+            res['geni_status'] = 'failed'
+            top_level_status = 'failed'
+
+        res['geni_error'] = ''
 
         resources.append(res)
 
+    result['geni_status'] = top_level_status
     result['geni_resources'] = resources
+    # XX remove me
+    #api.logger.info(result)
+    # XX remove me
     return result
 
-def create_slice(api, slice_xrn, creds, rspec, users):
+def CreateSliver(api, slice_xrn, creds, rspec, users, call_id):
     """
     Create the sliver[s] (slice) at this aggregate.
     Verify HRN and initialize the slice record in PLC if necessary.
     """
+    if Callids().already_handled(call_id): return False
 
     reg_objects = __get_registry_objects(slice_xrn, creds, users)
 
@@ -142,11 +170,11 @@ def create_slice(api, slice_xrn, creds, rspec, users):
     sfa_peer = slices.get_sfa_peer(hrn)
     registry = api.registries[api.hrn]
     credential = api.getCredential()
-    site_id, remote_site_id = slices.verify_site(registry, credential, hrn,
-                                                 peer, sfa_peer, reg_objects)
+    (site_id, remote_site_id) = slices.verify_site(registry, credential, hrn,
+                                                   peer, sfa_peer, reg_objects)
 
     slice_record = slices.verify_slice(registry, credential, hrn, site_id,
-                                       remote_site_id, peer, sfa_peer, reg_objects)
+                                       remote_site_id, peer, sfa_peer, reg_objects)
 
     network = Network(api)
 
@@ -189,7 +217,7 @@ def renew_slice(api, xrn, creds, expiration_time):
     if not slices:
         raise RecordNotFound(hrn)
     slice = slices[0]
-    requested_time = parse(expiration_time)
+    requested_time = utcparse(expiration_time)
     record = {'expires': int(time.mktime(requested_time.timetuple()))}
     api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
     return 1
@@ -264,15 +292,20 @@ def get_slices(api, creds):
 
     return slice_urns
 
-def get_rspec(api, creds, options):
+# xxx Thierry : caching at the aggregate level sounds wrong...
+caching=True
+#caching=False
+def ListResources(api, creds, options,call_id):
+    if Callids().already_handled(call_id): return ""
     # get slice's hrn from options
     xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
+    (hrn, type) = urn_to_hrn(xrn)
 
     # look in cache first
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         rspec = api.cache.get('nodes')
         if rspec:
+            api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
             return rspec
 
     network = Network(api)
@@ -283,7 +316,7 @@ def get_rspec(api, creds, options):
     rspec = network.toxml()
 
     # cache the result
-    if api.cache and not xrn:
+    if caching and api.cache and not xrn:
         api.cache.add('nodes', rspec)
 
     return rspec
 
@@ -303,7 +336,7 @@ def get_ticket(api, xrn, creds, rspec, users):
     credential = api.getCredential()
     records = registry.Resolve(xrn, credential)
 
-    # similar to create_slice, we must verify that the required records exist
+    # similar to CreateSliver, we must verify that the required records exist
     # at this aggregate before we can issue a ticket
     site_id, remote_site_id = slices.verify_site(registry, credential, slice_hrn,
                                                  peer, sfa_peer, reg_objects)
@@ -352,15 +385,15 @@ def get_ticket(api, xrn, creds, rspec, users):
 def main():
     api = SfaAPI()
     """
-    rspec = get_rspec(api, "plc.princeton.sapan", None)
-    #rspec = get_rspec(api, "plc.princeton.coblitz", None)
-    #rspec = get_rspec(api, "plc.pl.sirius", None)
+    rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
+    #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
+    #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
     print rspec
     """
     f = open(sys.argv[1])
    xml = f.read()
     f.close()
-    create_slice(api, "plc.princeton.sapan", xml)
+    CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
 
 if __name__ == "__main__":
     main()
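Note on the new call_id guard: both CreateSliver and ListResources now return early when sfa.util.callids.Callids reports a call id as already handled, but the Callids implementation itself is not part of this diff. The sketch below is only an illustration of such a duplicate-call guard under the assumption of a simple in-memory registry of seen ids; it is not the actual sfa.util.callids code.

# Sketch (assumption): a minimal in-memory duplicate-call guard with the
# same interface the diff relies on. The real sfa.util.callids.Callids
# may store and expire call ids differently.
class Callids:

    _seen = set()   # call ids already served, shared across instances

    def already_handled(self, call_id):
        # An empty call_id means the caller did not ask for deduplication.
        if not call_id:
            return False
        if call_id in Callids._seen:
            return True
        Callids._seen.add(call_id)
        return False

# Usage, mirroring the guards added above:
#   if Callids().already_handled(call_id): return ""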