X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fplc%2Fslices.py;h=1e486a5bc1a91b621adbe85fc473d30431aea3ab;hb=e089eee3ca203abf5df903dab9faa04397cbc7be;hp=87b139c4857104f3ac08e538b416f7fde57c1cfb;hpb=c7657e7fb9f7453678efa4deb46a4a71ab6603d6;p=sfa.git diff --git a/sfa/plc/slices.py b/sfa/plc/slices.py index 87b139c4..1e486a5b 100644 --- a/sfa/plc/slices.py +++ b/sfa/plc/slices.py @@ -3,7 +3,10 @@ import datetime import time +import traceback +import sys +from types import StringTypes from sfa.util.misc import * from sfa.util.rspec import * from sfa.util.specdict import * @@ -16,7 +19,7 @@ from sfa.server.registry import Registries class Slices(SimpleStorage): - def __init__(self, api, ttl = .5): + def __init__(self, api, ttl = .5, caller_cred=None): self.api = api self.ttl = ttl self.threshold = None @@ -27,8 +30,30 @@ class Slices(SimpleStorage): SimpleStorage.__init__(self, self.slices_file) self.policy = Policy(self.api) self.load() + self.caller_cred=caller_cred + def get_peer(self, hrn): + # Becaues of myplc federation, we first need to determine if this + # slice belongs to out local plc or a myplc peer. 
We will assume it + # is a local site, unless we find out otherwise + peer = None + + # get this slice's authority (site) + slice_authority = get_authority(hrn) + + # get this site's authority (sfa root authority or sub authority) + site_authority = get_authority(slice_authority).lower() + + # check if we are already peered with this site_authority, if so + peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root']) + for peer_record in peers: + names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)] + if site_authority in names: + peer = peer_record['shortname'] + + return peer + def refresh(self): """ Update the cached list of slices @@ -93,20 +118,33 @@ class Slices(SimpleStorage): self.delete_slice_smgr(hrn) def delete_slice_aggregate(self, hrn): + slicename = hrn_to_pl_slicename(hrn) - slices = self.api.plshell.GetSlices(self.api.plauth, {'peer_id': None, 'name': slicename}) + slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}) if not slices: return 1 slice = slices[0] + # determine if this is a peer slice + peer = self.get_peer(hrn) + if peer: + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer) self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, slice['node_ids']) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id']) return 1 def delete_slice_smgr(self, hrn): credential = self.api.getCredential() aggregates = Aggregates(self.api) for aggregate in aggregates: - aggregates[aggregate].delete_slice(credential, hrn) + try: + aggregates[aggregate].delete_slice(credential, hrn, caller_cred=self.caller_cred) + except: + print >> log, "Error deleting slice at aggregate %s" % aggregate + traceback.print_exc(log) + exc_type, exc_value, exc_traceback = sys.exc_info() + print exc_type, exc_value, exc_traceback def create_slice(self, hrn, rspec): @@ 
-120,64 +158,58 @@ class Slices(SimpleStorage): print >> log, "Slice %(hrn)s not allowed by policy %(policy_file)s" % locals() return 1 - if self.api.interface in ['aggregate']: self.create_slice_aggregate(hrn, rspec) elif self.api.interface in ['slicemgr']: self.create_slice_smgr(hrn, rspec) def create_slice_aggregate(self, hrn, rspec): - # Becaues of myplc federation, we first need to determine if this - # slice belongs to out local plc or a myplc peer. We will assume it - # is a local site, unless we find out otherwise - peer = None - # get this slice's authority (site) - slice_authority = get_authority(hrn) - # get this site's authority (sfa root authority or sub authority) - site_authority = get_authority(slice_authority) - # check if we are already peered with this site_authority at ple, if so - peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root']) - for peer_record in peers: - if site_authority in peer_record.values(): - peer = peer_record['shortname'] + + # Determine if this is a peer slice + peer = self.get_peer(hrn) + spec = Rspec(rspec) - # Get the slice record from geni + # Get the slice record from sfa slice = {} + slice_record = None registries = Registries(self.api) registry = registries[self.api.hrn] credential = self.api.getCredential() - records = registry.resolve(credential, hrn) - for record in records: + slice_records = registry.resolve(credential, hrn) + for record in slice_records: if record.get_type() in ['slice']: slice_record = record.as_dict() if not slice_record: - raise RecordNotFound(hrn) - + raise RecordNotFound(hrn) + + # Get the slice's site record + authority = get_authority(hrn) + site_records = registry.resolve(credential, authority) + site = {} + for site_record in site_records: + if site_record.get_type() in ['authority']: + site = site_record.as_dict() + if not site: + raise RecordNotFound(authority) + remote_site_id = site.pop('site_id') + # Make sure slice exists at plc, if 
it doesnt add it slicename = hrn_to_pl_slicename(hrn) - slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['node_ids']) + slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['slice_id', 'node_ids', 'site_id'] ) + parts = slicename.split("_") + login_base = parts[0] + # if site doesnt exist add it + sites = self.api.plshell.GetSites(self.api.plauth, [login_base]) if not slices: - parts = slicename.split("_") - login_base = parts[0] - # if site doesnt exist add it - sites = self.api.plshell.GetSites(self.api.plauth, [login_base]) if not sites: - authority = get_authority(hrn) - site_records = registry.resolve(credential, authority) - site_record = {} - if not site_records: - raise RecordNotFound(authority) - site_record = site_records[0] - site = site_record.as_dict() - - # add the site - remote_site_id = site.pop('site_id') + # add the site site_id = self.api.plshell.AddSite(self.api.plauth, site) # this belongs to a peer if peer: self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id) else: - site = sites[0] + site_id = sites[0]['site_id'] + remote_site_id = sites[0]['peer_site_id'] # create slice object slice_fields = {} @@ -189,13 +221,16 @@ class Slices(SimpleStorage): # add the slice slice_id = self.api.plshell.AddSlice(self.api.plauth, slice_fields) slice = slice_fields + #this belongs to a peer - if peer: self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice_id, peer, slice_record['pointer']) - slice['node_ids'] = 0 + slice['node_ids'] = [] else: - slice = slices[0] + slice = slices[0] + slice_id = slice['slice_id'] + site_id = slice['site_id'] + remote_site_id = sites[0]['peer_site_id'] # get the list of valid slice users from the registry and make # they are added to the slice researchers = record.get('researcher', []) @@ -223,26 +258,42 @@ class Slices(SimpleStorage): self.api.plshell.UpdatePerson(self.api.plauth, person_id, {'enabled' : True}) if peer: - 
self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_record['pointer']) + self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) key_ids = [] else: + person_id = persons[0]['person_id'] key_ids = persons[0]['key_ids'] - self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename) + # if this is a peer person, we must unbind them from the peer or PLCAPI will throw + # an error + if peer: + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer) + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site_id, peer) + + self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename) + self.api.plshell.AddPersonToSite(self.api.plauth, person_dict['email'], site_id) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) + self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id) + # Get this users local keys keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key']) keys = [key['key'] for key in keylist] # add keys that arent already there + key_ids=person_dict['key_ids'] for personkey in person_dict['keys']: if personkey not in keys: key = {'key_type': 'ssh', 'key': personkey} if peer: - # XX Need to get the key_id from remote registry somehow - #self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', None, peer, key_id) - pass - else: - self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key) + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer) + key_id=self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) + # BindObjectToPeer may faill if type is key and it's already bound to the peer + # so lets just put a try/except here + try: 
self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key_id, peer, key_ids.pop(0)) + except: pass # find out where this slice is currently running nodelist = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname']) @@ -262,8 +313,12 @@ class Slices(SimpleStorage): # add nodes from rspec added_nodes = list(set(nodes).difference(hostnames)) + if peer: + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice_id, peer) self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes) self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice_id, peer, slice_record['pointer']) return 1 @@ -282,26 +337,37 @@ class Slices(SimpleStorage): rspecs = {} aggregates = Aggregates(self.api) credential = self.api.getCredential() - # only attempt to extract information about the aggregates we know about - for aggregate in aggregates: - netspec = spec.getDictByTagNameValue('NetSpec', aggregate) - if netspec: - # creat a plc dict - resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec} - resourceDict = {'Rspec': resources} - tempspec.parseDict(resourceDict) - rspecs[aggregate] = tempspec.toxml() - - # notify the aggregates - for aggregate in rspecs.keys(): + + # split the netspecs into individual rspecs + netspecs = spec.getDictsByTagName('NetSpec') + for netspec in netspecs: + net_hrn = netspec['name'] + resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec} + resourceDict = {'Rspec': resources} + tempspec.parseDict(resourceDict) + rspecs[net_hrn] = tempspec.toxml() + + # send each rspec to the appropriate aggregate/sm + for net_hrn in rspecs: try: - # send the whloe rspec to the local aggregate - if aggregate in [self.api.hrn]: - aggregates[aggregate].create_slice(credential, hrn, rspec) + # if we are directly connected to the aggregate then we can just send them the rspec + # if 
not, then we may be connected to an sm that's connected to the aggregate + if net_hrn in aggregates: + # send the whole rspec to the local aggregate + if net_hrn in [self.api.hrn]: + aggregates[net_hrn].create_slice(credential, hrn, rspec, caller_cred=self.caller_cred) + else: + aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred) + else: - aggregates[aggregate].create_slice(credential, hrn, rspecs[aggregate]) + # let's forward this rspec to a sm that knows about the network + for aggregate in aggregates: + network_found = aggregates[aggregate].get_aggregates(credential, net_hrn) + if network_found: + aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred) + except: - print >> log, "Error creating slice %(hrn)s at aggregate %(aggregate)s" % locals() + print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals() + traceback.print_exc() return 1