X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fplc%2Fslices.py;h=e6164442d7e6876b74863af807f34fae9f5ed59f;hb=10c8efa8217ba42dea14cd82441a7d733c0a86c9;hp=4040e6ce3fff4dc16754fc2f1730387fc1d62bb7;hpb=afec5e4f067654250ed3573c984de3c2a6143ba7;p=sfa.git

diff --git a/sfa/plc/slices.py b/sfa/plc/slices.py
index 4040e6ce..e6164442 100644
--- a/sfa/plc/slices.py
+++ b/sfa/plc/slices.py
@@ -19,24 +19,129 @@ from sfa.util.debug import log
 from sfa.server.aggregate import Aggregates
 from sfa.server.registry import Registries
 
+MAXINT = 2L**31-1
+
 class Slices(SimpleStorage):
 
     rspec_to_slice_tag = {'max_rate':'net_max_rate'}
 
-    def __init__(self, api, ttl = .5, caller_cred=None):
+    def __init__(self, api, ttl = .5, origin_hrn=None):
         self.api = api
         self.ttl = ttl
         self.threshold = None
-        path = self.api.config.SFA_BASE_DIR
+        path = self.api.config.SFA_DATA_DIR
         filename = ".".join([self.api.interface, self.api.hrn, "slices"])
         filepath = path + os.sep + filename
         self.slices_file = filepath
         SimpleStorage.__init__(self, self.slices_file)
         self.policy = Policy(self.api)
         self.load()
-        self.caller_cred=caller_cred
-
-
+        self.origin_hrn=origin_hrn
+
+    def get_slivers(self, hrn, node=None):
+
+        slice_name = hrn_to_pl_slicename(hrn)
+        # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        # of doing all of this?
+        #return self.api.GetSliceTicket(self.auth, slice_name)
+
+        # from PLCAPI.GetSlivers.get_slivers()
+        slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        # Build up list of users and slice attributes
+        person_ids = set()
+        all_slice_tag_ids = set()
+        for slice in slices:
+            person_ids.update(slice['person_ids'])
+            all_slice_tag_ids.update(slice['slice_tag_ids'])
+        person_ids = list(person_ids)
+        all_slice_tag_ids = list(all_slice_tag_ids)
+        # Get user information
+        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons = {}
+        for person in all_persons_list:
+            all_persons[person['person_id']] = person
+
+        # Build up list of keys
+        key_ids = set()
+        for person in all_persons.values():
+            key_ids.update(person['key_ids'])
+        key_ids = list(key_ids)
+        # Get user account keys
+        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys = {}
+        for key in all_keys_list:
+            all_keys[key['key_id']] = key
+        # Get slice attributes
+        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags = {}
+        for slice_tag in all_slice_tags_list:
+            all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+
+        slivers = []
+        for slice in slices:
+            keys = []
+            for person_id in slice['person_ids']:
+                if person_id in all_persons:
+                    person = all_persons[person_id]
+                    if not person['enabled']:
+                        continue
+                    for key_id in person['key_ids']:
+                        if key_id in all_keys:
+                            key = all_keys[key_id]
+                            keys += [{'key_type': key['key_type'],
+                                      'key': key['key']}]
+            attributes = []
+            # All (per-node and global) attributes for this slice
+            slice_tags = []
+            for slice_tag_id in slice['slice_tag_ids']:
+                if slice_tag_id in all_slice_tags:
+                    slice_tags.append(all_slice_tags[slice_tag_id])
+            # Per-node sliver attributes take precedence over global
+            # slice attributes, so set them first.
+            # Then comes nodegroup slice attributes
+            # Followed by global slice attributes
+            sliver_attributes = []
+
+            if node is not None:
+                for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    sliver_attributes.append(sliver_attribute['tagname'])
+                    attributes.append({'tagname': sliver_attribute['tagname'],
+                                       'value': sliver_attribute['value']})
+
+                # set nodegroup slice attributes
+                for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                    # Do not set any nodegroup slice attributes for
+                    # which there is at least one sliver attribute
+                    # already set.
+                    if slice_tag['tagname'] not in sliver_attributes:
+                        attributes.append({'tagname': slice_tag['tagname'],
+                                           'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                       'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT: slice['expires'] = MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+
     def get_peer(self, hrn):
         # Because of myplc federation, we first need to determine if this
         # slice belongs to our local plc or a myplc peer. We will assume it
@@ -109,7 +214,8 @@ class Slices(SimpleStorage):
             success = False
             # request hash is optional so let's try the call without it
             try:
-                slices = aggregates[aggregate].get_slices(credential)
+                request_hash = None
+                slices = aggregates[aggregate].get_slices(credential, request_hash)
                 slice_hrns.extend(slices)
                 success = True
             except:
@@ -168,13 +274,14 @@ class Slices(SimpleStorage):
 
     def delete_slice_smgr(self, hrn):
         credential = self.api.getCredential()
-        caller_cred = self.caller_cred
+        origin_hrn = self.origin_hrn
         aggregates = Aggregates(self.api)
         for aggregate in aggregates:
             success = False
             # request hash is optional so let's try the call without it
             try:
-                aggregates[aggregate].delete_slice(credential, hrn, caller_cred)
+                request_hash = None
+                aggregates[aggregate].delete_slice(credential, hrn, request_hash, origin_hrn)
                 success = True
             except:
                 print >> log, "%s" % (traceback.format_exc())
@@ -185,7 +292,7 @@ class Slices(SimpleStorage):
             try:
                 arg_list = [credential, hrn]
                 request_hash = self.api.key.compute_hash(arg_list)
-                aggregates[aggregate].delete_slice(credential, hrn, request_hash, caller_cred)
+                aggregates[aggregate].delete_slice(credential, hrn, request_hash, origin_hrn)
                 success = True
             except:
                 print >> log, "%s" % (traceback.format_exc())
@@ -266,7 +373,7 @@ class Slices(SimpleStorage):
         slicename = hrn_to_pl_slicename(slice_hrn)
         parts = slicename.split("_")
         login_base = parts[0]
-        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['slice_id', 'node_ids', 'site_id'])
+        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename])
         if not slices:
             slice_fields = {}
             slice_keys = ['name', 'url', 'description']
@@ -297,6 +404,8 @@ class Slices(SimpleStorage):
             slice = slices[0]
             slice_id = slice['slice_id']
             site_id = slice['site_id']
+            # The slice already exists on the remote aggregate; update it (e.g. the expires field) with the latest info.
+            self.sync_slice(slice, slice_record, peer)
             slice['peer_slice_id'] = slice_record['pointer']
             self.verify_persons(registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer)
 
@@ -494,7 +603,7 @@ class Slices(SimpleStorage):
             rspecs[net_hrn] = tempspec.toxml()
 
         # send each rspec to the appropriate aggregate/sm
-        caller_cred = self.caller_cred
+        origin_hrn = self.origin_hrn
         for net_hrn in rspecs:
             try:
                 # if we are directly connected to the aggregate then we can just send them the rspec
                 # if not, then we may be connected to an sm that's connected to the aggregate
                 # send the whole rspec to the local aggregate
                 if net_hrn in [self.api.hrn]:
                     try:
-                        aggregates[net_hrn].create_slice(credential, hrn, rspec, caller_cred)
+                        request_hash = None
+                        aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, origin_hrn)
                     except:
                         arg_list = [credential,hrn,rspec]
                         request_hash = self.api.key.compute_hash(arg_list)
-                        aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, caller_cred)
+                        aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, origin_hrn)
                 else:
                     try:
-                        aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], caller_cred)
+                        request_hash = None
+                        aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
                     except:
                         arg_list = [credential,hrn,rspecs[net_hrn]]
                         request_hash = self.api.key.compute_hash(arg_list)
-                        aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                        aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
             else:
                 # let's forward this rspec to an sm that knows about the network
                 arg_list = [credential, net_hrn]
@@ -526,11 +637,12 @@ class Slices(SimpleStorage):
                 request_hash = self.api.key.compute_hash(arg_list)
                 for aggregate in aggregates:
                     try:
                         network_found = aggregates[aggregate].get_aggregates(credential, net_hrn, request_hash)
                         if network_found:
                             try:
-                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], caller_cred)
+                                request_hash = None
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
                             except:
                                 arg_list = [credential, hrn, rspecs[net_hrn]]
                                 request_hash = self.api.key.compute_hash(arg_list)
-                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
                     except:
                         print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals()
@@ -591,3 +703,11 @@ class Slices(SimpleStorage):
             except:
                 aggregates[aggregate].stop_slice(credential, hrn, request_hash)
 
+    def sync_slice(self, old_record, new_record, peer):
+        if old_record['expires'] != new_record['expires']:
+            if peer:
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', old_record['slice_id'], peer)
+            self.api.plshell.UpdateSlice(self.api.plauth, old_record['slice_id'], {'expires' : new_record['expires']})
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', old_record['slice_id'], peer, old_record['peer_slice_id'])
+        return 1
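
A note on the calling convention this patch introduces: request_hash is an optional argument, so each remote call is first attempted with request_hash=None and, if the peer rejects it, retried with a hash computed over the argument list. A minimal sketch of that retry pattern, assuming the aggregate proxies and api.key.compute_hash() used above (the helper name is hypothetical):

    def call_with_optional_hash(api, aggregate, credential, hrn, origin_hrn):
        # First attempt: peers that treat the hash as optional accept None.
        try:
            return aggregate.delete_slice(credential, hrn, None, origin_hrn)
        except:
            # Fallback: older peers expect a hash computed over the arguments.
            request_hash = api.key.compute_hash([credential, hrn])
            return aggregate.delete_slice(credential, hrn, request_hash, origin_hrn)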
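
A note on the new sync_slice() helper: the update is wrapped in an UnBindObjectFromPeer/BindObjectToPeer pair, presumably because PLCAPI rejects local writes to objects that are bound to a peer. A generalized sketch of that unbind/update/rebind dance, using the same plshell/plauth handles as above (the helper name and the fields argument are hypothetical):

    def update_bound_slice(plshell, plauth, slice_id, peer, peer_slice_id, fields):
        if peer:
            # Drop the peer binding so the local update is allowed.
            plshell.UnBindObjectFromPeer(plauth, 'slice', slice_id, peer)
        plshell.UpdateSlice(plauth, slice_id, fields)
        if peer:
            # Restore the binding so the peer still owns the slice.
            plshell.BindObjectToPeer(plauth, 'slice', slice_id, peer, peer_slice_id)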