X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fplc%2Fslices.py;h=e6164442d7e6876b74863af807f34fae9f5ed59f;hb=10c8efa8217ba42dea14cd82441a7d733c0a86c9;hp=88e29c95d9275a950eeffff8dd31b1b84f85ce5d;hpb=d19daaaa783830706e04dc1a887b95c396c15f30;p=sfa.git

diff --git a/sfa/plc/slices.py b/sfa/plc/slices.py
index 88e29c95..e6164442 100644
--- a/sfa/plc/slices.py
+++ b/sfa/plc/slices.py
@@ -19,22 +19,129 @@ from sfa.util.debug import log
 from sfa.server.aggregate import Aggregates
 from sfa.server.registry import Registries
 
+MAXINT = 2L**31-1
+
 class Slices(SimpleStorage):
 
-    def __init__(self, api, ttl = .5, caller_cred=None):
+    rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
+
+    def __init__(self, api, ttl = .5, origin_hrn=None):
         self.api = api
         self.ttl = ttl
         self.threshold = None
-        path = self.api.config.SFA_BASE_DIR
+        path = self.api.config.SFA_DATA_DIR
         filename = ".".join([self.api.interface, self.api.hrn, "slices"])
         filepath = path + os.sep + filename
         self.slices_file = filepath
         SimpleStorage.__init__(self, self.slices_file)
         self.policy = Policy(self.api)
         self.load()
-        self.caller_cred=caller_cred
-
-
+        self.origin_hrn = origin_hrn
+
+    def get_slivers(self, hrn, node=None):
+        slice_name = hrn_to_pl_slicename(hrn)
+        # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        # of doing all of this?
+        #return self.api.GetSliceTicket(self.auth, slice_name)
+
+        # from PLCAPI.GetSlivers.get_slivers()
+        slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        # Build up list of users and slice attributes
+        person_ids = set()
+        all_slice_tag_ids = set()
+        for slice in slices:
+            person_ids.update(slice['person_ids'])
+            all_slice_tag_ids.update(slice['slice_tag_ids'])
+        person_ids = list(person_ids)
+        all_slice_tag_ids = list(all_slice_tag_ids)
+        # Get user information
+        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id': person_ids, 'enabled': True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons = {}
+        for person in all_persons_list:
+            all_persons[person['person_id']] = person
+
+        # Build up list of keys
+        key_ids = set()
+        for person in all_persons.values():
+            key_ids.update(person['key_ids'])
+        key_ids = list(key_ids)
+        # Get user account keys
+        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys = {}
+        for key in all_keys_list:
+            all_keys[key['key_id']] = key
+        # Get slice attributes
+        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags = {}
+        for slice_tag in all_slice_tags_list:
+            all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+
+        slivers = []
+        for slice in slices:
+            keys = []
+            for person_id in slice['person_ids']:
+                if person_id in all_persons:
+                    person = all_persons[person_id]
+                    if not person['enabled']:
+                        continue
+                    for key_id in person['key_ids']:
+                        if key_id in all_keys:
+                            key = all_keys[key_id]
+                            keys += [{'key_type': key['key_type'],
+                                      'key': key['key']}]
+            attributes = []
+            # All (per-node and global) attributes for this slice
+            slice_tags = []
+            for slice_tag_id in slice['slice_tag_ids']:
+                if slice_tag_id in all_slice_tags:
+                    slice_tags.append(all_slice_tags[slice_tag_id])
+            # Per-node sliver attributes take precedence over global
+            # slice attributes, so set them first. Then come nodegroup
+            # slice attributes, followed by global slice attributes.
+            sliver_attributes = []
+
+            if node is not None:
+                for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    sliver_attributes.append(sliver_attribute['tagname'])
+                    attributes.append({'tagname': sliver_attribute['tagname'],
+                                       'value': sliver_attribute['value']})
+
+                # set nodegroup slice attributes
+                for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                    # Do not set any nodegroup slice attributes for
+                    # which there is at least one sliver attribute
+                    # already set.
+                    if slice_tag['tagname'] not in sliver_attributes:
+                        attributes.append({'tagname': slice_tag['tagname'],
+                                           'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                       'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT: slice['expires'] = MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+
     def get_peer(self, hrn):
         # Because of myplc federation, we first need to determine if this
         # slice belongs to our local plc or a myplc peer. We will assume it
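The attribute resolution in get_slivers above boils down to a three-level precedence rule: per-node (sliver) tags win, then nodegroup tags, then global tags. The sketch below distills that rule into a standalone function; it assumes tag dicts shaped like GetSliceTags results ('node_id', 'nodegroup_id', 'tagname', 'value'), and resolve_attributes is a hypothetical name, not part of sfa:

    def resolve_attributes(slice_tags, node=None):
        # Per-node tags win over nodegroup tags, which win over global tags.
        attributes = []
        sliver_tagnames = []
        if node is not None:
            # 1. per-node (sliver) attributes
            for tag in slice_tags:
                if tag['node_id'] == node['node_id']:
                    sliver_tagnames.append(tag['tagname'])
                    attributes.append({'tagname': tag['tagname'], 'value': tag['value']})
            # 2. nodegroup attributes, unless already set per-node
            for tag in slice_tags:
                if tag['nodegroup_id'] in node['nodegroup_ids'] and tag['tagname'] not in sliver_tagnames:
                    attributes.append({'tagname': tag['tagname'], 'value': tag['value']})
        # 3. global attributes (node_id is None), unless already set per-node
        for tag in slice_tags:
            if tag['node_id'] is None and tag['tagname'] not in sliver_tagnames:
                attributes.append({'tagname': tag['tagname'], 'value': tag['value']})
        return attributes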
@@ -103,15 +210,31 @@ class Slices(SimpleStorage):
         slice_hrns = []
         aggregates = Aggregates(self.api)
         credential = self.api.getCredential()
-        arg_list = [credential]
-        request_hash = self.api.key.compute_hash(arg_list)
         for aggregate in aggregates:
+            success = False
+            # the request hash is optional, so let's try the call without it
             try:
+                request_hash = None
                 slices = aggregates[aggregate].get_slices(credential, request_hash)
                 slice_hrns.extend(slices)
+                success = True
             except:
+                print >> log, "%s" % (traceback.format_exc())
                 print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
-        # update timestamp and threshold
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                arg_list = [credential]
+                request_hash = self.api.key.compute_hash(arg_list)
+                try:
+                    slices = aggregates[aggregate].get_slices(credential, request_hash)
+                    slice_hrns.extend(slices)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+        # update timestamp and threshold
         timestamp = datetime.datetime.now()
         hr_timestamp = timestamp.strftime(self.api.time_format)
         delta = datetime.timedelta(hours=self.ttl)
@@ -151,16 +274,30 @@ class Slices(SimpleStorage):
 
     def delete_slice_smgr(self, hrn):
         credential = self.api.getCredential()
+        origin_hrn = self.origin_hrn
         aggregates = Aggregates(self.api)
         for aggregate in aggregates:
+            success = False
+            # the request hash is optional, so let's try the call without it
             try:
-                aggregates[aggregate].delete_slice(credential, hrn, caller_cred=self.caller_cred)
+                request_hash = None
+                aggregates[aggregate].delete_slice(credential, hrn, request_hash, origin_hrn)
+                success = True
             except:
+                print >> log, "%s" % (traceback.format_exc())
                 print >> log, "Error calling delete_slice at aggregate %s" % aggregate
-                traceback.print_exc(log)
-                exc_type, exc_value, exc_traceback = sys.exc_info()
-                print exc_type, exc_value, exc_traceback
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                try:
+                    arg_list = [credential, hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    aggregates[aggregate].delete_slice(credential, hrn, request_hash, origin_hrn)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling delete_slice at aggregate %s" % aggregate
 
     def create_slice(self, hrn, rspec):
         # check our slice policy before we proceed
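get_slices_smgr and delete_slice_smgr now share one calling convention: attempt the remote call with request_hash set to None, and only compute and send a hash when the plain call fails. Below is a minimal sketch of a helper capturing that pattern; call_with_optional_hash is hypothetical and assumes the remote method takes the hash as its last positional argument:

    def call_with_optional_hash(api, method, args):
        # Try the call without a request hash first; on failure, retry
        # with a hash computed over the original arguments.
        try:
            return method(*(args + [None]))
        except Exception:
            request_hash = api.key.compute_hash(args)
            return method(*(args + [request_hash]))

With such a helper, the per-aggregate loop in get_slices_smgr would reduce to call_with_optional_hash(self.api, aggregates[aggregate].get_slices, [credential]).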
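verify_slice below leans on the PlanetLab naming convention in which a slice HRN flattens to '<login_base>_<slicename>', so everything before the first underscore recovers the owning site's login_base. A small illustration of that derivation; the example HRN and its mapping are assumptions about hrn_to_pl_slicename's behavior, shown only to make the convention concrete:

    def derive_login_base(slice_hrn, hrn_to_pl_slicename):
        # e.g. a slice HRN such as 'plc.princeton.iias' would flatten to
        # 'princeton_iias', giving login_base 'princeton'
        slicename = hrn_to_pl_slicename(slice_hrn)
        return slicename.split("_")[0]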
@@ -260,16 +419,26 @@ class Slices(SimpleStorage):
         researchers = slice_record.get('researcher', [])
         for researcher in researchers:
             person_record = {}
-            person_records = registry.resolve(credential, researcher)
+            try:
+                person_records = registry.resolve(credential, researcher)
+            except:
+                arg_list = [credential, researcher]
+                request_hash = self.api.key.compute_hash(arg_list)
+                person_records = registry.resolve(credential, researcher, request_hash)
             for record in person_records:
                 if record['type'] in ['user']:
                     person_record = record
             if not person_record:
                 pass
-            person_dict = person_record.as_dict()
+            person_dict = person_record
+            local_person = False
             if peer:
                 peer_id = self.api.plshell.GetPeers(self.api.plauth, {'shortname': peer}, ['peer_id'])[0]['peer_id']
                 persons = self.api.plshell.GetPersons(self.api.plauth, {'email': [person_dict['email']], 'peer_id': peer_id}, ['person_id', 'key_ids'])
+                if not persons:
+                    persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
+                    if persons:
+                        local_person = True
             else:
                 persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
@@ -280,9 +449,13 @@ class Slices(SimpleStorage):
 
             # mark this person as an sfa peer record
             if sfa_peer:
-                peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id}
-                registry.register_peer_object(credential, peer_dict)
-            pass
+                peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id}
+                try:
+                    registry.register_peer_object(credential, peer_dict)
+                except:
+                    arg_list = [credential]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    registry.register_peer_object(credential, peer_dict, request_hash)
 
             if peer:
                 self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
@@ -300,13 +473,14 @@ class Slices(SimpleStorage):
                 self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename)
                 self.api.plshell.AddPersonToSite(self.api.plauth, person_dict['email'], site_id)
-            if peer:
+            if peer and not local_person:
                 self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+            if peer:
                 self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id)
 
-            self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer)
+            self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer, local_person)
 
-    def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer):
+    def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer, local_person):
         keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key'])
         keys = [key['key'] for key in keylist]
@@ -318,8 +492,9 @@ class Slices(SimpleStorage):
                 if peer:
                     self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer)
                 key_id = self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key)
-                if peer:
+                if peer and not local_person:
                     self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+                if peer:
                     try:
                         self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key_id, peer, key_ids.pop(0))
                     except:
                         pass
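Every write to a peer-owned PLC object in verify_persons and verify_keys is wrapped in the same unbind-modify-rebind dance, because PLCAPI refuses to modify objects owned by a foreign peer. A context manager makes the pairing explicit; unbound_from_peer is a sketch of the idea, not an existing sfa utility:

    from contextlib import contextmanager

    @contextmanager
    def unbound_from_peer(plshell, plauth, obj_type, obj_id, peer, remote_id):
        # Temporarily detach a PLC object from its peer so it can be
        # modified, rebinding it even if the modification raises.
        if peer:
            plshell.UnBindObjectFromPeer(plauth, obj_type, obj_id, peer)
        try:
            yield
        finally:
            if peer:
                plshell.BindObjectToPeer(plauth, obj_type, obj_id, peer, remote_id)

The AddPersonKey branch above could then be written as: with unbound_from_peer(shell, auth, 'person', person_id, peer, person_dict['pointer']): key_id = shell.AddPersonKey(auth, person_dict['email'], key).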
@@ -330,7 +505,7 @@ class Slices(SimpleStorage):
         peer = self.get_peer(hrn)
         sfa_peer = self.get_sfa_peer(hrn)
 
-        spec = Rspec(rspec)
+        spec = RSpec(rspec)
         # Get the slice record from sfa
         slicename = hrn_to_pl_slicename(hrn)
         slice = {}
@@ -348,21 +523,54 @@ class Slices(SimpleStorage):
 
         # get nodespec details
         nodespecs = spec.getDictsByTagName('NodeSpec')
-        nodes = []
+
+        # dict in which to store slice attributes to set for the nodes
+        nodes = {}
         for nodespec in nodespecs:
             if isinstance(nodespec['name'], list):
-                nodes.extend(nodespec['name'])
+                for nodename in nodespec['name']:
+                    nodes[nodename] = {}
+                    for k in nodespec.keys():
+                        rspec_attribute_value = nodespec[k]
+                        if self.rspec_to_slice_tag.has_key(k):
+                            slice_tag_name = self.rspec_to_slice_tag[k]
+                            nodes[nodename][slice_tag_name] = rspec_attribute_value
             elif isinstance(nodespec['name'], StringTypes):
-                nodes.append(nodespec['name'])
-
+                nodename = nodespec['name']
+                nodes[nodename] = {}
+                for k in nodespec.keys():
+                    rspec_attribute_value = nodespec[k]
+                    if self.rspec_to_slice_tag.has_key(k):
+                        slice_tag_name = self.rspec_to_slice_tag[k]
+                        nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+        node_names = nodes.keys()
         # remove nodes not in rspec
-        deleted_nodes = list(set(hostnames).difference(nodes))
+        deleted_nodes = list(set(hostnames).difference(node_names))
         # add nodes from rspec
-        added_nodes = list(set(nodes).difference(hostnames))
+        added_nodes = list(set(node_names).difference(hostnames))
 
         if peer:
             self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+
         self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes)
+
+        # Add recognized slice tags
+        for node_name in node_names:
+            node = nodes[node_name]
+            for slice_tag in node.keys():
+                value = node[slice_tag]
+                if isinstance(value, list):
+                    value = value[0]
+                self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name)
+
         self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
         if peer:
             self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
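Only rspec attributes named in rspec_to_slice_tag (currently just max_rate, stored as the net_max_rate slice tag) survive the nodespec translation above; unrecognized keys, including 'name' itself, are dropped. A condensed sketch of the same translation, assuming nodespec dicts of the shape returned by RSpec.getDictsByTagName('NodeSpec'):

    def nodespecs_to_tags(nodespecs, rspec_to_slice_tag):
        # Map each hostname to the slice tags recognized for it.
        nodes = {}
        for nodespec in nodespecs:
            names = nodespec['name']
            if not isinstance(names, list):
                names = [names]
            for nodename in names:
                nodes[nodename] = {}
                for attr, value in nodespec.items():
                    if attr in rspec_to_slice_tag:
                        nodes[nodename][rspec_to_slice_tag[attr]] = value
        return nodes

For example, nodespecs_to_tags([{'name': 'planetlab1.example.org', 'max_rate': '10000'}], {'max_rate': 'net_max_rate'}) yields {'planetlab1.example.org': {'net_max_rate': '10000'}}.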
@@ -370,12 +578,12 @@ class Slices(SimpleStorage):
         return 1
 
     def create_slice_smgr(self, hrn, rspec):
-        spec = Rspec()
-        tempspec = Rspec()
+        spec = RSpec()
+        tempspec = RSpec()
         spec.parseString(rspec)
         slicename = hrn_to_pl_slicename(hrn)
         specDict = spec.toDict()
-        if specDict.has_key('Rspec'): specDict = specDict['Rspec']
+        if specDict.has_key('RSpec'): specDict = specDict['RSpec']
         if specDict.has_key('start_time'): start_time = specDict['start_time']
         else: start_time = 0
         if specDict.has_key('end_time'): end_time = specDict['end_time']
@@ -390,27 +598,51 @@ class Slices(SimpleStorage):
         for netspec in netspecs:
             net_hrn = netspec['name']
             resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec}
-            resourceDict = {'Rspec': resources}
+            resourceDict = {'RSpec': resources}
             tempspec.parseDict(resourceDict)
             rspecs[net_hrn] = tempspec.toxml()
 
-        # send each rspec to the appropriate aggregate/sm
+        # send each rspec to the appropriate aggregate/sm
+        origin_hrn = self.origin_hrn
         for net_hrn in rspecs:
             try:
-                # if we are directly connected to the aggregate then we can just send them the rspec
-                # if not, then we may be connected to an sm that's connected to the aggregate
+                # if we are directly connected to the aggregate then we can just send them the rspec
+                # if not, then we may be connected to an sm that's connected to the aggregate
                 if net_hrn in aggregates:
-                    # send the whole rspec to the local aggregate
+                    # send the whole rspec to the local aggregate
                     if net_hrn in [self.api.hrn]:
-                        aggregates[net_hrn].create_slice(credential, hrn, rspec, caller_cred=self.caller_cred)
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, origin_hrn)
+                        except:
+                            arg_list = [credential, hrn, rspec]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, origin_hrn)
                     else:
-                        aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred)
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
+                        except:
+                            arg_list = [credential, hrn, rspecs[net_hrn]]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
                 else:
-                    # let's forward this rspec to an sm that knows about the network
+                    # let's forward this rspec to an sm that knows about the network
+                    arg_list = [credential, net_hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
                     for aggregate in aggregates:
-                        network_found = aggregates[aggregate].get_aggregates(credential, net_hrn)
+                        try:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn)
+                        except:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn, request_hash)
                         if network_found:
-                            aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred)
+                            try:
+                                request_hash = None
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
+                            except:
+                                arg_list = [credential, hrn, rspecs[net_hrn]]
+                                request_hash = self.api.key.compute_hash(arg_list)
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, origin_hrn)
             except:
                 print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals()
@@ -463,6 +695,19 @@ class Slices(SimpleStorage):
     def stop_slice_smgr(self, hrn):
         credential = self.api.getCredential()
         aggregates = Aggregates(self.api)
+        arg_list = [credential, hrn]
+        request_hash = self.api.key.compute_hash(arg_list)
         for aggregate in aggregates:
-            aggregates[aggregate].stop_slice(credential, hrn)
-
+            try:
+                aggregates[aggregate].stop_slice(credential, hrn)
+            except:
+                aggregates[aggregate].stop_slice(credential, hrn, request_hash)
+
+    def sync_slice(self, old_record, new_record, peer):
+        if old_record['expires'] != new_record['expires']:
+            if peer:
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', old_record['slice_id'], peer)
+            self.api.plshell.UpdateSlice(self.api.plauth, old_record['slice_id'], {'expires': new_record['expires']})
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', old_record['slice_id'], peer, old_record['peer_slice_id'])
+        return 1
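The MAXINT constant and the clamp on slice['expires'] in get_slivers guard the XML-RPC transport: the standard XML-RPC integer type is 32-bit signed, and values above 2**31-1 fail to marshal rather than wrap. A quick standalone illustration using the standard library (xmlrpc.client in Python 3; the same module is xmlrpclib in the Python 2 this file targets):

    import xmlrpc.client

    MAXINT = 2**31 - 1

    def clamp_expires(expires):
        # Keep the value inside the signed 32-bit range so it marshals.
        return min(expires, MAXINT)

    xmlrpc.client.dumps((clamp_expires(MAXINT + 100),))  # marshals fine
    try:
        xmlrpc.client.dumps((MAXINT + 100,))
    except OverflowError as err:
        print(err)  # int exceeds XML-RPC limits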