X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fplc%2Fslices.py;h=cfeed465ed716fbb2dc58625678a3a0f26583593;hb=3d7237fa0b5f2b4a60cb97c7fb3b6aecfd94558a;hp=88e29c95d9275a950eeffff8dd31b1b84f85ce5d;hpb=d19daaaa783830706e04dc1a887b95c396c15f30;p=sfa.git diff --git a/sfa/plc/slices.py b/sfa/plc/slices.py index 88e29c95..cfeed465 100644 --- a/sfa/plc/slices.py +++ b/sfa/plc/slices.py @@ -7,34 +7,141 @@ import traceback import sys from types import StringTypes -from sfa.util.misc import * +from sfa.util.namespace import * from sfa.util.rspec import * from sfa.util.specdict import * from sfa.util.faults import * from sfa.util.storage import * -from sfa.util.record import GeniRecord +from sfa.util.record import SfaRecord from sfa.util.policy import Policy from sfa.util.prefixTree import prefixTree from sfa.util.debug import log from sfa.server.aggregate import Aggregates from sfa.server.registry import Registries +MAXINT = 2L**31-1 + class Slices(SimpleStorage): - def __init__(self, api, ttl = .5, caller_cred=None): + rspec_to_slice_tag = {'max_rate':'net_max_rate'} + + def __init__(self, api, ttl = .5, origin_hrn=None): self.api = api self.ttl = ttl self.threshold = None - path = self.api.config.SFA_BASE_DIR + path = self.api.config.SFA_DATA_DIR filename = ".".join([self.api.interface, self.api.hrn, "slices"]) filepath = path + os.sep + filename self.slices_file = filepath SimpleStorage.__init__(self, self.slices_file) self.policy = Policy(self.api) self.load() - self.caller_cred=caller_cred - - + self.origin_hrn = origin_hrn + + def get_slivers(self, hrn, node=None): + + slice_name = hrn_to_pl_slicename(hrn) + # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead + # of doing all of this? 
+ #return self.api.GetSliceTicket(self.auth, slice_name) + + # from PLCAPI.GetSlivers.get_slivers() + slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids'] + slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields) + # Build up list of users and slice attributes + person_ids = set() + all_slice_tag_ids = set() + for slice in slices: + person_ids.update(slice['person_ids']) + all_slice_tag_ids.update(slice['slice_tag_ids']) + person_ids = list(person_ids) + all_slice_tag_ids = list(all_slice_tag_ids) + # Get user information + all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids']) + all_persons = {} + for person in all_persons_list: + all_persons[person['person_id']] = person + + # Build up list of keys + key_ids = set() + for person in all_persons.values(): + key_ids.update(person['key_ids']) + key_ids = list(key_ids) + # Get user account keys + all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type']) + all_keys = {} + for key in all_keys_list: + all_keys[key['key_id']] = key + # Get slice attributes + all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids) + all_slice_tags = {} + for slice_tag in all_slice_tags_list: + all_slice_tags[slice_tag['slice_tag_id']] = slice_tag + + slivers = [] + for slice in slices: + keys = [] + for person_id in slice['person_ids']: + if person_id in all_persons: + person = all_persons[person_id] + if not person['enabled']: + continue + for key_id in person['key_ids']: + if key_id in all_keys: + key = all_keys[key_id] + keys += [{'key_type': key['key_type'], + 'key': key['key']}] + attributes = [] + # All (per-node and global) attributes for this slice + slice_tags = [] + for slice_tag_id in slice['slice_tag_ids']: + if slice_tag_id in all_slice_tags: + slice_tags.append(all_slice_tags[slice_tag_id]) + # Per-node 
sliver attributes take precedence over global
+            # slice attributes, so set them first.
+            # Then comes nodegroup slice attributes
+            # Followed by global slice attributes
+            sliver_attributes = []
+
+            if node is not None:
+                for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    sliver_attributes.append(sliver_attribute['tagname'])
+                    attributes.append({'tagname': sliver_attribute['tagname'],
+                                       'value': sliver_attribute['value']})
+
+                # set nodegroup slice attributes
+                for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                    # Do not set any nodegroup slice attributes for
+                    # which there is at least one sliver attribute
+                    # already set.
+                    if slice_tag['tagname'] not in sliver_attributes:
+                        attributes.append({'tagname': slice_tag['tagname'],
+                                           'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                       'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT: slice['expires']= MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+
     def get_peer(self, hrn):
         # Becaues of myplc federation, we first need to determine if this
         # slice belongs to out local plc or a myplc peer. 
We will assume it
@@ -103,15 +210,29 @@ class Slices(SimpleStorage):
         slice_hrns = []
         aggregates = Aggregates(self.api)
         credential = self.api.getCredential()
-        arg_list = [credential]
-        request_hash = self.api.key.compute_hash(arg_list)
         for aggregate in aggregates:
+            success = False
+            # request hash is optional so lets try the call without it
             try:
-                slices = aggregates[aggregate].get_slices(credential, request_hash)
+                slices = aggregates[aggregate].get_slices(credential)
                 slice_hrns.extend(slices)
+                success = True
             except:
+                print >> log, "%s" % (traceback.format_exc())
                 print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
-            # update timestamp and threshold
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                arg_list = [credential]
+                try:
+                    slices = aggregates[aggregate].get_slices(credential, self.api.key.compute_hash(arg_list))
+                    slice_hrns.extend(slices)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+        # update timestamp and threshold
         timestamp = datetime.datetime.now()
         hr_timestamp = timestamp.strftime(self.api.time_format)
         delta = datetime.timedelta(hours=self.ttl)
@@ -126,65 +247,14 @@ class Slices(SimpleStorage):
         self.write()
-
-    def delete_slice(self, hrn):
-        if self.api.interface in ['aggregate']:
-            self.delete_slice_aggregate(hrn)
-        elif self.api.interface in ['slicemgr']:
-            self.delete_slice_smgr(hrn)
-
-    def delete_slice_aggregate(self, hrn):
-
-        slicename = hrn_to_pl_slicename(hrn)
-        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename})
-        if not slices:
-            return 1
-        slice = slices[0]
-
-        # determine if this is a peer slice
-        peer = self.get_peer(hrn)
-        if peer:
-            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
-        self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, slice['node_ids'])
-        if peer:
-            self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', 
slice['slice_id'], peer, slice['peer_slice_id']) - return 1 - - def delete_slice_smgr(self, hrn): - credential = self.api.getCredential() - aggregates = Aggregates(self.api) - for aggregate in aggregates: - try: - aggregates[aggregate].delete_slice(credential, hrn, caller_cred=self.caller_cred) - except: - print >> log, "Error calling list nodes at aggregate %s" % aggregate - traceback.print_exc(log) - exc_type, exc_value, exc_traceback = sys.exc_info() - print exc_type, exc_value, exc_traceback - - def create_slice(self, hrn, rspec): - - # check our slice policy before we procede - whitelist = self.policy['slice_whitelist'] - blacklist = self.policy['slice_blacklist'] - - if whitelist and hrn not in whitelist or \ - blacklist and hrn in blacklist: - policy_file = self.policy.policy_file - print >> log, "Slice %(hrn)s not allowed by policy %(policy_file)s" % locals() - return 1 - - if self.api.interface in ['aggregate']: - self.create_slice_aggregate(hrn, rspec) - elif self.api.interface in ['slicemgr']: - self.create_slice_smgr(hrn, rspec) - def verify_site(self, registry, credential, slice_hrn, peer, sfa_peer): authority = get_authority(slice_hrn) site_records = registry.resolve(credential, authority) + site = {} for site_record in site_records: if site_record['type'] == 'authority': - site = site_record.as_dict() + site = site_record if not site: raise RecordNotFound(authority) remote_site_id = site.pop('site_id') @@ -197,9 +267,8 @@ class Slices(SimpleStorage): self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id) # mark this site as an sfa peer record if sfa_peer: - peer_dict = {'type': 'authority', 'hrn': authority, 'peer_authority': sfa_peer, 'pointer': site_id} + peer_dict = {'type': 'authority', 'hrn': authority, 'peer_authority': sfa_peer, 'pointer': site_id} registry.register_peer_object(credential, peer_dict) - pass else: site_id = sites[0]['site_id'] remote_site_id = sites[0]['peer_site_id'] @@ -212,6 +281,7 @@ class 
Slices(SimpleStorage): slice_record = None authority = get_authority(slice_hrn) slice_records = registry.resolve(credential, slice_hrn) + for record in slice_records: if record['type'] in ['slice']: slice_record = record @@ -220,7 +290,7 @@ class Slices(SimpleStorage): slicename = hrn_to_pl_slicename(slice_hrn) parts = slicename.split("_") login_base = parts[0] - slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['slice_id', 'node_ids', 'site_id']) + slices = self.api.plshell.GetSlices(self.api.plauth, [slicename]) if not slices: slice_fields = {} slice_keys = ['name', 'url', 'description'] @@ -235,9 +305,8 @@ class Slices(SimpleStorage): # mark this slice as an sfa peer record if sfa_peer: - peer_dict = {'type': 'slice', 'hrn': slice_hrn, 'peer_authority': sfa_peer, 'pointer': slice_id} + peer_dict = {'type': 'slice', 'hrn': slice_hrn, 'peer_authority': sfa_peer, 'pointer': slice_id} registry.register_peer_object(credential, peer_dict) - pass #this belongs to a peer if peer: @@ -247,6 +316,8 @@ class Slices(SimpleStorage): slice = slices[0] slice_id = slice['slice_id'] site_id = slice['site_id'] + #the slice is alredy on the remote agg. Let us update(e.g. expires field) it with the latest info. 
+ self.sync_slice(slice, slice_record, peer) slice['peer_slice_id'] = slice_record['pointer'] self.verify_persons(registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer) @@ -266,10 +337,15 @@ class Slices(SimpleStorage): person_record = record if not person_record: pass - person_dict = person_record.as_dict() + person_dict = person_record + local_person=False if peer: peer_id = self.api.plshell.GetPeers(self.api.plauth, {'shortname': peer}, ['peer_id'])[0]['peer_id'] persons = self.api.plshell.GetPersons(self.api.plauth, {'email': [person_dict['email']], 'peer_id': peer_id}, ['person_id', 'key_ids']) + if not persons: + persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids']) + if persons: + local_person=True else: persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids']) @@ -280,9 +356,8 @@ class Slices(SimpleStorage): # mark this person as an sfa peer record if sfa_peer: - peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id} + peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id} registry.register_peer_object(credential, peer_dict) - pass if peer: self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) @@ -300,13 +375,14 @@ class Slices(SimpleStorage): self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename) self.api.plshell.AddPersonToSite(self.api.plauth, person_dict['email'], site_id) - if peer: + if peer and not local_person: self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) + if peer: self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id) - self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer) + self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer, 
local_person) - def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer): + def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer, local_person): keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key']) keys = [key['key'] for key in keylist] @@ -318,8 +394,9 @@ class Slices(SimpleStorage): if peer: self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer) key_id = self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key) - if peer: + if peer and not local_person: self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer']) + if peer: try: self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key_id, peer, key_ids.pop(0)) except: pass @@ -330,7 +407,7 @@ class Slices(SimpleStorage): peer = self.get_peer(hrn) sfa_peer = self.get_sfa_peer(hrn) - spec = Rspec(rspec) + spec = RSpec(rspec) # Get the slice record from sfa slicename = hrn_to_pl_slicename(hrn) slice = {} @@ -348,121 +425,65 @@ class Slices(SimpleStorage): # get netspec details nodespecs = spec.getDictsByTagName('NodeSpec') - nodes = [] + + # dict in which to store slice attributes to set for the nodes + nodes = {} for nodespec in nodespecs: if isinstance(nodespec['name'], list): - nodes.extend(nodespec['name']) + for nodename in nodespec['name']: + nodes[nodename] = {} + for k in nodespec.keys(): + rspec_attribute_value = nodespec[k] + if (self.rspec_to_slice_tag.has_key(k)): + slice_tag_name = self.rspec_to_slice_tag[k] + nodes[nodename][slice_tag_name] = rspec_attribute_value elif isinstance(nodespec['name'], StringTypes): - nodes.append(nodespec['name']) - + nodename = nodespec['name'] + nodes[nodename] = {} + for k in nodespec.keys(): + rspec_attribute_value = nodespec[k] + if (self.rspec_to_slice_tag.has_key(k)): + slice_tag_name = self.rspec_to_slice_tag[k] + nodes[nodename][slice_tag_name] = rspec_attribute_value + + for k in nodespec.keys(): + 
rspec_attribute_value = nodespec[k] + if (self.rspec_to_slice_tag.has_key(k)): + slice_tag_name = self.rspec_to_slice_tag[k] + nodes[nodename][slice_tag_name] = rspec_attribute_value + + node_names = nodes.keys() # remove nodes not in rspec - deleted_nodes = list(set(hostnames).difference(nodes)) + deleted_nodes = list(set(hostnames).difference(node_names)) # add nodes from rspec - added_nodes = list(set(nodes).difference(hostnames)) + added_nodes = list(set(node_names).difference(hostnames)) if peer: self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer) - self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes) - self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes) - if peer: - self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id']) - - return 1 - - def create_slice_smgr(self, hrn, rspec): - spec = Rspec() - tempspec = Rspec() - spec.parseString(rspec) - slicename = hrn_to_pl_slicename(hrn) - specDict = spec.toDict() - if specDict.has_key('Rspec'): specDict = specDict['Rspec'] - if specDict.has_key('start_time'): start_time = specDict['start_time'] - else: start_time = 0 - if specDict.has_key('end_time'): end_time = specDict['end_time'] - else: end_time = 0 - - rspecs = {} - aggregates = Aggregates(self.api) - credential = self.api.getCredential() - # split the netspecs into individual rspecs - netspecs = spec.getDictsByTagName('NetSpec') - for netspec in netspecs: - net_hrn = netspec['name'] - resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec} - resourceDict = {'Rspec': resources} - tempspec.parseDict(resourceDict) - rspecs[net_hrn] = tempspec.toxml() - - # send each rspec to the appropriate aggregate/sm - for net_hrn in rspecs: - try: - # if we are directly connected to the aggregate then we can just send them the rspec - # if not, then we may be connected to an sm thats connected to the aggregate - 
if net_hrn in aggregates: - # send the whloe rspec to the local aggregate - if net_hrn in [self.api.hrn]: - aggregates[net_hrn].create_slice(credential, hrn, rspec, caller_cred=self.caller_cred) - else: - aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred) - else: - # lets forward this rspec to a sm that knows about the network - for aggregate in aggregates: - network_found = aggregates[aggregate].get_aggregates(credential, net_hrn) - if network_networks: - aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], caller_cred=self.caller_cred) - - except: - print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals() - traceback.print_exc() - return 1 - - - def start_slice(self, hrn): - if self.api.interface in ['aggregate']: - self.start_slice_aggregate(hrn) - elif self.api.interface in ['slicemgr']: - self.start_slice_smgr(hrn) - - def start_slice_aggregate(self, hrn): - slicename = hrn_to_pl_slicename(hrn) - slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0] - attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id']) - attribute_id = attreibutes[0]['slice_attribute_id'] - self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "1" ) - return 1 + self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes) - def start_slice_smgr(self, hrn): - credential = self.api.getCredential() - aggregates = Aggregates(self.api) - for aggregate in aggregates: - aggregates[aggregate].start_slice(credential, hrn) - return 1 + # Add recognized slice tags + for node_name in node_names: + node = nodes[node_name] + for slice_tag in node.keys(): + value = node[slice_tag] + if (isinstance(value, list)): + value = value[0] + self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name) 
- def stop_slice(self, hrn): - if self.api.interface in ['aggregate']: - self.stop_slice_aggregate(hrn) - elif self.api.interface in ['slicemgr']: - self.stop_slice_smgr(hrn) + self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id']) - def stop_slice_aggregate(self, hrn): - slicename = hrn_to_pl_slicename(hrn) - slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id']) - if not slices: - raise RecordNotFound(hrn) - slice_id = slices[0]['slice_id'] - attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id']) - attribute_id = attributes[0]['slice_attribute_id'] - self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "0") return 1 - def stop_slice_smgr(self, hrn): - credential = self.api.getCredential() - aggregates = Aggregates(self.api) - for aggregate in aggregates: - aggregates[aggregate].stop_slice(credential, hrn) - + def sync_slice(self, old_record, new_record, peer): + if old_record['expires'] != new_record['expires']: + if peer: + self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', old_record['slice_id'], peer) + self.api.plshell.UpdateSlice(self.api.plauth, old_record['slice_id'], {'expires' : new_record['expires']}) + if peer: + self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', old_record['slice_id'], peer, old_record['peer_slice_id']) + return 1