From 86aab78b5301548d457bae4ad5fa0c252ea75fb0 Mon Sep 17 00:00:00 2001 From: Tony Mack Date: Tue, 1 Dec 2009 16:10:07 +0000 Subject: [PATCH] initial checkin of managers plugin directory --- sfa/client/sfi.py | 14 +- sfa/managers/__init__.py | 0 sfa/managers/aggregate/plc.py | 527 +++++++++++++ sfa/managers/component/__init__.py | 0 .../component/component_manager_plc.py | 39 + sfa/managers/component/plc.py | 703 ++++++++++++++++++ sfa/managers/slicemanager/plc.py | 244 ++++++ sfa/server/component.py | 7 +- 8 files changed, 1527 insertions(+), 7 deletions(-) create mode 100644 sfa/managers/__init__.py create mode 100644 sfa/managers/aggregate/plc.py create mode 100644 sfa/managers/component/__init__.py create mode 100644 sfa/managers/component/component_manager_plc.py create mode 100644 sfa/managers/component/plc.py create mode 100644 sfa/managers/slicemanager/plc.py diff --git a/sfa/client/sfi.py b/sfa/client/sfi.py index 92835008..d0327fec 100755 --- a/sfa/client/sfi.py +++ b/sfa/client/sfi.py @@ -141,6 +141,7 @@ class Sfi: "slices": "", "resources": "[name]", "create": "name rspec", + "get_trusted_certs": "cred", "get_ticket": "name rspec", "redeem_ticket": "ticket", "delete": "name", @@ -689,8 +690,17 @@ class Sfi: arg_list = [cred] request_hash = self.key.compute_hash(arg_list) return self.registry.update(cred, record, request_hash) - - + + def get_trusted_certs(self, opts, args): + """ + return the trusted certs at this interface + """ + trusted_certs = self.registry.get_trusted_certs() + for trusted_cert in trusted_certs: + cert = Certificate(string=trusted_cert) + print cert.get_subject() + return + def aggregates(self, opts, args): """ return a list of details about known aggregates diff --git a/sfa/managers/__init__.py b/sfa/managers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sfa/managers/aggregate/plc.py b/sfa/managers/aggregate/plc.py new file mode 100644 index 00000000..9524b845 --- /dev/null +++ b/sfa/managers/aggregate/plc.py @@ -0,0 +1,527 @@ +### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $ +### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $ + +import datetime +import time +import traceback +import sys + +from types import StringTypes +from sfa.util.misc import * +from sfa.util.rspec import * +from sfa.util.specdict import * +from sfa.util.faults import * +from sfa.util.storage import * +from sfa.util.record import GeniRecord +from sfa.util.policy import Policy +from sfa.util.prefixTree import prefixTree +from sfa.util.debug import log +from sfa.server.registry import Registries + +MAXINT = 2L**31-1 + +class Slices(SimpleStorage): + + rspec_to_slice_tag = {'max_rate':'net_max_rate'} + + def __init__(self, api, ttl = .5, caller_cred=None): + self.api = api + self.ttl = ttl + self.threshold = None + path = self.api.config.SFA_DATA_DIR + filename = ".".join([self.api.interface, self.api.hrn, "slices"]) + filepath = path + os.sep + filename + self.slices_file = filepath + SimpleStorage.__init__(self, self.slices_file) + self.policy = Policy(self.api) + self.load() + self.caller_cred=caller_cred + + def get_slivers(self, hrn, node=None): + + slice_name = hrn_to_pl_slicename(hrn) + # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead + # of doing all of this? 
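+        # The bulk queries below mirror PLCAPI's GetSlivers.get_slivers():
+        # slices, persons, keys and slice tags are each fetched once and
+        # joined in memory, rather than queried once per slice.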
+        #return self.api.GetSliceTicket(self.auth, slice_name)
+
+        # from PLCAPI.GetSlivers.get_slivers()
+        slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        # Build up list of users and slice attributes
+        person_ids = set()
+        all_slice_tag_ids = set()
+        for slice in slices:
+            person_ids.update(slice['person_ids'])
+            all_slice_tag_ids.update(slice['slice_tag_ids'])
+        person_ids = list(person_ids)
+        all_slice_tag_ids = list(all_slice_tag_ids)
+        # Get user information
+        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons = {}
+        for person in all_persons_list:
+            all_persons[person['person_id']] = person
+
+        # Build up list of keys
+        key_ids = set()
+        for person in all_persons.values():
+            key_ids.update(person['key_ids'])
+        key_ids = list(key_ids)
+        # Get user account keys
+        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys = {}
+        for key in all_keys_list:
+            all_keys[key['key_id']] = key
+        # Get slice attributes
+        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags = {}
+        for slice_tag in all_slice_tags_list:
+            all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+
+        slivers = []
+        for slice in slices:
+            keys = []
+            for person_id in slice['person_ids']:
+                if person_id in all_persons:
+                    person = all_persons[person_id]
+                    if not person['enabled']:
+                        continue
+                    for key_id in person['key_ids']:
+                        if key_id in all_keys:
+                            key = all_keys[key_id]
+                            keys += [{'key_type': key['key_type'],
+                                      'key': key['key']}]
+            attributes = []
+            # All (per-node and global) attributes for this slice
+            slice_tags = []
+            for slice_tag_id in slice['slice_tag_ids']:
+                if slice_tag_id in all_slice_tags:
+                    slice_tags.append(all_slice_tags[slice_tag_id])
+            # Per-node sliver attributes take precedence over global
+            # slice attributes, so set them first.
+            # Then come nodegroup slice attributes,
+            # followed by global slice attributes.
+            sliver_attributes = []
+
+            if node is not None:
+                for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    sliver_attributes.append(sliver_attribute['tagname'])
+                    attributes.append({'tagname': sliver_attribute['tagname'],
+                                       'value': sliver_attribute['value']})
+
+                # set nodegroup slice attributes
+                for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                    # Do not set any nodegroup slice attributes for
+                    # which there is at least one sliver attribute
+                    # already set.
+                    if slice_tag['tagname'] not in sliver_attributes:
+                        attributes.append({'tagname': slice_tag['tagname'],
+                                           'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                       'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT:  slice['expires'] = MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+
+    def get_peer(self, hrn):
+        # Because of myplc federation, we first need to determine if this
+        # slice belongs to our local plc or a myplc peer. We will assume it
+        # is a local site, unless we find out otherwise
+        peer = None
+
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+
+        # get this site's authority (sfa root authority or sub authority)
+        site_authority = get_authority(slice_authority).lower()
+
+        # check if we are already peered with this site_authority; if so,
+        # remember the peer's shortname
+        peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+        for peer_record in peers:
+            names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
+            if site_authority in names:
+                peer = peer_record['shortname']
+
+        return peer
+
+    def get_sfa_peer(self, hrn):
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.api.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def refresh(self):
+        """
+        Update the cached list of slices
+        """
+        # Reload components list
+        now = datetime.datetime.now()
+        if not self.has_key('threshold') or not self.has_key('timestamp') or \
+           now > datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format))):
+            self.refresh_slices_aggregate()
+
+    def refresh_slices_aggregate(self):
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(self.api.hrn, slice['name']) for slice in slices]
+
+        # update timestamp and threshold
+        timestamp = datetime.datetime.now()
+        hr_timestamp = timestamp.strftime(self.api.time_format)
+        delta = datetime.timedelta(hours=self.ttl)
+        threshold = timestamp + delta
+        hr_threshold = threshold.strftime(self.api.time_format)
+
+        slice_details = {'hrn': slice_hrns,
+                         'timestamp': hr_timestamp,
+                         'threshold': hr_threshold
+                        }
+        self.update(slice_details)
+        self.write()
+
+
+    def delete_slice(self, hrn):
+        self.delete_slice_aggregate(hrn)
+
+    def delete_slice_aggregate(self, hrn):
+
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+
+        # determine if this is a peer slice
+        peer = self.get_peer(hrn)
+        if peer:
+            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+        self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, slice['node_ids'])
+        if peer:
+            self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+
+    def verify_site(self, registry, credential, slice_hrn, peer, sfa_peer):
+        authority = get_authority(slice_hrn)
+        try:
+            site_records = registry.resolve(credential, authority)
+        except:
+            arg_list = [credential, authority]
+            request_hash = self.api.key.compute_hash(arg_list)
+            site_records = registry.resolve(credential, authority, request_hash)
+
+        site = {}
+        for site_record in site_records:
+            if site_record['type'] == 'authority':
+                site = site_record
+        if not site:
+            raise RecordNotFound(authority)
+        remote_site_id = site.pop('site_id')
+
+        login_base = get_leaf(authority)
+        sites = self.api.plshell.GetSites(self.api.plauth, login_base)
+        if not sites:
+            site_id = self.api.plshell.AddSite(self.api.plauth, site)
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id)
+            # mark this site as an sfa peer record
+            if sfa_peer:
+                peer_dict = {'type': 'authority', 'hrn': authority, 'peer_authority': sfa_peer, 'pointer': site_id}
+                try:
+                    registry.register_peer_object(credential, peer_dict)
+                except:
+                    arg_list = [credential]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    registry.register_peer_object(credential, peer_dict, request_hash)
+        else:
+            site_id = sites[0]['site_id']
+            remote_site_id = sites[0]['peer_site_id']
+
+
+        return (site_id, remote_site_id)
+
+    def verify_slice(self, registry, credential, slice_hrn, site_id, remote_site_id, peer, sfa_peer):
+        slice = {}
+        slice_record = None
+        authority = get_authority(slice_hrn)
+        try:
+            slice_records = registry.resolve(credential, slice_hrn)
+        except:
+            arg_list = [credential, slice_hrn]
+            request_hash = self.api.key.compute_hash(arg_list)
+            slice_records = registry.resolve(credential, slice_hrn, request_hash)
+
+        for record in slice_records:
+            if record['type'] in ['slice']:
+                slice_record = record
+        if not slice_record:
+            raise RecordNotFound(slice_hrn)
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        parts = slicename.split("_")
+        login_base = parts[0]
+        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['slice_id', 'node_ids', 'site_id'])
+        if not slices:
+            slice_fields = {}
+            slice_keys = ['name', 'url', 'description']
+            for key in slice_keys:
+                if key in slice_record and slice_record[key]:
+                    slice_fields[key] = slice_record[key]
+
+            # add the slice
+            slice_id = self.api.plshell.AddSlice(self.api.plauth, slice_fields)
+            slice = slice_fields
+            slice['slice_id'] = slice_id
+
+            # mark this slice as an sfa peer record
+            if sfa_peer:
+                peer_dict = {'type': 'slice', 'hrn': slice_hrn, 'peer_authority': sfa_peer, 'pointer': slice_id}
+                try:
+                    registry.register_peer_object(credential, peer_dict)
+                except:
+                    arg_list = [credential]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    registry.register_peer_object(credential, peer_dict, request_hash)
+
+            #this belongs to a peer
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice_id, peer, slice_record['pointer'])
+            slice['node_ids'] = []
+        else:
+            slice = slices[0]
+            slice_id = slice['slice_id']
+            site_id = slice['site_id']
+
+        slice['peer_slice_id'] = slice_record['pointer']
+        self.verify_persons(registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer)
+
+        return slice
+
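+    # The verify_* helpers below walk the registry records behind a slice
+    # and lazily create the matching PLC objects (site, slice, persons,
+    # keys), binding them to a remote peer when the records originate at
+    # another authority.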
+    def verify_persons(self, registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer):
+        # get the list of valid slice users from the registry and make
+        # sure they are added to the slice
+        slicename = hrn_to_pl_slicename(slice_record['hrn'])
+        researchers = slice_record.get('researcher', [])
+        for researcher in researchers:
+            person_record = {}
+            try:
+                person_records = registry.resolve(credential, researcher)
+            except:
+                arg_list = [credential, researcher]
+                request_hash = self.api.key.compute_hash(arg_list)
+                person_records = registry.resolve(credential, researcher, request_hash)
+            for record in person_records:
+                if record['type'] in ['user']:
+                    person_record = record
+            if not person_record:
+                continue
+            person_dict = person_record
+            local_person = False
+            if peer:
+                peer_id = self.api.plshell.GetPeers(self.api.plauth, {'shortname': peer}, ['peer_id'])[0]['peer_id']
+                persons = self.api.plshell.GetPersons(self.api.plauth, {'email': [person_dict['email']], 'peer_id': peer_id}, ['person_id', 'key_ids'])
+                if not persons:
+                    persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
+                    if persons:
+                        local_person = True
+
+            else:
+                persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
+
+            if not persons:
+                person_id = self.api.plshell.AddPerson(self.api.plauth, person_dict)
+                self.api.plshell.UpdatePerson(self.api.plauth, person_id, {'enabled' : True})
+
+                # mark this person as an sfa peer record
+                if sfa_peer:
+                    peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id}
+                    try:
+                        registry.register_peer_object(credential, peer_dict)
+                    except:
+                        arg_list = [credential]
+                        request_hash = self.api.key.compute_hash(arg_list)
+                        registry.register_peer_object(credential, peer_dict, request_hash)
+
+                if peer:
+                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+                key_ids = []
+            else:
+                person_id = persons[0]['person_id']
+                key_ids = persons[0]['key_ids']
+
+
+            # if this is a peer person, we must unbind them from the peer or PLCAPI will throw
+            # an error
+            if peer:
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer)
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site_id, peer)
+
+            self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename)
+            self.api.plshell.AddPersonToSite(self.api.plauth, person_dict['email'], site_id)
+            if peer and not local_person:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id)
+
+            self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer, local_person)
+
+    def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer, local_person):
+        keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key'])
+        keys = [key['key'] for key in keylist]
+
+        # add keys that aren't already there
+        key_ids = person_dict['key_ids']
+        for personkey in person_dict['keys']:
+            if personkey not in keys:
+                key = {'key_type': 'ssh', 'key': personkey}
+                if peer:
+                    self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer)
+                key_id = self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key)
+                if peer and not local_person:
+                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+                if peer:
+                    try: self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key_id, peer, key_ids.pop(0))
+                    except: pass
+
+    def create_slice(self, hrn, rspec):
+
+        # check our slice policy before we proceed
+        whitelist = self.policy['slice_whitelist']
+        blacklist = self.policy['slice_blacklist']
+
+        if whitelist and hrn not in whitelist or \
+           blacklist and hrn in blacklist:
+            policy_file = self.policy.policy_file
+            print >> log, "Slice %(hrn)s not allowed by policy %(policy_file)s" % locals()
+            return 1
+
+        self.create_slice_aggregate(hrn, rspec)
+
+    def create_slice_aggregate(self, hrn, rspec):
+
+        # Determine if this is a peer slice
+        peer = self.get_peer(hrn)
+        sfa_peer = self.get_sfa_peer(hrn)
+
+        spec = RSpec(rspec)
+        # Get the slice record from sfa
+        slicename = hrn_to_pl_slicename(hrn)
+        slice = {}
+        slice_record = None
+        registries = Registries(self.api)
+        registry = registries[self.api.hrn]
+        credential = self.api.getCredential()
+
+        site_id, remote_site_id = self.verify_site(registry, credential, hrn, peer, sfa_peer)
+        slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
+
+        # find out where this slice is currently running
+        nodelist = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        hostnames = [node['hostname'] for node in nodelist]
+
+        # get netspec details
+        nodespecs = spec.getDictsByTagName('NodeSpec')
+
+        # dict in which to store slice attributes to set for the nodes
+        nodes = {}
+        for nodespec in nodespecs:
+            if isinstance(nodespec['name'], list):
+                for nodename in nodespec['name']:
+                    nodes[nodename] = {}
+                    for k in nodespec.keys():
+                        rspec_attribute_value = nodespec[k]
+                        if (self.rspec_to_slice_tag.has_key(k)):
+                            slice_tag_name = self.rspec_to_slice_tag[k]
+                            nodes[nodename][slice_tag_name] = rspec_attribute_value
+            elif isinstance(nodespec['name'], StringTypes):
+                nodename = nodespec['name']
+                nodes[nodename] = {}
+                for k in nodespec.keys():
+                    rspec_attribute_value = nodespec[k]
+                    if (self.rspec_to_slice_tag.has_key(k)):
+                        slice_tag_name = self.rspec_to_slice_tag[k]
+                        nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+        node_names = nodes.keys()
+        # remove nodes not in rspec
+        deleted_nodes = list(set(hostnames).difference(node_names))
+        # add nodes from rspec
+        added_nodes = list(set(node_names).difference(hostnames))
+
+        if peer:
+            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+
+        self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes)
+
+        # Add recognized slice tags
+        for node_name in node_names:
+            node = nodes[node_name]
+            for slice_tag in node.keys():
+                value = node[slice_tag]
+                if (isinstance(value, list)):
+                    value = value[0]
+
+                self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name)
+
+        self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
+        if peer:
+            self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+        return 1
+
+    def start_slice(self, hrn):
+        self.start_slice_aggregate(hrn)
+
+    def start_slice_aggregate(self, hrn):
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id'])
+        attribute_id = attributes[0]['slice_attribute_id']
+        self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "1")
+        return 1
+
+
+    def stop_slice(self, hrn):
+        self.stop_slice_aggregate(hrn)
+
+    def stop_slice_aggregate(self, hrn):
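+        # Stopping, like starting, just flips the slice's 'enabled' slice
+        # attribute: "0" here, "1" in start_slice_aggregate above.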
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id'])
+        attribute_id = attributes[0]['slice_attribute_id']
+        self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "0")
+        return 1
+
diff --git a/sfa/managers/component/__init__.py b/sfa/managers/component/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/sfa/managers/component/component_manager_plc.py b/sfa/managers/component/component_manager_plc.py
new file mode 100644
index 00000000..71cc6287
--- /dev/null
+++ b/sfa/managers/component/component_manager_plc.py
@@ -0,0 +1,39 @@
+import os
+import xmlrpclib
+from sfa.util.faults import *
+from sfa.util.misc import hrn_to_pl_slicename
+from sfa.util.sfaticket import SfaTicket
+
+def start_slice(api, slicename):
+    api.nodemanager.Start(slicename)
+
+def stop_slice(api, slicename):
+    api.nodemanager.Stop(slicename)
+
+def delete_slice(api, slicename):
+    api.nodemanager.Destroy(slicename)
+
+def reset_slice(api, slicename):
+    if not api.sliver_exists(slicename):
+        raise SliverDoesNotExist(slicename)
+    api.nodemanager.ReCreate(slicename)
+
+def get_slices(api):
+    slicenames = api.nodemanager.GetXIDs().keys()
+    return slicenames
+
+def reboot():
+    os.system("/sbin/reboot")
+
+def redeem_ticket(api, ticket_string):
+    ticket = SfaTicket(string=ticket_string)
+    ticket.decode()
+    hrn = ticket.attributes['slivers'][0]['hrn']
+    slicename = hrn_to_pl_slicename(hrn)
+    if not api.sliver_exists(slicename):
+        raise SliverDoesNotExist(slicename)
+
+    # convert the ticket to the format the node manager expects
+    nm_ticket = xmlrpclib.dumps((ticket.attributes,), methodresponse=True)
+    api.nodemanager.AdminTicket(nm_ticket)
+
+
diff --git a/sfa/managers/component/plc.py b/sfa/managers/component/plc.py
new file mode 100644
index 00000000..f7d09f36
--- /dev/null
+++ b/sfa/managers/component/plc.py
@@ -0,0 +1,703 @@
+### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $
+### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $
+
+import datetime
+import time
+import traceback
+import sys
+
+from types import StringTypes
+from sfa.util.misc import *
+from sfa.util.rspec import *
+from sfa.util.specdict import *
+from sfa.util.faults import *
+from sfa.util.storage import *
+from sfa.util.record import GeniRecord
+from sfa.util.policy import Policy
+from sfa.util.prefixTree import prefixTree
+from sfa.util.debug import log
+from sfa.server.aggregate import Aggregates
+from sfa.server.registry import Registries
+
+MAXINT = 2L**31-1
+
+class Slices(SimpleStorage):
+
+    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+    def __init__(self, api, ttl = .5, caller_cred=None):
+        self.api = api
+        self.ttl = ttl
+        self.threshold = None
+        path = self.api.config.SFA_DATA_DIR
+        filename = ".".join([self.api.interface, self.api.hrn, "slices"])
+        filepath = path + os.sep + filename
+        self.slices_file = filepath
+        SimpleStorage.__init__(self, self.slices_file)
+        self.policy = Policy(self.api)
+        self.load()
+        self.caller_cred = caller_cred
+
+    def get_slivers(self, hrn, node=None):
+
+        slice_name = hrn_to_pl_slicename(hrn)
+        # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
+        # of doing all of this?
+        #return self.api.GetSliceTicket(self.auth, slice_name)
+
+        # from PLCAPI.GetSlivers.get_slivers()
+        slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
+        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        # Build up list of users and slice attributes
+        person_ids = set()
+        all_slice_tag_ids = set()
+        for slice in slices:
+            person_ids.update(slice['person_ids'])
+            all_slice_tag_ids.update(slice['slice_tag_ids'])
+        person_ids = list(person_ids)
+        all_slice_tag_ids = list(all_slice_tag_ids)
+        # Get user information
+        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons = {}
+        for person in all_persons_list:
+            all_persons[person['person_id']] = person
+
+        # Build up list of keys
+        key_ids = set()
+        for person in all_persons.values():
+            key_ids.update(person['key_ids'])
+        key_ids = list(key_ids)
+        # Get user account keys
+        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys = {}
+        for key in all_keys_list:
+            all_keys[key['key_id']] = key
+        # Get slice attributes
+        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags = {}
+        for slice_tag in all_slice_tags_list:
+            all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
+
+        slivers = []
+        for slice in slices:
+            keys = []
+            for person_id in slice['person_ids']:
+                if person_id in all_persons:
+                    person = all_persons[person_id]
+                    if not person['enabled']:
+                        continue
+                    for key_id in person['key_ids']:
+                        if key_id in all_keys:
+                            key = all_keys[key_id]
+                            keys += [{'key_type': key['key_type'],
+                                      'key': key['key']}]
+            attributes = []
+            # All (per-node and global) attributes for this slice
+            slice_tags = []
+            for slice_tag_id in slice['slice_tag_ids']:
+                if slice_tag_id in all_slice_tags:
+                    slice_tags.append(all_slice_tags[slice_tag_id])
+            # Per-node sliver attributes take precedence over global
+            # slice attributes, so set them first.
+            # Then come nodegroup slice attributes,
+            # followed by global slice attributes.
+            sliver_attributes = []
+
+            if node is not None:
+                for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+                    sliver_attributes.append(sliver_attribute['tagname'])
+                    attributes.append({'tagname': sliver_attribute['tagname'],
+                                       'value': sliver_attribute['value']})
+
+                # set nodegroup slice attributes
+                for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+                    # Do not set any nodegroup slice attributes for
+                    # which there is at least one sliver attribute
+                    # already set.
+                    if slice_tag['tagname'] not in sliver_attributes:
+                        attributes.append({'tagname': slice_tag['tagname'],
+                                           'value': slice_tag['value']})
+
+            for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+                # Do not set any global slice attributes for
+                # which there is at least one sliver attribute
+                # already set.
+                if slice_tag['tagname'] not in sliver_attributes:
+                    attributes.append({'tagname': slice_tag['tagname'],
+                                       'value': slice_tag['value']})
+
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT:  slice['expires'] = MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                'name': slice['name'],
+                'slice_id': slice['slice_id'],
+                'instantiation': slice['instantiation'],
+                'expires': slice['expires'],
+                'keys': keys,
+                'attributes': attributes
+            })
+
+        return slivers
+
+    def get_peer(self, hrn):
+        # Because of myplc federation, we first need to determine if this
+        # slice belongs to our local plc or a myplc peer. We will assume it
+        # is a local site, unless we find out otherwise
+        peer = None
+
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+
+        # get this site's authority (sfa root authority or sub authority)
+        site_authority = get_authority(slice_authority).lower()
+
+        # check if we are already peered with this site_authority; if so,
+        # remember the peer's shortname
+        peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+        for peer_record in peers:
+            names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
+            if site_authority in names:
+                peer = peer_record['shortname']
+
+        return peer
+
+    def get_sfa_peer(self, hrn):
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.api.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def refresh(self):
+        """
+        Update the cached list of slices
+        """
+        # Reload components list
+        now = datetime.datetime.now()
+        if not self.has_key('threshold') or not self.has_key('timestamp') or \
+           now > datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format))):
+            if self.api.interface in ['aggregate']:
+                self.refresh_slices_aggregate()
+            elif self.api.interface in ['slicemgr']:
+                self.refresh_slices_smgr()
+
+    def refresh_slices_aggregate(self):
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(self.api.hrn, slice['name']) for slice in slices]
+
+        # update timestamp and threshold
+        timestamp = datetime.datetime.now()
+        hr_timestamp = timestamp.strftime(self.api.time_format)
+        delta = datetime.timedelta(hours=self.ttl)
+        threshold = timestamp + delta
+        hr_threshold = threshold.strftime(self.api.time_format)
+
+        slice_details = {'hrn': slice_hrns,
+                         'timestamp': hr_timestamp,
+                         'threshold': hr_threshold
+                        }
+        self.update(slice_details)
+        self.write()
+
+
+    def refresh_slices_smgr(self):
+        slice_hrns = []
+        aggregates = Aggregates(self.api)
+        credential = self.api.getCredential()
+        for aggregate in aggregates:
+            success = False
+            # request hash is optional so let's try the call without it
+            try:
+                request_hash = None
+                slices = aggregates[aggregate].get_slices(credential, request_hash, self.caller_cred)
+                slice_hrns.extend(slices)
+                success = True
+            except:
+                print >> log, "%s" % (traceback.format_exc())
+                print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
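+            # This try-without-hash / retry-with-hash pattern recurs at every
+            # aggregate call site in this file; a shared helper could capture
+            # it. A rough sketch (hypothetical, not part of this patch):
+            #
+            #   def get_slices_with_fallback(agg, credential, caller_cred, key):
+            #       try:
+            #           return agg.get_slices(credential, None, caller_cred)
+            #       except:
+            #           request_hash = key.compute_hash([credential])
+            #           return agg.get_slices(credential, request_hash, caller_cred)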
+            # try sending the request hash if the previous call failed
+            if not success:
+                arg_list = [credential]
+                request_hash = self.api.key.compute_hash(arg_list)
+                try:
+                    slices = aggregates[aggregate].get_slices(credential, request_hash, self.caller_cred)
+                    slice_hrns.extend(slices)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+        # update timestamp and threshold
+        timestamp = datetime.datetime.now()
+        hr_timestamp = timestamp.strftime(self.api.time_format)
+        delta = datetime.timedelta(hours=self.ttl)
+        threshold = timestamp + delta
+        hr_threshold = threshold.strftime(self.api.time_format)
+
+        slice_details = {'hrn': slice_hrns,
+                         'timestamp': hr_timestamp,
+                         'threshold': hr_threshold
+                        }
+        self.update(slice_details)
+        self.write()
+
+
+    def delete_slice(self, hrn):
+        if self.api.interface in ['aggregate']:
+            self.delete_slice_aggregate(hrn)
+        elif self.api.interface in ['slicemgr']:
+            self.delete_slice_smgr(hrn)
+
+    def delete_slice_aggregate(self, hrn):
+
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+
+        # determine if this is a peer slice
+        peer = self.get_peer(hrn)
+        if peer:
+            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+        self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, slice['node_ids'])
+        if peer:
+            self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+
+    def delete_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        caller_cred = self.caller_cred
+        aggregates = Aggregates(self.api)
+        for aggregate in aggregates:
+            success = False
+            # request hash is optional so let's try the call without it
+            try:
+                request_hash = None
+                aggregates[aggregate].delete_slice(credential, hrn, request_hash, caller_cred)
+                success = True
+            except:
+                print >> log, "%s" % (traceback.format_exc())
+                print >> log, "Error calling delete slice at aggregate %s" % aggregate
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                try:
+                    arg_list = [credential, hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    aggregates[aggregate].delete_slice(credential, hrn, request_hash, caller_cred)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling delete slice at aggregate %s" % aggregate
+
+    def create_slice(self, hrn, rspec):
+
+        # check our slice policy before we proceed
+        whitelist = self.policy['slice_whitelist']
+        blacklist = self.policy['slice_blacklist']
+
+        if whitelist and hrn not in whitelist or \
+           blacklist and hrn in blacklist:
+            policy_file = self.policy.policy_file
+            print >> log, "Slice %(hrn)s not allowed by policy %(policy_file)s" % locals()
+            return 1
+
+        if self.api.interface in ['aggregate']:
+            self.create_slice_aggregate(hrn, rspec)
+        elif self.api.interface in ['slicemgr']:
+            self.create_slice_smgr(hrn, rspec)
+
+    def verify_site(self, registry, credential, slice_hrn, peer, sfa_peer):
+        authority = get_authority(slice_hrn)
+        try:
+            site_records = registry.resolve(credential, authority)
+        except:
+            arg_list = [credential, authority]
+            request_hash = self.api.key.compute_hash(arg_list)
+            site_records = registry.resolve(credential, authority, request_hash)
+
+        site = {}
+        for site_record in site_records:
+            if site_record['type'] == 'authority':
+                site = site_record
+        if not site:
+            raise RecordNotFound(authority)
+        remote_site_id = site.pop('site_id')
+
+        login_base = get_leaf(authority)
+        sites = self.api.plshell.GetSites(self.api.plauth, login_base)
+        if not sites:
+            site_id = self.api.plshell.AddSite(self.api.plauth, site)
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id)
+            # mark this site as an sfa peer record
+            if sfa_peer:
+                peer_dict = {'type': 'authority', 'hrn': authority, 'peer_authority': sfa_peer, 'pointer': site_id}
+                try:
+                    registry.register_peer_object(credential, peer_dict)
+                except:
+                    arg_list = [credential]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    registry.register_peer_object(credential, peer_dict, request_hash)
+        else:
+            site_id = sites[0]['site_id']
+            remote_site_id = sites[0]['peer_site_id']
+
+
+        return (site_id, remote_site_id)
+
+    def verify_slice(self, registry, credential, slice_hrn, site_id, remote_site_id, peer, sfa_peer):
+        slice = {}
+        slice_record = None
+        authority = get_authority(slice_hrn)
+        try:
+            slice_records = registry.resolve(credential, slice_hrn)
+        except:
+            arg_list = [credential, slice_hrn]
+            request_hash = self.api.key.compute_hash(arg_list)
+            slice_records = registry.resolve(credential, slice_hrn, request_hash)
+
+        for record in slice_records:
+            if record['type'] in ['slice']:
+                slice_record = record
+        if not slice_record:
+            raise RecordNotFound(slice_hrn)
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        parts = slicename.split("_")
+        login_base = parts[0]
+        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename], ['slice_id', 'node_ids', 'site_id'])
+        if not slices:
+            slice_fields = {}
+            slice_keys = ['name', 'url', 'description']
+            for key in slice_keys:
+                if key in slice_record and slice_record[key]:
+                    slice_fields[key] = slice_record[key]
+
+            # add the slice
+            slice_id = self.api.plshell.AddSlice(self.api.plauth, slice_fields)
+            slice = slice_fields
+            slice['slice_id'] = slice_id
+
+            # mark this slice as an sfa peer record
+            if sfa_peer:
+                peer_dict = {'type': 'slice', 'hrn': slice_hrn, 'peer_authority': sfa_peer, 'pointer': slice_id}
+                try:
+                    registry.register_peer_object(credential, peer_dict)
+                except:
+                    arg_list = [credential]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    registry.register_peer_object(credential, peer_dict, request_hash)
+
+            #this belongs to a peer
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice_id, peer, slice_record['pointer'])
+            slice['node_ids'] = []
+        else:
+            slice = slices[0]
+            slice_id = slice['slice_id']
+            site_id = slice['site_id']
+
+        slice['peer_slice_id'] = slice_record['pointer']
+        self.verify_persons(registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer)
+
+        return slice
+
+    def verify_persons(self, registry, credential, slice_record, site_id, remote_site_id, peer, sfa_peer):
+        # get the list of valid slice users from the registry and make
+        # sure they are added to the slice
+        slicename = hrn_to_pl_slicename(slice_record['hrn'])
+        researchers = slice_record.get('researcher', [])
+        for researcher in researchers:
+            person_record = {}
+            try:
+                person_records = registry.resolve(credential, researcher)
+            except:
+                arg_list = [credential, researcher]
+                request_hash = self.api.key.compute_hash(arg_list)
+                person_records = registry.resolve(credential, researcher, request_hash)
+            for record in person_records:
+                if record['type'] in ['user']:
+                    person_record = record
+            if not person_record:
+                continue
+            person_dict = person_record
+            local_person = False
+            if peer:
+                peer_id = self.api.plshell.GetPeers(self.api.plauth, {'shortname': peer}, ['peer_id'])[0]['peer_id']
+                persons = self.api.plshell.GetPersons(self.api.plauth, {'email': [person_dict['email']], 'peer_id': peer_id}, ['person_id', 'key_ids'])
+                if not persons:
+                    persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
+                    if persons:
+                        local_person = True
+
+            else:
+                persons = self.api.plshell.GetPersons(self.api.plauth, [person_dict['email']], ['person_id', 'key_ids'])
+
+            if not persons:
+                person_id = self.api.plshell.AddPerson(self.api.plauth, person_dict)
+                self.api.plshell.UpdatePerson(self.api.plauth, person_id, {'enabled' : True})
+
+                # mark this person as an sfa peer record
+                if sfa_peer:
+                    peer_dict = {'type': 'user', 'hrn': researcher, 'peer_authority': sfa_peer, 'pointer': person_id}
+                    try:
+                        registry.register_peer_object(credential, peer_dict)
+                    except:
+                        arg_list = [credential]
+                        request_hash = self.api.key.compute_hash(arg_list)
+                        registry.register_peer_object(credential, peer_dict, request_hash)
+
+                if peer:
+                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+                key_ids = []
+            else:
+                person_id = persons[0]['person_id']
+                key_ids = persons[0]['key_ids']
+
+
+            # if this is a peer person, we must unbind them from the peer or PLCAPI will throw
+            # an error
+            if peer:
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer)
+                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site_id, peer)
+
+            self.api.plshell.AddPersonToSlice(self.api.plauth, person_dict['email'], slicename)
+            self.api.plshell.AddPersonToSite(self.api.plauth, person_dict['email'], site_id)
+            if peer and not local_person:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+            if peer:
+                self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', site_id, peer, remote_site_id)
+
+            self.verify_keys(registry, credential, person_dict, key_ids, person_id, peer, local_person)
+
+    def verify_keys(self, registry, credential, person_dict, key_ids, person_id, peer, local_person):
+        keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key'])
+        keys = [key['key'] for key in keylist]
+
+        # add keys that aren't already there
+        key_ids = person_dict['key_ids']
+        for personkey in person_dict['keys']:
+            if personkey not in keys:
+                key = {'key_type': 'ssh', 'key': personkey}
+                if peer:
+                    self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person_id, peer)
+                key_id = self.api.plshell.AddPersonKey(self.api.plauth, person_dict['email'], key)
+                if peer and not local_person:
+                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person_id, peer, person_dict['pointer'])
+                if peer:
+                    try: self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key_id, peer, key_ids.pop(0))
+                    except: pass
+
+    def create_slice_aggregate(self, hrn, rspec):
+
+        # Determine if this is a peer slice
+        peer = self.get_peer(hrn)
+        sfa_peer = self.get_sfa_peer(hrn)
+
+        spec = RSpec(rspec)
+        # Get the slice record from sfa
+        slicename = hrn_to_pl_slicename(hrn)
+        slice = {}
+        slice_record = None
+        registries = Registries(self.api)
+        registry = registries[self.api.hrn]
+        credential = self.api.getCredential()
+
+        site_id, remote_site_id = self.verify_site(registry, credential, hrn, peer, sfa_peer)
+        slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
+
+        # find out where this slice is currently running
+        nodelist = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        hostnames = [node['hostname'] for node in nodelist]
+
+        # get netspec details
+        nodespecs = spec.getDictsByTagName('NodeSpec')
+
+        # dict in which to store slice attributes to set for the nodes
+        nodes = {}
+        for nodespec in nodespecs:
+            if isinstance(nodespec['name'], list):
+                for nodename in nodespec['name']:
+                    nodes[nodename] = {}
+                    for k in nodespec.keys():
+                        rspec_attribute_value = nodespec[k]
+                        if (self.rspec_to_slice_tag.has_key(k)):
+                            slice_tag_name = self.rspec_to_slice_tag[k]
+                            nodes[nodename][slice_tag_name] = rspec_attribute_value
+            elif isinstance(nodespec['name'], StringTypes):
+                nodename = nodespec['name']
+                nodes[nodename] = {}
+                for k in nodespec.keys():
+                    rspec_attribute_value = nodespec[k]
+                    if (self.rspec_to_slice_tag.has_key(k)):
+                        slice_tag_name = self.rspec_to_slice_tag[k]
+                        nodes[nodename][slice_tag_name] = rspec_attribute_value
+
+        node_names = nodes.keys()
+        # remove nodes not in rspec
+        deleted_nodes = list(set(hostnames).difference(node_names))
+        # add nodes from rspec
+        added_nodes = list(set(node_names).difference(hostnames))
+
+        if peer:
+            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+
+        self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes)
+
+        # Add recognized slice tags
+        for node_name in node_names:
+            node = nodes[node_name]
+            for slice_tag in node.keys():
+                value = node[slice_tag]
+                if (isinstance(value, list)):
+                    value = value[0]
+
+                self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name)
+
+        self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
+        if peer:
+            self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+        return 1
+
+    def create_slice_smgr(self, hrn, rspec):
+        spec = RSpec()
+        tempspec = RSpec()
+        spec.parseString(rspec)
+        slicename = hrn_to_pl_slicename(hrn)
+        specDict = spec.toDict()
+        if specDict.has_key('RSpec'): specDict = specDict['RSpec']
+        if specDict.has_key('start_time'): start_time = specDict['start_time']
+        else: start_time = 0
+        if specDict.has_key('end_time'): end_time = specDict['end_time']
+        else: end_time = 0
+
+        rspecs = {}
+        aggregates = Aggregates(self.api)
+        credential = self.api.getCredential()
+
+        # split the netspecs into individual rspecs
+        netspecs = spec.getDictsByTagName('NetSpec')
+        for netspec in netspecs:
+            net_hrn = netspec['name']
+            resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec}
+            resourceDict = {'RSpec': resources}
+            tempspec.parseDict(resourceDict)
+            rspecs[net_hrn] = tempspec.toxml()
+
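+        # At this point rspecs maps each network hrn to an rspec covering
+        # only that network, e.g. {'<net_hrn>': '<RSpec>...</RSpec>'}
+        # (key shown is illustrative).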
+        # send each rspec to the appropriate aggregate/sm
+        caller_cred = self.caller_cred
+        for net_hrn in rspecs:
+            try:
+                # if we are directly connected to the aggregate then we can just send them the rspec
+                # if not, then we may be connected to an sm that's connected to the aggregate
+                if net_hrn in aggregates:
+                    # send the whole rspec to the local aggregate
+                    if net_hrn in [self.api.hrn]:
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, caller_cred)
+                        except:
+                            arg_list = [credential,hrn,rspec]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, caller_cred)
+                    else:
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                        except:
+                            arg_list = [credential,hrn,rspecs[net_hrn]]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                else:
+                    # let's forward this rspec to a sm that knows about the network
+                    arg_list = [credential, net_hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    for aggregate in aggregates:
+                        try:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn)
+                        except:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn, request_hash)
+                        if network_found:
+                            try:
+                                request_hash = None
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                            except:
+                                arg_list = [credential, hrn, rspecs[net_hrn]]
+                                request_hash = self.api.key.compute_hash(arg_list)
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+
+            except:
+                print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals()
+                traceback.print_exc()
+        return 1
+
+
+    def start_slice(self, hrn):
+        if self.api.interface in ['aggregate']:
+            self.start_slice_aggregate(hrn)
+        elif self.api.interface in ['slicemgr']:
+            self.start_slice_smgr(hrn)
+
+    def start_slice_aggregate(self, hrn):
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id'])
+        attribute_id = attributes[0]['slice_attribute_id']
+        self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "1")
+        return 1
+
+    def start_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        aggregates = Aggregates(self.api)
+        for aggregate in aggregates:
+            aggregates[aggregate].start_slice(credential, hrn)
+        return 1
+
+
+    def stop_slice(self, hrn):
+        if self.api.interface in ['aggregate']:
+            self.stop_slice_aggregate(hrn)
+        elif self.api.interface in ['slicemgr']:
+            self.stop_slice_smgr(hrn)
+
+    def stop_slice_aggregate(self, hrn):
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = self.api.plshell.GetSlices(self.api.plauth, {'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        attributes = self.api.plshell.GetSliceAttributes(self.api.plauth, {'slice_id': slice_id, 'name': 'enabled'}, ['slice_attribute_id'])
+        attribute_id = attributes[0]['slice_attribute_id']
+        self.api.plshell.UpdateSliceAttribute(self.api.plauth, attribute_id, "0")
+        return 1
+
+    def stop_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        aggregates = Aggregates(self.api)
+        arg_list = [credential, hrn]
+        request_hash = self.api.key.compute_hash(arg_list)
+        for aggregate in aggregates:
+            try:
+                aggregates[aggregate].stop_slice(credential, hrn)
+            except:
+                aggregates[aggregate].stop_slice(credential, hrn, request_hash)
+
diff --git a/sfa/managers/slicemanager/plc.py b/sfa/managers/slicemanager/plc.py
new file mode 100644
index 00000000..44ebfbfe
--- /dev/null
+++ b/sfa/managers/slicemanager/plc.py
@@ -0,0 +1,244 @@
+### $Id: slices.py 15842 2009-11-22 09:56:13Z anil $
+### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/plc/slices.py $
+
+import datetime
+import time
+import traceback
+import sys
+
+from types import StringTypes
+from sfa.util.misc import *
+from sfa.util.rspec import *
+from sfa.util.specdict import *
+from sfa.util.faults import *
+from sfa.util.storage import *
+from sfa.util.record import GeniRecord
+from sfa.util.policy import Policy
+from sfa.util.prefixTree import prefixTree
+from sfa.util.debug import log
+from sfa.server.aggregate import Aggregates
+from sfa.server.registry import Registries
+
+MAXINT = 2L**31-1
+
+class Slices(SimpleStorage):
+
+    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
+    def __init__(self, api, ttl = .5, caller_cred=None):
+        self.api = api
+        self.ttl = ttl
+        self.threshold = None
+        path = self.api.config.SFA_DATA_DIR
+        filename = ".".join([self.api.interface, self.api.hrn, "slices"])
+        filepath = path + os.sep + filename
+        self.slices_file = filepath
+        SimpleStorage.__init__(self, self.slices_file)
+        self.policy = Policy(self.api)
+        self.load()
+        self.caller_cred = caller_cred
+        self.aggregates = Aggregates(self.api)
+
+    def get_slivers(self, hrn, node=None):
+        """
+        Get the slivers at each aggregate
+        """
+        slivers = []
+        for aggregate in self.aggregates:
+            slivers += self.aggregates[aggregate].get_slivers(hrn)
+        return slivers
+
+    def refresh(self):
+        """
+        Update the cached list of slices
+        """
+        # Reload components list
+        now = datetime.datetime.now()
+        if not self.has_key('threshold') or not self.has_key('timestamp') or \
+           now > datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format))):
+            self.refresh_slices_smgr()
+
+    def refresh_slices_smgr(self):
+        slice_hrns = []
+        aggregates = Aggregates(self.api)
+        credential = self.api.getCredential()
+        for aggregate in aggregates:
+            success = False
+            # request hash is optional so let's try the call without it
+            try:
+                request_hash = None
+                slices = aggregates[aggregate].get_slices(credential, request_hash, self.caller_cred)
+                slice_hrns.extend(slices)
+                success = True
+            except:
+                print >> log, "%s" % (traceback.format_exc())
+                print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                arg_list = [credential]
+                request_hash = self.api.key.compute_hash(arg_list)
+                try:
+                    slices = aggregates[aggregate].get_slices(credential, request_hash, self.caller_cred)
+                    slice_hrns.extend(slices)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+        # update timestamp and threshold
+        timestamp = datetime.datetime.now()
+        hr_timestamp = timestamp.strftime(self.api.time_format)
+        delta = datetime.timedelta(hours=self.ttl)
+        threshold = timestamp + delta
+        hr_threshold = threshold.strftime(self.api.time_format)
+
+        slice_details = {'hrn': slice_hrns,
+                         'timestamp': hr_timestamp,
+                         'threshold': hr_threshold
+                        }
+        self.update(slice_details)
+        self.write()
+
+
+    def delete_slice(self, hrn):
+        self.delete_slice_smgr(hrn)
+
+    def delete_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        caller_cred = self.caller_cred
+        aggregates = Aggregates(self.api)
+        for aggregate in aggregates:
+            success = False
+            # request hash is optional so let's try the call without it
+            try:
+                request_hash = None
+                aggregates[aggregate].delete_slice(credential, hrn, request_hash, caller_cred)
+                success = True
+            except:
+                print >> log, "%s" % (traceback.format_exc())
+                print >> log, "Error calling delete slice at aggregate %s" % aggregate
+
+            # try sending the request hash if the previous call failed
+            if not success:
+                try:
+                    arg_list = [credential, hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    aggregates[aggregate].delete_slice(credential, hrn, request_hash, caller_cred)
+                    success = True
+                except:
+                    print >> log, "%s" % (traceback.format_exc())
+                    print >> log, "Error calling delete slice at aggregate %s" % aggregate
+
+    def create_slice(self, hrn, rspec):
+
+        # check our slice policy before we proceed
+        whitelist = self.policy['slice_whitelist']
+        blacklist = self.policy['slice_blacklist']
+
+        if whitelist and hrn not in whitelist or \
+           blacklist and hrn in blacklist:
+            policy_file = self.policy.policy_file
+            print >> log, "Slice %(hrn)s not allowed by policy %(policy_file)s" % locals()
+            return 1
+
+        self.create_slice_smgr(hrn, rspec)
+
+    def create_slice_smgr(self, hrn, rspec):
+        spec = RSpec()
+        tempspec = RSpec()
+        spec.parseString(rspec)
+        slicename = hrn_to_pl_slicename(hrn)
+        specDict = spec.toDict()
+        if specDict.has_key('RSpec'): specDict = specDict['RSpec']
+        if specDict.has_key('start_time'): start_time = specDict['start_time']
+        else: start_time = 0
+        if specDict.has_key('end_time'): end_time = specDict['end_time']
+        else: end_time = 0
+
+        rspecs = {}
+        aggregates = Aggregates(self.api)
+        credential = self.api.getCredential()
+
+        # split the netspecs into individual rspecs
+        netspecs = spec.getDictsByTagName('NetSpec')
+        for netspec in netspecs:
+            net_hrn = netspec['name']
+            resources = {'start_time': start_time, 'end_time': end_time, 'networks': netspec}
+            resourceDict = {'RSpec': resources}
+            tempspec.parseDict(resourceDict)
+            rspecs[net_hrn] = tempspec.toxml()
+
+        # send each rspec to the appropriate aggregate/sm
+        caller_cred = self.caller_cred
+        for net_hrn in rspecs:
+            try:
+                # if we are directly connected to the aggregate then we can just send them the rspec
+                # if not, then we may be connected to an sm that's connected to the aggregate
+                if net_hrn in aggregates:
+                    # send the whole rspec to the local aggregate
+                    if net_hrn in [self.api.hrn]:
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, caller_cred)
+                        except:
+                            arg_list = [credential,hrn,rspec]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspec, request_hash, caller_cred)
+                    else:
+                        try:
+                            request_hash = None
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                        except:
+                            arg_list = [credential,hrn,rspecs[net_hrn]]
+                            request_hash = self.api.key.compute_hash(arg_list)
+                            aggregates[net_hrn].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                else:
+                    # let's forward this rspec to a sm that knows about the network
+                    arg_list = [credential, net_hrn]
+                    request_hash = self.api.key.compute_hash(arg_list)
+                    for aggregate in aggregates:
+                        try:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn)
+                        except:
+                            network_found = aggregates[aggregate].get_aggregates(credential, net_hrn, request_hash)
+                        if network_found:
+                            try:
+                                request_hash = None
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+                            except:
+                                arg_list = [credential, hrn, rspecs[net_hrn]]
+                                request_hash = self.api.key.compute_hash(arg_list)
+                                aggregates[aggregate].create_slice(credential, hrn, rspecs[net_hrn], request_hash, caller_cred)
+
+            except:
+                print >> log, "Error creating slice %(hrn)s at aggregate %(net_hrn)s" % locals()
+                traceback.print_exc()
+        return 1
+
+
+    def start_slice(self, hrn):
+        self.start_slice_smgr(hrn)
+
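+    # start_slice_smgr and stop_slice_smgr below simply fan the request out
+    # to every known aggregate; stop_slice_smgr additionally falls back to
+    # passing a request hash when the plain call is rejected.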
+    def start_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        aggregates = Aggregates(self.api)
+        for aggregate in aggregates:
+            aggregates[aggregate].start_slice(credential, hrn)
+        return 1
+
+
+    def stop_slice(self, hrn):
+        self.stop_slice_smgr(hrn)
+
+    def stop_slice_smgr(self, hrn):
+        credential = self.api.getCredential()
+        aggregates = Aggregates(self.api)
+        arg_list = [credential, hrn]
+        request_hash = self.api.key.compute_hash(arg_list)
+        for aggregate in aggregates:
+            try:
+                aggregates[aggregate].stop_slice(credential, hrn)
+            except:
+                aggregates[aggregate].stop_slice(credential, hrn, request_hash)
+
diff --git a/sfa/server/component.py b/sfa/server/component.py
index ab76961d..e811a2d4 100644
--- a/sfa/server/component.py
+++ b/sfa/server/component.py
@@ -10,8 +10,7 @@
 import os
 import time
 import sys
 
-from sfa.util.geniserver import GeniServer
-from sfacomponent.plc.api import ComponentAPI
+from sfa.util.componentserver import ComponentServer
 
 # GeniLight client support is optional
 try:
@@ -32,7 +31,5 @@ class Component(GeniServer):
     # @param cert_file certificate filename containing public key (could be a GID file)
 
     def __init__(self, ip, port, key_file, cert_file):
-        GeniServer.__init__(self, ip, port, key_file, cert_file)
-        # re-initialize the servers api as Component api
-        self.server.api = ComponentAPI(interface='component', key_file=key_file, cert_file=cert_file)
+        ComponentServer.__init__(self, ip, port, key_file, cert_file)
         self.server.interface = 'component'
-- 
2.43.0