-- test rpms: build/install
+- Tag
+* test rpm build/install
+
+- Trunk
+* test federation
+* test sub authority import and federation
+
+- Client
+ * update getNodes to use lxml.etree for parsing the rspec
- Stop invalid users
* a recently disabled/deleted user may still have a valid cred. Keep a list of valid/invalid users on the aggregate and check callers against this list
- Component manager
+ * GetGids - make this work for peer slices
* GetTicket - must verify_{site,slice,person,keys} on remote aggregate
* Redeem ticket - RedeemTicket/AdminTicket not working. Why?
* install the slice and node gid when the slice is created (create NM plugin to execute sfa_component_setup.py ?)
-- Protogeni
-* agree on standard set of functon calls
-* agree on standard set of privs
-* on permission error, return priv needed to make call
-* cache slice resource states (if aggregate goes down, how do we know what
- slices were on it and recreate them? do we make some sort of transaction log)
-
- Registry
-* sign peer gids
-* update call should attempt to push updates to federated peers if
- the peer has a record for an object that is updated locally
-* api.update_membership() shoudl behave more like resolve when looking up records (attempt to resolve records at federated registeries) instead of only looking in the local registry
* move db tables into db with less overhead (tokyocabinet?)
-* make resolve, fill_record_info more fault tolerent. Skip records with failures
-- Auth Service
+- GUI/Auth Service
* develop a simple service where users auth using username/passord and
receive their cred
* service manages users key/cert,creds
-
-- GUI
- * requires user's cred (depends on Auth Service above)
+ * gui requires user's cred (depends on Auth Service above)
- SM call routing
* sfi -a option should send request to sm with an extra argument to
specify which am to contact instead of connecting directly to the am
(am may not trust client directly)
+- Protogeni
+* agree on standard set of function calls
+* agree on standard set of privs
+* on permission error, return priv needed to make call
+* cache slice resource states (if aggregate goes down, how do we know what
+ slices were on it and recreate them? do we make some sort of transaction log)
+
+
+Questions
+=========
+- SM/Aggregate
+* should the rspec contain only the resources a slice is using or all resources available and mark what the slice is using.
+
- Initscripts on sfa / geniwrapper
* should sfa have native initscript support or should we piggyback off of myplc?
* should this be in the rspec
<description>The hrn of the registry's root auth.</description>
</variable>
- <variable id="level1_auth" type="string">
- <name>Level1 Authority</name>
- <value></value>
- <description>The hrn of the registry's level1 auth (sub
- authority). The full name of this interface (only secify if
- this interface is a sub authority).</description>
- </variable>
- </variablelist>
+ </variablelist>
</category>
<!-- ======================================== -->
usual_variables = [
"SFA_INTERFACE_HRN",
"SFA_REGISTRY_ROOT_AUTH",
- "SFA_REGISTRY_LEVEL1_AUTH",
"SFA_REGISTRY_HOST",
"SFA_AGGREGATE_HOST",
"SFA_SM_HOST",
'config/gen-sfa-cm-config.py',
'sfa/plc/sfa-import-plc.py',
'sfa/plc/sfa-nuke-plc.py',
+ 'sfa/server/sfa-ca.py',
'sfa/server/sfa-server.py',
'sfa/server/sfa-clean-peer-records.py',
'sfa/server/sfa_component_setup.py',
Requires: python-ZSI
# xmlbuilder depends on lxml
Requires: python-lxml
+Requires: python-setuptools
# python 2.5 has uuid module added, for python 2.4 we still need it.
# we can't really check for if we can load uuid as a python module,
%post cm
chkconfig --add sfa-cm
%changelog
-* Thu Apr 08 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-11
+* Thu May 11 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-11
- SfaServer now uses a pool of threads to handle requests concurrently
- sfa.util.rspec no longer used to process/manage rspecs (deprecated). This is now handled by sfa.plc.network and is not backwards compatible
-- PIs can now get a slice credential for any slice at thier site without having to be a member of the slice
+- PIs can now get a slice credential for any slice at their site without having to be a member of the slice
- Registry records for federated peers (defined in registries.xml, aggregates.xml) updated when sfa service is started
- Interfaces will try to fetch and install gids from peers listed in registries.xml/aggregates.xml if gid is not found in /etc/sfa/trusted_roots dir
-- Componet manager does not install gid files if slice already has them
-
+- Component manager does not install gid files if slice already has them
+- Server automatically fetches and installs peer certificates (defined in registries/aggregates.xml) when service is restarted.
+- fix credential verification exploit (verify that the trusted signer is a parent of the object it signed)
+- made it easier for root authorities to sign their sub's certificate using the sfa-ca.py (sfa/server/sfa-ca.py) tool
+
* Thu Jan 21 2010 anil vengalil <avengali@sophia.inria.fr> - sfa-0.9-10
- This tag is quite same as the previous one (sfa-0.9-9) except that the vini and max aggregate managers are also updated for urn support. Other features are:
- - sfa-config-tty now has the same features like plc-config-tty
import tempfile
import traceback
import socket
+from lxml import etree
+from StringIO import StringIO
from types import StringTypes, ListType
from optparse import OptionParser
from sfa.trust.certificate import Keypair, Certificate
from sfa.util.sfaticket import SfaTicket
from sfa.util.record import *
from sfa.util.namespace import *
-from sfa.util.rspec import RSpec
from sfa.util.xmlrpcprotocol import ServerException
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
from sfa.util.config import Config
+
# utility methods here
# display methods
def display_rspec(rspec, format = 'rspec'):
if format in ['dns']:
- spec = RSpec()
- spec.parseString(rspec)
- hostnames = []
- nodespecs = spec.getDictsByTagName('NodeSpec')
- for nodespec in nodespecs:
- if nodespec.has_key('name') and nodespec['name']:
- if isinstance(nodespec['name'], ListType):
- hostnames.extend(nodespec['name'])
- elif isinstance(nodespec['name'], StringTypes):
- hostnames.append(nodespec['name'])
- result = hostnames
+ tree = etree.parse(StringIO(rspec))
+ root = tree.getroot()
+ result = root.xpath("./network/site/node/hostname/text()")
elif format in ['ip']:
- spec = RSpec()
- spec.parseString(rspec)
- ips = []
- ifspecs = spec.getDictsByTagName('IfSpec')
- for ifspec in ifspecs:
- if ifspec.has_key('addr') and ifspec['addr']:
- ips.append(ifspec['addr'])
- result = ips
+ # The IP address is not yet part of the new RSpec
+ # so this doesn't do anything yet.
+ tree = etree.parse(StringIO(rspec))
+ root = tree.getroot()
+ result = root.xpath("./network/site/node/ipv4/text()")
else:
result = rspec
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="verbose mode")
+ parser.add_option("-D", "--debug",
+ action="store_true", dest="debug", default=False,
+ help="Debug (xml-rpc) protocol messages")
parser.add_option("-p", "--protocol",
dest="protocol", default="xmlrpc",
help="RPC protocol (xmlrpc or soap)")
self.cert_file = cert_file
self.cert = Certificate(filename=cert_file)
# Establish connection to server(s)
- self.registry = xmlrpcprotocol.get_server(reg_url, key_file, cert_file)
- self.slicemgr = xmlrpcprotocol.get_server(sm_url, key_file, cert_file)
+ self.registry = xmlrpcprotocol.get_server(reg_url, key_file, cert_file, self.options.debug)
+ self.slicemgr = xmlrpcprotocol.get_server(sm_url, key_file, cert_file, self.options.debug)
return
#
record = records[0]
cm_port = "12346"
url = "https://%s:%s" % (record['hostname'], cm_port)
- return xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
+ return xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.debug)
#
# Following functions implement the commands
raise Exception, "No such aggregate %s" % agg_hrn
aggregate = aggregates[0]
url = "http://%s:%s" % (aggregate['addr'], aggregate['port'])
- server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
+ server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.debug)
if args:
cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
hrn = args[0]
raise Exception, "No such aggregate %s" % opts.aggregate
aggregate = aggregates[0]
url = "http://%s:%s" % (aggregate['addr'], aggregate['port'])
- server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.protocol)
+ server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.debug)
return server.create_slice(slice_cred, slice_hrn, rspec)
# get a ticket for the specified slice
raise Exception, "No such aggregate %s" % opts.aggregate
aggregate = aggregates[0]
url = "http://%s:%s" % (aggregate['addr'], aggregate['port'])
- server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.protocol)
+ server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.debug)
ticket_string = server.get_ticket(slice_cred, slice_hrn, rspec)
file = os.path.join(self.options.sfi_dir, get_leaf(slice_hrn) + ".ticket")
print "writing ticket to ", file
user_cred = self.get_user_cred()
slice_cred = self.get_slice_cred(slice_hrn).save_to_string(save_parents=True)
- # get a list node hostnames from the nodespecs in the rspec
- rspec = RSpec()
- rspec.parseString(ticket.rspec)
- nodespecs = rspec.getDictsByTagName('NodeSpec')
- hostnames = [nodespec['name'] for nodespec in nodespecs]
+ # get a list of node hostnames from the RSpec
+ tree = etree.parse(StringIO(ticket.rspec))
+ root = tree.getroot()
+ hostnames = root.xpath("./network/site/node/hostname/text()")
# create an xmlrpc connection to the component manager at each of these
# components and gall redeem_ticket
cm_port = "12346"
url = "https://%(hostname)s:%(cm_port)s" % locals()
print "Calling redeem_ticket at %(url)s " % locals(),
- cm = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
+ cm = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file, self.options.debug)
cm.redeem_ticket(slice_cred, ticket.save_to_string(save_parents=True))
print "Success"
except socket.gaierror:
return hostnames
def create_slice(api, xrn, xml):
- hrn, type = urn_to_hrn(xrn)
- peer = None
-
"""
Verify HRN and initialize the slice record in PLC if necessary.
"""
+
+ hrn, type = urn_to_hrn(xrn)
+ peer = None
slices = Slices(api)
peer = slices.get_peer(hrn)
sfa_peer = slices.get_sfa_peer(hrn)
slice = network.get_slice(api, hrn)
current = __get_hostnames(slice.get_nodes())
-
+
network.addRSpec(xml, api.config.SFA_AGGREGATE_RSPEC_SCHEMA)
request = __get_hostnames(network.nodesWithSlivers())
return 1
def get_slices(api):
- # XX just import the legacy module and excute that until
- # we transition the code to this module
- from sfa.plc.slices import Slices
- slices = Slices(api)
- slices.refresh()
- return [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slices['hrn']]
-
-
+ # look in cache first
+ if api.cache:
+ slices = api.cache.get('slices')
+ if slices:
+ return slices
+
+ # get data from db
+ slices = api.plshell.GetSlices(api.plauth, {'peer_id': None}, ['name'])
+ slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+ # cache the result
+ if api.cache:
+ api.cache.add('slices', slice_urns)
+
+ return slice_urns
+
def get_rspec(api, xrn=None, origin_hrn=None):
+ # look in cache first
+ if api.cache and not xrn:
+ rspec = api.cache.get('nodes')
+ if rspec:
+ return rspec
+
hrn, type = urn_to_hrn(xrn)
network = Network(api)
if (hrn):
if network.get_slice(api, hrn):
network.addSlice()
- return network.toxml()
+ rspec = network.toxml()
+
+ # cache the result
+ if api.cache and not xrn:
+ api.cache.add('nodes', rspec)
+
+ return rspec
"""
Returns the request context required by sfatables. At some point, this
def get_slices(api):
return
-def roboot():
+def reboot():
return
def redeem_ticket(api, ticket_string):
slices = eval(xids[1])
return slices.keys()
-def roboot():
+def reboot():
os.system("/sbin/reboot")
def redeem_ticket(api, ticket_string):
tree = prefixTree()
tree.load(registry_hrns)
registry_hrn = tree.best_match(hrn)
-
+
#if there was no match then this record belongs to an unknow registry
if not registry_hrn:
raise MissingAuthority(xrn)
from sfa.util.record import SfaRecord
from sfa.util.policy import Policy
from sfa.util.prefixTree import prefixTree
-from sfa.util.rspec import *
from sfa.util.sfaticket import *
from sfa.util.debug import log
import sfa.plc.peers as peers
return 1
def get_slices(api):
- # XX just import the legacy module and excute that until
- # we transition the code to this module
- from sfa.plc.slices import Slices
- slices = Slices(api)
- slices.refresh()
- return [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slices['hrn']]
-
+ # look in cache first
+ if api.cache:
+ slices = api.cache.get('slices')
+ if slices:
+ return slices
+
+ # fetch from aggregates
+ slices = []
+ credential = api.getCredential()
+ for aggregate in api.aggregates:
+ try:
+ tmp_slices = api.aggregates[aggregate].get_slices(credential)
+ slices.extend(tmp_slices)
+ except:
+ print >> log, "%s" % (traceback.format_exc())
+ print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
+
+ # cache the result
+ if api.cache:
+ api.cache.add('slices', slices)
+
+ return slices
+
def get_rspec(api, xrn=None, origin_hrn=None):
+ # look in cache first
+ if api.cache and not xrn:
+ rspec = api.cache.get('nodes')
+ if rspec:
+ return rspec
+
hrn, type = urn_to_hrn(xrn)
rspec = None
-
aggs = api.aggregates
cred = api.getCredential()
for agg in aggs:
for request in root.iterfind("./request"):
rspec.append(deepcopy(request))
- return etree.tostring(rspec, xml_declaration=True, pretty_print=True)
+ rspec = etree.tostring(rspec, xml_declaration=True, pretty_print=True)
+ if api.cache and not xrn:
+ api.cache.add('nodes', rspec)
+
+ return rspec
"""
Returns the request context required by sfatables. At some point, this
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the credential
- self.api.auth.check(cred, 'createslice')
+ self.api.auth.check(cred, 'createslice', hrn)
manager_base = 'sfa.managers'
if self.api.interface in ['aggregate']:
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the credential
- self.api.auth.check(cred, 'deleteslice')
+ self.api.auth.check(cred, 'deleteslice', hrn)
# send the call to the right manager
manager_base = 'sfa.managers'
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.auth import Auth
from sfa.util.config import Config
-from sfa.plc.nodes import Nodes
# RSpecManager_pl is not used. This line is a check that ensures that everything is in place for the import to work.
import sfa.rspecs.aggregates.rspec_manager_pl
from sfa.trust.credential import Credential
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the cred
- self.api.auth.check(cred, 'listnodes')
+ self.api.auth.check(cred, 'listnodes', hrn)
# send the call to the right manager
manager_base = 'sfa.managers'
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the cred
- self.api.auth.check(cred, "getticket")
+ self.api.auth.check(cred, "getticket", hrn)
# set the right outgoing rules
manager_base = 'sfa.managers'
def call(self, cred, xrn, origin_hrn=None):
hrn, type = urn_to_hrn(xrn)
- self.api.auth.check(cred, 'resetslice')
+ self.api.auth.check(cred, 'resetslice', hrn)
# send the call to the right manager
manager_base = 'sfa.managers'
if self.api.interface in ['component']:
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the cred
- self.api.auth.check(cred, 'startslice')
+ self.api.auth.check(cred, 'startslice', hrn)
# send the call to the right manager
manager_base = 'sfa.managers'
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
# validate the cred
- self.api.auth.check(cred, 'stopslice')
+ self.api.auth.check(cred, 'stopslice', hrn)
# send the call to the right manager
manager_base = 'sfa.managers'
from sfa.util.api import *
from sfa.util.nodemanager import NodeManager
from sfa.util.sfalogging import *
+from collections import defaultdict
def list_to_dict(recs, key):
"""
import sfa.methods
methods = sfa.methods.all
- def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", methods='sfa.methods', \
- peer_cert = None, interface = None, key_file = None, cert_file = None):
+ def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8",
+ methods='sfa.methods', peer_cert = None, interface = None,
+ key_file = None, cert_file = None, cache = None):
BaseAPI.__init__(self, config=config, encoding=encoding, methods=methods, \
peer_cert=peer_cert, interface=interface, key_file=key_file, \
- cert_file=cert_file)
+ cert_file=cert_file, cache=cache)
self.encoding = encoding
return records
def fill_record_sfa_info(self, records):
+
+ def startswith(prefix, values):
+ return [value for value in values if value.startswith(prefix)]
+
# get person ids
person_ids = []
site_ids = []
table = self.SfaTable()
person_list, persons = [], {}
person_list = table.find({'type': 'user', 'pointer': person_ids})
- persons = list_to_dict(person_list, 'pointer')
+ # create a dict of hrns keyed on the sfa record's pointer.
+ # Its possible for multiple records to have the same pointer so
+ # the dict's value will be a list of hrns.
+ persons = defaultdict(list)
+ for person in person_list:
+ persons[person['pointer']].append(person)
# get the pl records
pl_person_list, pl_persons = [], {}
sfa_info = {}
type = record['type']
if (type == "slice"):
- # slice users
- researchers = [persons[person_id]['hrn'] for person_id in record['person_ids'] \
- if person_id in persons]
- sfa_info['researcher'] = researchers
+ # all slice users are researchers
+ record['PI'] = []
+ record['researchers'] = []
+ for person_id in record['person_ids']:
+ hrns = [person['hrn'] for person in persons[person_id]]
+ record['researchers'].extend(hrns)
+
# pis at the slice's site
pl_pis = site_pis[record['site_id']]
- pi_ids = [pi['person_id'] for pi in pl_pis]
- sfa_info['PI'] = [persons[person_id]['hrn'] for person_id in pi_ids]
+ pi_ids = [pi['person_id'] for pi in pl_pis]
+ for person_id in pi_ids:
+ hrns = [person['hrn'] for person in persons[person_id]]
+ record['PI'].extend(hrns)
elif (type == "authority"):
- pis, techs, admins = [], [], []
+ record['PI'] = []
+ record['operator'] = []
+ record['owner'] = []
for pointer in record['person_ids']:
if pointer not in persons or pointer not in pl_persons:
# this means there is not sfa or pl record for this user
continue
- hrn = persons[pointer]['hrn']
+ hrns = [person['hrn'] for person in persons[pointer]]
roles = pl_persons[pointer]['roles']
if 'pi' in roles:
- pis.append(hrn)
+ record['PI'].extend(hrns)
if 'tech' in roles:
- techs.append(hrn)
+ record['operator'].extend(hrns)
if 'admin' in roles:
- admins.append(hrn)
- sfa_info['PI'] = pis
- sfa_info['operator'] = techs
- sfa_info['owner'] = admins
+ record['owner'].extend(hrns)
# xxx TODO: OrganizationName
elif (type == "node"):
sfa_info['dns'] = record.get("hostname", "")
return True
-"""
-A Network is a compound object consisting of:
-* a dictionary mapping site IDs to Site objects
-* a dictionary mapping node IDs to Node objects
-* a dictionary mapping interface IDs to Iface objects
-"""
class Network:
+ """
+ A Network is a compound object consisting of:
+ * a dictionary mapping site IDs to Site objects
+ * a dictionary mapping node IDs to Node objects
+ * a dictionary mapping interface IDs to Iface objects
+ """
def __init__(self, api, type = "SFA"):
self.api = api
self.type = type
self.tagtypes = self.get_tag_types(api)
self.slice = None
- """ Lookup site based on id or idtag value """
def lookupSite(self, id):
+ """ Lookup site based on id or idtag value """
val = None
if isinstance(id, basestring):
id = int(id.lstrip('s'))
sites.append(self.sites[s])
return sites
- """ Lookup node based on id or idtag value """
def lookupNode(self, id):
+ """ Lookup node based on id or idtag value """
val = None
if isinstance(id, basestring):
id = int(id.lstrip('n'))
nodes.append(self.nodes[n])
return nodes
- """ Lookup iface based on id or idtag value """
def lookupIface(self, id):
+ """ Lookup iface based on id or idtag value """
val = None
if isinstance(id, basestring):
id = int(id.lstrip('i'))
tags.append(self.tagtypes[t])
return tags
- """
- Process the elements under <sliver_defaults> or <sliver>
- """
def __process_attributes(self, element, node=None):
+ """
+ Process the elements under <sliver_defaults> or <sliver>
+ """
if element is None:
return
if e is not None:
self.slice.update_tag(tt.tagname, e.text, node)
- """
- Annotate the objects in the Network with information from the RSpec
- """
def addRSpec(self, xml, schema=None):
+ """
+ Annotate the objects in the Network with information from the RSpec
+ """
try:
tree = etree.parse(StringIO(xml))
except etree.XMLSyntaxError:
return
- """
- Annotate the objects in the Network with information from the slice
- """
def addSlice(self):
+ """
+ Annotate the objects in the Network with information from the slice
+ """
slice = self.slice
if not slice:
raise InvalidRSpec("no slice associated with network")
for node in slice.get_nodes():
node.add_sliver()
- """
- Write any slice tags that have been added or modified back to the DB
- """
def updateSliceTags(self):
+ """
+ Write any slice tags that have been added or modified back to the DB
+ """
for tag in self.getSliceTags():
if tag.category == 'slice/rspec' and not tag.was_updated() and tag.permit_update(None, 40):
# The user wants to delete this tag
if tag.slice_id == self.slice.id:
tag.write(self.api)
- """
- Produce XML directly from the topology specification.
- """
def toxml(self):
+ """
+ Produce XML directly from the topology specification.
+ """
xml = XMLBuilder(format = True, tab_step = " ")
with xml.RSpec(type=self.type):
if self.slice:
header = '<?xml version="1.0"?>\n'
return header + str(xml)
- """
- Create a dictionary of site objects keyed by site ID
- """
def get_sites(self, api):
+ """
+ Create a dictionary of site objects keyed by site ID
+ """
tmp = []
for site in api.plshell.GetSites(api.plauth, {'peer_id': None}):
t = site['site_id'], Site(self, site)
return dict(tmp)
- """
- Create a dictionary of node objects keyed by node ID
- """
def get_nodes(self, api):
+ """
+ Create a dictionary of node objects keyed by node ID
+ """
tmp = []
for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
t = node['node_id'], Node(self, node)
tmp.append(t)
return dict(tmp)
- """
- Create a dictionary of node objects keyed by node ID
- """
def get_ifaces(self, api):
+ """
+ Create a dictionary of node objects keyed by node ID
+ """
tmp = []
for iface in api.plshell.GetInterfaces(api.plauth):
t = iface['interface_id'], Iface(self, iface)
tmp.append(t)
return dict(tmp)
- """
- Create a dictionary of slicetag objects keyed by slice tag ID
- """
def get_slice_tags(self, api):
+ """
+ Create a dictionary of slicetag objects keyed by slice tag ID
+ """
tmp = []
for tag in api.plshell.GetSliceTags(api.plauth):
t = tag['slice_tag_id'], Slicetag(tag)
tmp.append(t)
return dict(tmp)
- """
- Create a list of tagtype obects keyed by tag name
- """
def get_tag_types(self, api):
+ """
+ Create a list of tagtype objects keyed by tag name
+ """
tmp = []
for tag in api.plshell.GetTagTypes(api.plauth):
t = tag['tagname'], TagType(tag)
tmp.append(t)
return dict(tmp)
- """
- Return a Slice object for a single slice
- """
def get_slice(self, api, hrn):
+ """
+ Return a Slice object for a single slice
+ """
slicename = hrn_to_pl_slicename(hrn)
slice = api.plshell.GetSlices(api.plauth, [slicename])
if len(slice):
+++ /dev/null
-### $Id$
-### $URL$
-
-import os
-import time
-import datetime
-import sys
-import traceback
-
-from sfa.util.namespace import *
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.storage import *
-from sfa.util.debug import log
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.policy import Policy
-
-class Nodes(SimpleStorage):
-
- def __init__(self, api, ttl = 1, origin_hrn=None):
- self.api = api
- self.ttl = ttl
- self.threshold = None
- path = self.api.config.SFA_DATA_DIR
- filename = ".".join([self.api.interface, self.api.hrn, "nodes"])
- filepath = path + os.sep + filename
- self.nodes_file = filepath
- SimpleStorage.__init__(self, self.nodes_file)
- self.policy = Policy(api)
- self.load()
- self.origin_hrn = origin_hrn
-
-
- def refresh(self):
- """
- Update the cached list of nodes
- """
-
- # Reload components list
- now = datetime.datetime.now()
- if not self.has_key('threshold') or not self.has_key('timestamp') or \
- now > datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format))):
- if self.api.interface in ['aggregate']:
- self.refresh_nodes_aggregate()
- elif self.api.interface in ['slicemgr']:
- self.refresh_nodes_smgr()
-
- def refresh_nodes_aggregate(self):
- rspec = RSpec()
- rspec.parseString(self.get_rspec())
-
- # filter nodes according to policy
- blist = self.policy['node_blacklist']
- wlist = self.policy['node_whitelist']
- rspec.filter('NodeSpec', 'name', blacklist=blist, whitelist=wlist)
-
- # extract ifspecs from rspec to get ips'
- ips = []
- ifspecs = rspec.getDictsByTagName('IfSpec')
- for ifspec in ifspecs:
- if ifspec.has_key('addr') and ifspec['addr']:
- ips.append(ifspec['addr'])
-
- # extract nodespecs from rspec to get dns names
- hostnames = []
- nodespecs = rspec.getDictsByTagName('NodeSpec')
- for nodespec in nodespecs:
- if nodespec.has_key('name') and nodespec['name']:
- hostnames.append(nodespec['name'])
-
- # update timestamp and threshold
- timestamp = datetime.datetime.now()
- hr_timestamp = timestamp.strftime(self.api.time_format)
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
- hr_threshold = threshold.strftime(self.api.time_format)
-
- node_details = {}
- node_details['rspec'] = rspec.toxml()
- node_details['ip'] = ips
- node_details['dns'] = hostnames
- node_details['timestamp'] = hr_timestamp
- node_details['threshold'] = hr_threshold
- # save state
- self.update(node_details)
- self.write()
-
- def get_rspec_smgr(self, xrn = None):
- hrn, type = urn_to_hrn(xrn)
- # convert and threshold to ints
- if self.has_key('timestamp') and self['timestamp']:
- hr_timestamp = self['timestamp']
- timestamp = datetime.datetime.fromtimestamp(time.mktime(time.strptime(hr_timestamp, self.api.time_format)))
- hr_threshold = self['threshold']
- threshold = datetime.datetime.fromtimestamp(time.mktime(time.strptime(hr_threshold, self.api.time_format)))
- else:
- timestamp = datetime.datetime.now()
- hr_timestamp = timestamp.strftime(self.api.time_format)
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
- hr_threshold = threshold.strftime(self.api.time_format)
-
- start_time = int(timestamp.strftime("%s"))
- end_time = int(threshold.strftime("%s"))
- duration = end_time - start_time
-
- aggregates = self.api.aggregates
- rspecs = {}
- networks = []
- rspec = RSpec()
- credential = self.api.getCredential()
- origin_hrn = self.origin_hrn
- for aggregate in aggregates:
- if aggregate not in [self.api.auth.client_cred.get_gid_caller().get_hrn()]:
- try:
- # get the rspec from the aggregate
- agg_rspec = aggregates[aggregate].get_resources(credential, xrn, origin_hrn)
- # extract the netspec from each aggregates rspec
- rspec.parseString(agg_rspec)
- networks.extend([{'NetSpec': rspec.getDictsByTagName('NetSpec')}])
- except:
- # XX print out to some error log
- print >> log, "Error getting resources at aggregate %s" % aggregate
- traceback.print_exc(log)
- print >> log, "%s" % (traceback.format_exc())
- # create the rspec dict
- resources = {'networks': networks, 'start_time': start_time, 'duration': duration}
- resourceDict = {'RSpec': resources}
- # convert rspec dict to xml
- rspec.parseDict(resourceDict)
- return rspec.toxml()
-
- def refresh_nodes_smgr(self):
-
- rspec = RSpec(xml=self.get_rspec_smgr())
- # filter according to policy
- blist = self.policy['node_blacklist']
- wlist = self.policy['node_whitelist']
- rspec.filter('NodeSpec', 'name', blacklist=blist, whitelist=wlist)
-
- # update timestamp and threshold
- timestamp = datetime.datetime.now()
- hr_timestamp = timestamp.strftime(self.api.time_format)
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
- hr_threshold = threshold.strftime(self.api.time_format)
-
- nodedict = {'rspec': rspec.toxml(),
- 'timestamp': hr_timestamp,
- 'threshold': hr_threshold}
-
- self.update(nodedict)
- self.write()
-
- def get_rspec(self, xrn = None):
-
- if self.api.interface in ['slicemgr']:
- return self.get_rspec_smgr(xrn)
- elif self.api.interface in ['aggregate']:
- return self.get_rspec_aggregate(xrn)
-
- def get_rspec_aggregate(self, xrn = None):
- """
- Get resource information from PLC
- """
- hrn, type = urn_to_hrn(xrn)
- slicename = None
- # Get the required nodes
- if not hrn:
- nodes = self.api.plshell.GetNodes(self.api.plauth, {'peer_id': None})
- try: linkspecs = self.api.plshell.GetLinkSpecs() # if call is supported
- except: linkspecs = []
- else:
- slicename = hrn_to_pl_slicename(hrn)
- slices = self.api.plshell.GetSlices(self.api.plauth, [slicename])
- if not slices:
- nodes = []
- else:
- slice = slices[0]
- node_ids = slice['node_ids']
- nodes = self.api.plshell.GetNodes(self.api.plauth, {'peer_id': None, 'node_id': node_ids})
-
- # Filter out whitelisted nodes
- public_nodes = lambda n: n.has_key('slice_ids_whitelist') and not n['slice_ids_whitelist']
-
- # ...only if they are not already assigned to this slice.
- if (not slicename):
- nodes = filter(public_nodes, nodes)
-
- # Get all network interfaces
- interface_ids = []
- for node in nodes:
- # The field name has changed in plcapi 4.3
- if self.api.plshell_version in ['4.2']:
- interface_ids.extend(node['nodenetwork_ids'])
- elif self.api.plshell_version in ['4.3']:
- interface_ids.extend(node['interface_ids'])
- else:
- raise SfaAPIError, "Unsupported plcapi version ", \
- self.api.plshell_version
-
- if self.api.plshell_version in ['4.2']:
- interfaces = self.api.plshell.GetNodeNetworks(self.api.plauth, interface_ids)
- elif self.api.plshell_version in ['4.3']:
- interfaces = self.api.plshell.GetInterfaces(self.api.plauth, interface_ids)
- else:
- raise SfaAPIError, "Unsupported plcapi version ", \
- self.api.plshell_version
- interface_dict = {}
- for interface in interfaces:
- if self.api.plshell_version in ['4.2']:
- interface_dict[interface['nodenetwork_id']] = interface
- elif self.api.plshell_version in ['4.3']:
- interface_dict[interface['interface_id']] = interface
- else:
- raise SfaAPIError, "Unsupported plcapi version", \
- self.api.plshell_version
-
- # join nodes with thier interfaces
- for node in nodes:
- node['interfaces'] = []
- if self.api.plshell_version in ['4.2']:
- for nodenetwork_id in node['nodenetwork_ids']:
- node['interfaces'].append(interface_dict[nodenetwork_id])
- elif self.api.plshell_version in ['4.3']:
- for interface_id in node['interface_ids']:
- node['interfaces'].append(interface_dict[interface_id])
- else:
- raise SfaAPIError, "Unsupported plcapi version", \
- self.api.plshell_version
-
- # convert and threshold to ints
- if self.has_key('timestamp') and self['timestamp']:
- timestamp = datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['timestamp'], self.api.time_format)))
- threshold = datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format)))
- else:
- timestamp = datetime.datetime.now()
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
-
- start_time = int(timestamp.strftime("%s"))
- end_time = int(threshold.strftime("%s"))
- duration = end_time - start_time
-
- # create the plc dict
- networks = [{'nodes': nodes,
- 'name': self.api.hrn,
- 'start_time': start_time,
- 'duration': duration}]
- if not hrn:
- networks[0]['links'] = linkspecs
- resources = {'networks': networks, 'start_time': start_time, 'duration': duration}
-
- # convert the plc dict to an rspec dict
- resourceDict = RSpecDict(resources)
- # convert the rspec dict to xml
- rspec = RSpec()
- rspec.parseDict(resourceDict)
- return rspec.toxml()
-
if not config.SFA_REGISTRY_ENABLED:
sys.exit(0)
root_auth = config.SFA_REGISTRY_ROOT_AUTH
- level1_auth = config.SFA_REGISTRY_LEVEL1_AUTH
+ interface_hrn = config.SFA_INTERFACE_HRN
keys_filename = config.config_path + os.sep + 'person_keys.py'
sfaImporter = sfaImport(logger)
shell = sfaImporter.shell
if not table.exists():
table.create()
- if not level1_auth or level1_auth in ['']:
- level1_auth = None
-
- if not level1_auth:
- sfaImporter.create_top_level_auth_records(root_auth)
- import_auth = root_auth
- else:
- if not AuthHierarchy.auth_exists(level1_auth):
- AuthHierarchy.create_auth(level1_auth)
- sfaImporter.create_top_level_auth_records(level1_auth)
- import_auth = level1_auth
+ # create root authority
+ sfaImporter.create_top_level_auth_records(root_auth)
+ if not root_auth == interface_hrn:
+ sfaImporter.create_top_level_auth_records(interface_hrn)
- trace("Import: adding " + import_auth + " to trusted list", logger)
- authority = AuthHierarchy.get_auth_info(import_auth)
+ trace("Import: adding " + interface_hrn + " to trusted list", logger)
+ authority = AuthHierarchy.get_auth_info(interface_hrn)
TrustedRoots.add_gid(authority.get_gid_object())
- if ".vini" in import_auth and import_auth.endswith('vini'):
+ if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
# create a fake internet2 site first
i2site = {'name': 'Internet2', 'abbreviated_name': 'I2',
'login_base': 'internet2', 'site_id': -1}
- sfaImporter.import_site(import_auth, i2site)
+ sfaImporter.import_site(interface_hrn, i2site)
# create dict of all existing sfa records
existing_records = {}
# start importing
for site in sites:
- site_hrn = import_auth + "." + site['login_base']
+ site_hrn = interface_hrn + "." + site['login_base']
# import if hrn is not in list of existing hrns or if the hrn exists
# but its not a site record
if site_hrn not in existing_hrns or \
(site_hrn, 'authority') not in existing_records:
- site_hrn = sfaImporter.import_site(import_auth, site)
+ site_hrn = sfaImporter.import_site(interface_hrn, site)
# import node records
for node_id in site['node_ids']:
if node_id not in nodes_dict:
continue
node = nodes_dict[node_id]
- hrn = hostname_to_hrn(import_auth, site['login_base'], node['hostname'])
+ hrn = hostname_to_hrn(interface_hrn, site['login_base'], node['hostname'])
if hrn not in existing_hrns or \
(hrn, 'node') not in existing_records:
sfaImporter.import_node(site_hrn, node)
if slice_id not in slices_dict:
continue
slice = slices_dict[slice_id]
- hrn = slicename_to_hrn(import_auth, slice['name'])
+ hrn = slicename_to_hrn(interface_hrn, slice['name'])
if hrn not in existing_hrns or \
(hrn, 'slice') not in existing_records:
sfaImporter.import_slice(site_hrn, slice)
for (record_hrn, type) in existing_records.keys():
record = existing_records[(record_hrn, type)]
# if this is the interface name dont do anything
- if record_hrn == import_auth or record['peer_authority']:
+ if record_hrn == interface_hrn or \
+ record_hrn == root_auth or \
+ record['peer_authority']:
continue
# dont delete vini's internet2 placeholdder record
# normally this would be deleted becuase it does not have a plc record
- if ".vini" in import_auth and import_auth.endswith('vini') and \
+ if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
record_hrn.endswith("internet2"):
continue
if type == 'authority':
for site in sites:
- site_hrn = import_auth + "." + site['login_base']
+ site_hrn = interface_hrn + "." + site['login_base']
if site_hrn == record_hrn and site['site_id'] == record['pointer']:
found = True
break
self.TrustedRoots = TrustedRootList(Config.get_trustedroots_dir(self.config))
self.plc_auth = self.config.get_plc_auth()
self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
- self.level1_auth = self.config.SFA_REGISTRY_LEVEL1_AUTH
- if not self.level1_auth or self.level1_auth in ['']:
- self.level1_auth = None
# connect to planetlab
self.shell = None
def create_top_level_auth_records(self, hrn):
+ # create the authority if it doesn't already exist
AuthHierarchy = self.AuthHierarchy
urn = hrn_to_urn(hrn, 'authority')
- # if auth records for this hrn dont exist, create it
if not AuthHierarchy.auth_exists(urn):
trace("Import: creating top level authorites", self.logger)
AuthHierarchy.create_auth(urn)
-
-
- # get the auth info of the newly created root auth (parent)
- # or level1_auth if it exists
- if self.level1_auth:
- auth_info = AuthHierarchy.get_auth_info(hrn)
+ parent_hrn = get_authority(hrn)
+ if not parent_hrn:
parent_hrn = hrn
- else:
- parent_hrn = get_authority(hrn)
- if not parent_hrn:
- parent_hrn = hrn
- auth_info = AuthHierarchy.get_auth_info(parent_hrn)
-
+ auth_info = AuthHierarchy.get_auth_info(parent_hrn)
+
+ # create the db record if it doesn't already exist
table = SfaTable()
auth_record = table.find({'type': 'authority', 'hrn': hrn})
from sfa.util.rspec import *
from sfa.util.specdict import *
from sfa.util.faults import *
-from sfa.util.storage import *
from sfa.util.record import SfaRecord
from sfa.util.policy import Policy
from sfa.util.prefixTree import prefixTree
MAXINT = 2L**31-1
-class Slices(SimpleStorage):
+class Slices:
rspec_to_slice_tag = {'max_rate':'net_max_rate'}
def __init__(self, api, ttl = .5, origin_hrn=None):
self.api = api
- self.ttl = ttl
- self.threshold = None
- path = self.api.config.SFA_DATA_DIR
- filename = ".".join([self.api.interface, self.api.hrn, "slices"])
- filepath = path + os.sep + filename
- self.slices_file = filepath
- SimpleStorage.__init__(self, self.slices_file)
+ #filepath = path + os.sep + filename
self.policy = Policy(self.api)
- self.load()
self.origin_hrn = origin_hrn
def get_slivers(self, xrn, node=None):
return sfa_peer
- def refresh(self):
- """
- Update the cached list of slices
- """
- # Reload components list
- now = datetime.datetime.now()
- if not self.has_key('threshold') or not self.has_key('timestamp') or \
- now > datetime.datetime.fromtimestamp(time.mktime(time.strptime(self['threshold'], self.api.time_format))):
- if self.api.interface in ['aggregate']:
- self.refresh_slices_aggregate()
- elif self.api.interface in ['slicemgr']:
- self.refresh_slices_smgr()
-
- def refresh_slices_aggregate(self):
- slices = self.api.plshell.GetSlices(self.api.plauth, {'peer_id': None}, ['name'])
- slice_hrns = [slicename_to_hrn(self.api.hrn, slice['name']) for slice in slices]
-
- # update timestamp and threshold
- timestamp = datetime.datetime.now()
- hr_timestamp = timestamp.strftime(self.api.time_format)
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
- hr_threshold = threshold.strftime(self.api.time_format)
-
- slice_details = {'hrn': slice_hrns,
- 'timestamp': hr_timestamp,
- 'threshold': hr_threshold
- }
- self.update(slice_details)
- self.write()
-
-
- def refresh_slices_smgr(self):
- slice_hrns = []
- credential = self.api.getCredential()
- for aggregate in self.api.aggregates:
- success = False
- try:
- slices = self.api.aggregates[aggregate].get_slices(credential)
- slice_hrns.extend(slices)
- success = True
- except:
- print >> log, "%s" % (traceback.format_exc())
- print >> log, "Error calling slices at aggregate %(aggregate)s" % locals()
-
- # update timestamp and threshold
- timestamp = datetime.datetime.now()
- hr_timestamp = timestamp.strftime(self.api.time_format)
- delta = datetime.timedelta(hours=self.ttl)
- threshold = timestamp + delta
- hr_threshold = threshold.strftime(self.api.time_format)
-
- slice_details = {'hrn': slice_hrns,
- 'timestamp': hr_timestamp,
- 'threshold': hr_threshold
- }
- self.update(slice_details)
- self.write()
-
-
def verify_site(self, registry, credential, slice_hrn, peer, sfa_peer):
authority = get_authority(slice_hrn)
authority_urn = hrn_to_urn(authority, 'authority')
default_dict = {'aggregates': {'aggregate': [Interfaces.default_fields]}}
def __init__(self, api, conf_file = "/etc/sfa/aggregates.xml"):
- Interfaces.__init__(self, api, conf_file, 'ma')
-
- def get_connections(self, interfaces):
- """
- Get connection details for the trusted peer aggregates from file and
- create an connection to each.
- """
- connections = Interfaces.get_connections(self, interfaces)
-
+ Interfaces.__init__(self, api, conf_file)
# set up a connection to the local registry
address = self.api.config.SFA_AGGREGATE_HOST
port = self.api.config.SFA_AGGREGATE_PORT
url = 'http://%(address)s:%(port)s' % locals()
local_aggregate = {'hrn': self.api.hrn, 'addr': address, 'port': port}
- self.interfaces.append(local_aggregate)
- connections[self.api.hrn] = xmlrpcprotocol.get_server(url, self.api.key_file, self.api.cert_file)
- return connections
+ self.interfaces[self.api.hrn] = local_aggregate
+ # get connections
+ self.update(self.get_connections())
### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/server/interface.py $
#
-
from sfa.util.faults import *
from sfa.util.storage import *
from sfa.util.namespace import *
from sfa.trust.gid import GID
from sfa.util.table import SfaTable
from sfa.util.record import SfaRecord
+import traceback
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
import sfa.util.soapprotocol as soapprotocol
# defined by the class
default_dict = {}
- # allowed types
- types = ['sa', 'ma']
+ types = ['authority']
- def __init__(self, api, conf_file, type):
+ def __init__(self, api, conf_file, type='authority'):
if type not in self.types:
raise SfaInfaildArgument('Invalid type %s: must be in %s' % (type, self.types))
dict.__init__(self, {})
# load config file
self.interface_info = XmlStorage(conf_file, self.default_dict)
self.interface_info.load()
- self.interfaces = self.interface_info.values()[0].values()[0]
- if not isinstance(self.interfaces, list):
- self.interfaces = [self.interfaces]
- # get connections
- self.update(self.get_connections(self.interfaces))
+ interfaces = self.interface_info.values()[0].values()[0]
+ if not isinstance(interfaces, list):
+ interfaces = [interfaces]
+ self.interfaces = {}
+ for interface in interfaces:
+ self.interfaces[interface['hrn']] = interface
+
def sync_interfaces(self):
"""
# are any missing gids, request a new one from the peer registry.
gids_current = self.api.auth.trusted_cert_list
hrns_current = [gid.get_hrn() for gid in gids_current]
- hrns_expected = [interface['hrn'] for interface in self.interfaces]
+ hrns_expected = self.interfaces.keys()
new_hrns = set(hrns_expected).difference(hrns_current)
- self.get_peer_gids(new_hrns)
-
+ gids = self.get_peer_gids(new_hrns)
# update the local db records for these registries
- self.update_db_records(self.type)
+ self.update_db_records(self.type, gids)
def get_peer_gids(self, new_hrns):
"""
Install trusted gids from the specified interfaces.
"""
+ peer_gids = []
if not new_hrns:
- return
+ return peer_gids
trusted_certs_dir = self.api.config.get_trustedroots_dir()
for new_hrn in new_hrns:
# the gid for this interface should already be installed
continue
try:
# get gid from the registry
- interface = self.get_connections(self.interfaces[new_hrn])[new_hrn]
+ interface_info = self.interfaces[new_hrn]
+ interface = self[new_hrn]
trusted_gids = interface.get_trusted_certs()
- # default message
- message = "interface: %s\tunable to install trusted gid for %s" % \
- (self.api.interface, new_hrn)
if trusted_gids:
# the gid we want shoudl be the first one in the list,
# but lets make sure
for trusted_gid in trusted_gids:
+ # default message
+ message = "interface: %s\t" % (self.api.interface)
+ message += "unable to install trusted gid for %s" % \
+ (new_hrn)
gid = GID(string=trusted_gids[0])
+ peer_gids.append(gid)
if gid.get_hrn() == new_hrn:
gid_filename = os.path.join(trusted_certs_dir, '%s.gid' % new_hrn)
gid.save_to_file(gid_filename, save_parents=True)
message = "interface: %s\tinstalled trusted gid for %s" % \
(self.api.interface, new_hrn)
- # log the message
- self.api.logger.info(message)
+ # log the message
+ self.api.logger.info(message)
except:
message = "interface: %s\tunable to install trusted gid for %s" % \
(self.api.interface, new_hrn)
self.api.logger.info(message)
+ traceback.print_exc()
# reload the trusted certs list
self.api.auth.load_trusted_certs()
+ return peer_gids
- def update_db_records(self, type):
+ def update_db_records(self, type, gids):
"""
Make sure there is a record in the local db for allowed registries
defined in the config file (registries.xml). Removes old records from
the db.
"""
+ if not gids:
+ return
# get hrns we expect to find
# ignore records for local interfaces
ignore_interfaces = [self.api.config.SFA_INTERFACE_HRN]
- hrns_expected = [interface['hrn'] for interface in self.interfaces \
- if interface['hrn'] not in ignore_interfaces]
+ hrns_expected = [gid.get_hrn() for gid in gids \
+ if gid.get_hrn() not in ignore_interfaces]
# get hrns that actually exist in the db
table = SfaTable()
table.remove(record)
# add new records
- for hrn in hrns_expected:
+ for gid in gids:
+ hrn = gid.get_hrn()
if hrn not in hrns_found:
record = {
'hrn': hrn,
'type': type,
'pointer': -1,
'authority': get_authority(hrn),
+ 'gid': gid.save_to_string(save_parents=True),
}
record = SfaRecord(dict=record)
table.insert(record)
-
- def get_connections(self, interfaces):
+ def get_connections(self):
"""
read connection details for the trusted peer registries from file return
a dictionary of connections keyed on interface hrn.
"""
connections = {}
required_fields = self.default_fields.keys()
- if not isinstance(interfaces, list):
- interfaces = [interfaces]
- for interface in interfaces:
+ for interface in self.interfaces.values():
# make sure the required fields are present and not null
- for key in required_fields:
- if not interface.get(key):
- continue
+ if not all([interface.get(key) for key in required_fields]):
+ continue
+
hrn, address, port = interface['hrn'], interface['addr'], interface['port']
url = 'http://%(address)s:%(port)s' % locals()
# check which client we should use
default_dict = {'registries': {'registry': [Interfaces.default_fields]}}
def __init__(self, api, conf_file = "/etc/sfa/registries.xml"):
- Interfaces.__init__(self, api, conf_file, 'sa')
-
- def get_connections(self, interfaces):
- """
- read connection details for the trusted peer registries from file return
- a dictionary of connections keyed on interface hrn.
- """
- connections = Interfaces.get_connections(self, interfaces)
-
- # set up a connection to the local registry
+ Interfaces.__init__(self, api, conf_file)
address = self.api.config.SFA_REGISTRY_HOST
port = self.api.config.SFA_REGISTRY_PORT
url = 'http://%(address)s:%(port)s' % locals()
local_registry = {'hrn': self.api.hrn, 'addr': address, 'port': port}
- connections[self.api.hrn] = xmlrpcprotocol.get_server(url, self.api.key_file, self.api.cert_file)
- return connections
+ self.interfaces[self.api.hrn] = local_registry
+
+ # get connections
+ self.update(self.get_connections())
--- /dev/null
+#!/usr/bin/python
+
+#
+# SFA Certificate Signing and management
+#
+
+import os
+import sys
+from optparse import OptionParser
+from sfa.trust.certificate import Keypair, Certificate
+from sfa.trust.gid import GID, create_uuid
+from sfa.trust.hierarchy import Hierarchy
+from sfa.util.config import Config
+from collections import defaultdict
+
+def main():
+ args = sys.argv
+ script_name = args[0]
+ parser = OptionParser(usage="%(script_name)s [options]" % locals())
+ parser.add_option("-d", "--display", dest="display", default=None,
+ help="print contents of specified gid")
+ parser.add_option("-s", "--sign", dest="sign", default=None,
+ help="gid to sign" )
+ parser.add_option("-k", "--key", dest="key", default=None,
+ help="keyfile to use for signing")
+ parser.add_option("-a", "--authority", dest="authority", default=None,
+ help="sign the gid using the specified authority ")
+ parser.add_option("-i", "--import", dest="importgid", default=None,
+ help="gid file to import into the registry")
+ parser.add_option("-e", "--export", dest="export",
+ help="name of gid to export from registry")
+ parser.add_option("-o", "--outfile", dest="outfile",
+ help="where to write the exported gid")
+ parser.add_option("-v", "--verbose", dest="verbose", default=False,
+ action="store_true", help="be verbose")
+
+ (options, args) = parser.parse_args()
+
+
+ if options.display:
+ display(options)
+ elif options.sign:
+ sign(options)
+ elif options.importgid:
+ import_gid(options)
+ elif options.export:
+ export_gid(options)
+ else:
+ parser.print_help()
+ sys.exit(1)
+
+
+def display(options):
+ """
+ Display the specified GID
+ """
+ gidfile = os.path.abspath(options.display)
+ if not gidfile or not os.path.isfile(gidfile):
+ print "No such gid: %s" % gidfile
+ sys.exit(1)
+ gid = GID(filename=gidfile)
+ gid.dump(dump_parents=True)
+
+def sign_gid(gid, parent_key, parent_gid):
+ gid.set_issuer(parent_key, parent_gid.get_hrn())
+ gid.set_parent(parent_gid)
+ gid.sign()
+ return gid
+
+def sign(options):
+ """
+ Sign the specified gid
+ """
+ hierarchy = Hierarchy()
+ config = Config()
+ default_authority = config.SFA_INTERFACE_HRN
+ auth_info = hierarchy.get_auth_info(default_authority)
+
+ # load the gid
+ gidfile = os.path.abspath(options.sign)
+ if not os.path.isfile(gidfile):
+ print "no such gid: %s" % gidfile
+ sys.exit(1)
+ gid = GID(filename=gidfile)
+
+ # remove previous parent
+ gid = GID(string=gid.save_to_string(save_parents=False))
+
+ # load the parent private info
+ authority = options.authority
+ # if no pkey was specified, then use the this authority's key
+ if not authority:
+ authority = default_authority
+
+ if not hierarchy.auth_exists(authority):
+ print "no such authority: %s" % authority
+ sys.exit(1)
+
+ # load the parent gid and key
+ auth_info = hierarchy.get_auth_info(authority)
+ pkeyfile = auth_info.privkey_filename
+ parent_key = Keypair(filename=pkeyfile)
+ parent_gid = auth_info.gid_object
+
+ # get the outfile
+ outfile = options.outfile
+ if not outfile:
+ outfile = os.path.abspath('./signed-%s.gid' % gid.get_hrn())
+
+ # check if gid already has a parent
+
+ # sign the gid
+ if options.verbose:
+ print "Signing %s gid with parent %s" % \
+ (gid.get_hrn(), parent_gid.get_hrn())
+ gid = sign_gid(gid, parent_key, parent_gid)
+ # save the signed gid
+ if options.verbose:
+ print "Writing signed gid %s" % outfile
+ gid.save_to_file(outfile, save_parents=True)
+
+
+def export_gid(options):
+ from sfa.util.table import SfaTable
+ # lookup the record for the specified hrn
+ hrn = options.export
+
+ # check sfa table first
+ table = SfaTable()
+ records = table.find({'hrn': hrn, 'type': 'authority'})
+ if not records:
+ # check the authorities hierarchy
+ hierarchy = Hierarchy()
+ try:
+ auth_info = hierarchy.get_auth_info(hrn)
+ gid = auth_info.gid_object
+ except:
+ print "Record: %s not found" % hrn
+ sys.exit(1)
+ else:
+ record = records[0]
+ gid = GID(string=record['gid'])
+
+ # get the outfile
+ outfile = options.outfile
+ if not outfile:
+ outfile = os.path.abspath('./%s.gid' % gid.get_hrn())
+
+ # save it
+ if options.verbose:
+ print "Writing %s gid to %s" % (gid.get_hrn(), outfile)
+ gid.save_to_file(outfile, save_parents=True)
+
+def import_gid(options):
+ """
+ Import the specified gid into the registry (db and authorities
+ hierarchy) overwriting any previous gid.
+ """
+ from sfa.util.table import SfaTable
+ from sfa.util.record import SfaRecord
+ # load the gid
+ gidfile = os.path.abspath(options.importgid)
+ if not gidfile or not os.path.isfile(gidfile):
+ print "No such gid: %s" % gidfile
+ sys.exit(1)
+ gid = GID(filename=gidfile)
+
+ # check if it exists within the hierarchy
+ hierarchy = Hierarchy()
+ if not hierarchy.auth_exists(gid.get_hrn()):
+ print "%s not found in hierarchy" % gid.get_hrn()
+ sys.exit(1)
+
+ # check if record exists in db
+ table = SfaTable()
+ records = table.find({'hrn': gid.get_hrn(), 'type': 'authority'})
+ if not records:
+ print "%s not found in record database" % gid.get_hrn()
+ sys.exit(1)
+
+ # update the database record
+ record = records[0]
+ record['gid'] = gid.save_to_string(save_parents=True)
+ table.update(record)
+ if options.verbose:
+ print "Imported %s gid into db" % record['hrn']
+
+ # update the hierarchy
+ auth_info = hierarchy.get_auth_info(gid.get_hrn())
+ filename = auth_info.gid_filename
+ gid.save_to_file(filename, save_parents=True)
+ if options.verbose:
+ print "Writing %s gid to %s" % (gid.get_hrn(), filename)
+
+ # re-sign all existing gids signed by this authority
+ # create a dictionary of records keyed on the record's authority
+ record_dict = defaultdict(list)
+ # only get records that belong to this authority
+ # or any of its sub authorities
+ child_records = table.find({'hrn': '%s*' % gid.get_hrn()})
+ if not child_records:
+ return
+
+ for record in child_records:
+ record_dict[record['authority']].append(record)
+
+ # start with the authority we just imported
+ authorities = [gid.get_hrn()]
+ while authorities:
+ next_authorities = []
+ for authority in authorities:
+ # create a new signed gid for each record at this authority
+ # and update the registry
+ auth_info = hierarchy.get_auth_info(authority)
+ records = record_dict[authority]
+ for record in records:
+ record_gid = GID(string=record['gid'])
+ parent_pkey = Keypair(filename=auth_info.privkey_filename)
+ parent_gid = GID(filename=auth_info.gid_filename)
+ if options.verbose:
+ print "re-signing %s gid with parent %s" % \
+ (record['hrn'], parent_gid.get_hrn())
+ signed_gid = sign_gid(record_gid, parent_pkey, parent_gid)
+ record['gid'] = signed_gid.save_to_string(save_parents=True)
+ table.update(record)
+
+ # if this is an authority then update the hierarchy
+ if record['type'] == 'authority':
+ record_info = hierarchy.get_auth_info(record['hrn'])
+ if options.verbose:
+ print "Writing %s gid to %s" % (record['hrn'], record_info.gid_filename)
+ signed_gid.save_to_file(filename=record_info.gid_filename, save_parents=True)
+
+ # update list of next authorities
+ tmp_authorities = set([record['hrn'] for record in records \
+ if record['type'] == 'authority'])
+ next_authorities.extend(tmp_authorities)
+
+ # move on to next set of authorities
+ authorities = next_authorities
+
+if __name__ == '__main__':
+ main()
manager.init_server()
-def sync_interfaces():
+def sync_interfaces(server_key_file, server_cert_file):
"""
Attempt to install missing trusted gids and db records for
our federated interfaces
"""
- api = SfaAPI()
+ api = SfaAPI(key_file = server_key_file, cert_file = server_cert_file)
registries = Registries(api)
aggregates = Aggregates(api)
registries.sync_interfaces()
help="Run as daemon.", default=False)
(options, args) = parser.parse_args()
- if (options.daemon): daemon()
config = Config()
hierarchy = Hierarchy()
init_server_key(server_key_file, server_cert_file, config, hierarchy)
init_server(options, config)
- sync_interfaces()
+ sync_interfaces(server_key_file, server_cert_file)
+ if (options.daemon): daemon()
# start registry server
if (options.registry):
from sfa.server.registry import Registry
def load_trusted_certs(self):
self.trusted_cert_list = TrustedRootList(self.config.get_trustedroots_dir()).get_list()
- def check(self, cred, operation):
+ def check(self, cred, operation, hrn = None):
"""
Check the credential against the peer cert (callerGID included
in the credential matches the caller that is connected to the
else:
raise MissingTrustedRoots(self.config.get_trustedroots_dir())
+ # Make sure the credential's target matches the specified hrn.
+ # This check does not apply to trusted peers
+ trusted_peers = [gid.get_hrn() for gid in self.trusted_cert_list]
+ if hrn and self.client_gid.get_hrn() not in trusted_peers:
+ if not hrn == self.object_gid.get_hrn():
+ raise PermissionError("Target hrn: %s doesn't match specified hrn: %s " % \
+ (self.object_gid.get_hrn(), hrn) )
return True
def check_ticket(self, ticket):
self.issuerReq = req
if cert:
# if a cert was supplied, then get the subject from the cert
- subject = cert.cert.get_issuer()
+ subject = cert.cert.get_subject()
assert(subject)
self.issuerSubject = subject
#print "TRUSTED CERT", trusted_cert.dump()
#print "Client is signed by Trusted?", self.is_signed_by_cert(trusted_cert)
if self.is_signed_by_cert(trusted_cert):
+ # make sure the trusted cert's hrn is a prefix of the
+ # signed cert's hrn
+ if not self.get_subject().startswith(trusted_cert.get_subject()):
+ raise GidParentHrn(trusted_cert.get_subject())
#print self.get_subject(), "is signed by a root"
return
if self.parent and dump_parents:
print " "*indent, "parent:"
- self.parent.dump(indent+4)
+ self.parent.dump(indent+4, dump_parents)
##
# Verify the chain of authenticity of the GID. First perform the checks
class BaseAPI:
- def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", methods='sfa.methods',
-
- peer_cert = None, interface = None, key_file = None, cert_file = None):
+ cache = None
+
+ def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8",
+ methods='sfa.methods', peer_cert = None, interface = None,
+ key_file = None, cert_file = None, cache = cache):
self.encoding = encoding
# flat list of method names
-
self.methods_module = methods_module = __import__(methods, fromlist=[methods])
self.methods = methods_module.all
self.key = Keypair(filename=self.key_file)
self.cert_file = cert_file
self.cert = Certificate(filename=self.cert_file)
+ self.cache = cache
self.credential = None
self.source = None
self.time_format = "%Y-%m-%d %H:%M:%S"
--- /dev/null
+#
+# This module implements general purpose caching system
+#
+from __future__ import with_statement
+import time
+import threading
+from datetime import datetime
+
+# maximum lifetime of cached data (in seconds)
+MAX_CACHE_TTL = 60 * 60
+
+class CacheData:
+
+ data = None
+ created = None
+ expires = None
+ lock = None
+
+ def __init__(self, data, ttl = MAX_CACHE_TTL):
+ self.lock = threading.RLock()
+ self.data = data
+ self.renew(ttl)
+
+ def is_expired(self):
+ return time.time() > self.expires
+
+ def get_created_date(self):
+ return str(datetime.fromtimestamp(self.created))
+
+ def get_expires_date(self):
+ return str(datetime.fromtimestamp(self.expires))
+
+ def renew(self, ttl = MAX_CACHE_TTL):
+ self.created = time.time()
+ self.expires = self.created + ttl
+
+ def set_data(self, data, renew=True, ttl = MAX_CACHE_TTL):
+ with self.lock:
+ self.data = data
+ if renew:
+ self.renew(ttl)
+
+ def get_data(self):
+ return self.data
+
+class Cache:
+
+ cache = {}
+ lock = threading.RLock()
+
+ def add(self, key, value, ttl = MAX_CACHE_TTL):
+ with self.lock:
+ if self.cache.has_key(key):
+ self.cache[key].set_data(value, ttl=ttl)
+ else:
+ self.cache[key] = CacheData(value, ttl)
+
+ def get(self, key):
+ data = self.cache.get(key)
+ if not data or data.is_expired():
+ return None
+ return data.get_data()
'first_name': Parameter(str, 'First name'),
'last_name': Parameter(str, 'Last name'),
'phone': Parameter(str, 'Phone Number'),
- 'key': Parameter(str, 'Public key'),
+ 'keys': Parameter(str, 'Public key'),
'slices': Parameter([str], 'List of slices this user belongs to'),
}
fields.update(SfaRecord.fields)
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.credential import *
from sfa.util.faults import *
-from sfa.plc.api import SfaAPI
+from sfa.plc.api import SfaAPI
+from sfa.util.cache import Cache
from sfa.util.debug import log
##
def do_POST(self):
"""Handles the HTTPS POST request.
- It was copied out from SimpleXMLRPCServer.py and modified to shutdown the socket cleanly.
+ It was copied out from SimpleXMLRPCServer.py and modified to shutdown
+ the socket cleanly.
"""
try:
peer_cert = Certificate()
self.api = SfaAPI(peer_cert = peer_cert,
interface = self.server.interface,
key_file = self.server.key_file,
- cert_file = self.server.cert_file)
+ cert_file = self.server.cert_file,
+ cache = self.cache)
# get arguments
request = self.rfile.read(int(self.headers["content-length"]))
remote_addr = (remote_ip, remote_port) = self.connection.getpeername()
self.interface = None
self.key_file = key_file
self.cert_file = cert_file
+ # add cache to the request handler
+ HandlerClass.cache = Cache()
#for compatibility with python 2.4 (centos53)
if sys.version_info < (2, 5):
SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
self.db.do(querystr)
for index in indexes:
self.db.do(index)
+
self.db.commit()
-
+
def remove(self, record):
- query_str = "DELETE FROM %s WHERE record_id = %s" % \
- (self.tablename, record['record_id'])
- self.db.do(query_str)
+ params = {'record_id': record['record_id']}
+ template = "DELETE FROM %s " % self.tablename
+ sql = template + "WHERE record_id = %(record_id)s"
+ self.db.do(sql, params)
# if this is a site, remove all records where 'authority' == the
# site's hrn
- if record['type'] == 'site':
- sql = " DELETE FROM %s WHERE authority = %s" % \
- (self.tablename, record['hrn'])
- self.db.do(sql)
- self.db.commit()
+ if record['type'] == 'authority':
+ params = {'authority': record['hrn']}
+ sql = template + "WHERE authority = %(authority)s"
+ self.db.do(sql, params)
+ self.db.commit()
def insert(self, record):
db_fields = self.db_fields(record)
def drop(self):
try:
self.db.do('DROP TABLE IF EXISTS ' + self.tablename)
+ self.db.commit()
except:
try:
self.db.do('DROP TABLE ' + self.tablename)
+ self.db.commit()
except:
pass
parser = xmlrpclib.ExpatParser(unmarshaller)
return parser, unmarshaller
-def get_server(url, key_file, cert_file):
+def get_server(url, key_file, cert_file, debug=False):
transport = XMLRPCTransport()
transport.key_file = key_file
transport.cert_file = cert_file
- return xmlrpclib.ServerProxy(url, transport, allow_none=True)
+ return xmlrpclib.ServerProxy(url, transport, allow_none=True, verbose=debug)
-Metadata-Version: 1.0\r
-Name: xmlbuilder\r
-Version: 0.9\r
-Summary: Pythonic way to create xml files\r
-Home-page: http://pypi.python.org/pypi/xmlbuilder\r
-Author: koder\r
-Author-email: koder_dot_mail@gmail_dot_com\r
-License: MIT\r
-Download-URL: http://pypi.python.org/pypi/xmlbuilder\r
-Description: Example of usage:\r
- -----------------\r
- \r
- \r
- from __future__ import with_statement\r
- from xmlbuilder import XMLBuilder\r
- x = XMLBuilder(format=True)\r
- with x.root(a = 1):\r
- with x.data:\r
- [x << ('node',{'val':i}) for i in range(10)]\r
- \r
- print str(x)\r
- \r
- will print\r
- \r
- <root a="1">\r
- <data>\r
- <node val="0" />\r
- <node val="1" />\r
- <node val="2" />\r
- <node val="3" />\r
- <node val="4" />\r
- <node val="5" />\r
- <node val="6" />\r
- <node val="7" />\r
- <node val="8" />\r
- <node val="9" />\r
- </data>\r
- </root>\r
- \r
- Mercurial repo:http://hg.assembla.com/MyPackages/\r
- \r
- Documentations\r
- --------------\r
- `XMLBuilder` is simple library build on top of `ElementTree.TreeBuilder` to\r
- simplify xml files creation as much as possible. Althow it can produce\r
- structured result with identated child tags. `XMLBuilder` use python `with`\r
- statement to define xml tag levels and `<<` operator for simple cases -\r
- text and tag without childs.\r
- \r
- First we need to create xmlbuilder\r
- \r
- from xmlbuilder import XMLBuilder\r
- # params - encoding = 'utf8',\r
- # builder = None, - ElementTree.TreeBuilder\r
- # tab_level = None, - current tab l;evel - for formatted output only\r
- # format = False, - create formatted output\r
- # tab_step = " " * 4 - indentation step\r
- xml = XMLBuilder()\r
- \r
- \r
- Use `with` statement to make document structure\r
- #create and open tag 'root_tag' with text 'text' and attributes\r
- with xml.root_tag(text,attr1=val1,attr2=val2):\r
- #create and open tag 'sub_tag'\r
- with xml.sub_tag(text,attr3=val3):\r
- #create tag which are not valid python identificator\r
- with xml('one-more-sub-tag',attr7=val37):\r
- xml << "Some textual data"\r
- #here tag 'one-more-sub-tag' are closed\r
- #Tags without children can be created using `<<` operator\r
- for val in range(15):\r
- xml << ('message',"python rocks!"[:i])\r
- #create 15 child tag like <message> python r</message>\r
- #all tags closed\r
- node = ~x # get etree.ElementTree object\r
- xml_data = str(x)\r
- unicode_xml_data = unicode(x)\r
- \r
-Keywords: xml\r
-Platform: UNKNOWN\r
+Metadata-Version: 1.0
+Name: xmlbuilder
+Version: 0.9
+Summary: Pythonic way to create xml files
+Home-page: http://pypi.python.org/pypi/xmlbuilder
+Author: koder
+Author-email: koder_dot_mail@gmail_dot_com
+License: MIT
+Download-URL: http://pypi.python.org/pypi/xmlbuilder
+Description: Example of usage:
+ -----------------
+
+
+ from __future__ import with_statement
+ from xmlbuilder import XMLBuilder
+ x = XMLBuilder(format=True)
+ with x.root(a = 1):
+ with x.data:
+ [x << ('node',{'val':i}) for i in range(10)]
+
+ print str(x)
+
+ will print
+
+ <root a="1">
+ <data>
+ <node val="0" />
+ <node val="1" />
+ <node val="2" />
+ <node val="3" />
+ <node val="4" />
+ <node val="5" />
+ <node val="6" />
+ <node val="7" />
+ <node val="8" />
+ <node val="9" />
+ </data>
+ </root>
+
+        Mercurial repo: http://hg.assembla.com/MyPackages/
+
+        Documentation
+ --------------
+        `XMLBuilder` is a simple library built on top of `ElementTree.TreeBuilder` to
+        simplify xml file creation as much as possible. It can also produce
+        structured output with indented child tags. `XMLBuilder` uses the python `with`
+        statement to define xml tag levels and the `<<` operator for simple cases -
+        text and tags without children.
+
+ First we need to create xmlbuilder
+
+ from xmlbuilder import XMLBuilder
+ # params - encoding = 'utf8',
+ # builder = None, - ElementTree.TreeBuilder
+        # tab_level = None, - current tab level - for formatted output only
+ # format = False, - create formatted output
+ # tab_step = " " * 4 - indentation step
+ xml = XMLBuilder()
+
+
+ Use `with` statement to make document structure
+ #create and open tag 'root_tag' with text 'text' and attributes
+ with xml.root_tag(text,attr1=val1,attr2=val2):
+ #create and open tag 'sub_tag'
+ with xml.sub_tag(text,attr3=val3):
+                #create a tag which is not a valid python identifier
+ with xml('one-more-sub-tag',attr7=val37):
+ xml << "Some textual data"
+            #here tag 'one-more-sub-tag' is closed
+ #Tags without children can be created using `<<` operator
+        for i in range(15):
+            xml << ('message',"python rocks!"[:i])
+ #create 15 child tag like <message> python r</message>
+ #all tags closed
+ node = ~x # get etree.ElementTree object
+ xml_data = str(x)
+ unicode_xml_data = unicode(x)
+
+Keywords: xml
+Platform: UNKNOWN
LICENSE
MANIFEST.in
+PKG-INFO
README.txt
+setup.cfg
setup.py
xmlbuilder/__init__.py
xmlbuilder.egg-info/PKG-INFO