TAGS
*~
/sfa/util/version.py
-*.version
+wsdl/*.wsdl
+wsdl/*.xml
+wsdl/[ars]*.html
+sfa/client/*.version
*.png
*.svg
*.out
+*.pdf
+*.pkey
+*.cert
+*.cred
##########
tags:
- find . -type f | egrep -v '/\.git/|/\.svn/|TAGS|\.py[co]$$|\.doc$$|\.html$$|\.pdf$$|~$$|\.png$$|\.svg$$|\.out$$' | xargs etags
+ find . -type f | egrep -v '/\.git/|/\.svn/|TAGS|\.py[co]$$|\.doc$$|\.html$$|\.pdf$$|~$$|\.png$$|\.svg$$|\.out$$|\.bak$$|\.xml$$' | xargs etags
.PHONY: tags
signatures:
SSHCOMMAND:=ssh root@$(PLC)
endif
-LOCAL_RSYNC_EXCLUDES := --exclude '*.pyc'
+LOCAL_RSYNC_EXCLUDES += --exclude '*.pyc'
+LOCAL_RSYNC_EXCLUDES += --exclude '*.png' --exclude '*.svg' --exclude '*.out'
RSYNC_EXCLUDES := --exclude .svn --exclude .git --exclude '*~' --exclude TAGS $(LOCAL_RSYNC_EXCLUDES)
RSYNC_COND_DRY_RUN := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,)
RSYNC := rsync -a -v $(RSYNC_COND_DRY_RUN) --no-owner $(RSYNC_EXCLUDES)
+CLIENTS = sfi.py getNodes.py getRecord.py setRecord.py \
+sfiAddAttribute.py sfiAddSliver.py sfiDeleteAttribute.py sfiDeleteSliver.py sfiListNodes.py \
+sfiListSlivers.py sfadump.py
+
BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
./sfa/plc/sfa-import-plc.py ./sfa/plc/sfa-nuke-plc.py ./sfa/server/sfa-server.py \
- ./sfa/client/sfi.py ./sfa/client/getNodes.py ./sfa/client/getRecord.py \
- ./sfa/client/setRecord.py ./sfa/client/sfadump.py
+ $(foreach client,$(CLIENTS),./sfa/client/$(client))
sync:
ifeq (,$(SSHURL))
def save_key_bio(self, bio, *args, **kw):
return self.save_pub_key_bio(bio)
-def rsa_new_pub_key((e, n)):
+def rsa_new_pub_key(couple):
+ (e,n)=couple
rsa = m2.rsa_new()
m2.rsa_set_e(rsa, e)
m2.rsa_set_n(rsa, n)
dsa.save_pub_key(fout)
# FIXME: This is wrong.
# M2Crypto doesn't allow us to set the public key parameter
- raise(Exception, "DSA keys are not supported yet: M2Crypto doesn't allow us to set the public key parameter")
+ raise Exception("DSA keys are not supported yet: M2Crypto doesn't allow us to set the public key parameter")
if __name__ == "__main__":
'sfa/util',
'sfa/managers',
'sfa/rspecs',
- 'sfa/rspecs/aggregates',
'sfatables',
'sfatables/commands',
'sfatables/processors',
%define name sfa
%define version 1.0
-%define taglevel 20
+%define taglevel 21
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Group: Applications/System
Requires: sfa
-%Package tests
+%package tests
Summary: unit tests suite for SFA
Group: Applications/System
Requires: sfa
%changelog
+* Wed Mar 16 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-21
+- stable sfascan
+- fix in initscript, *ENABLED tags in config now taken into account
+
* Fri Mar 11 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-20
- some commits had not been pushed in tag 19
cache and use in more general ways.
%changelog
+* Wed Mar 16 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-21
+- stable sfascan
+- fix in initscript, *ENABLED tags in config now taken into account
+
* Fri Mar 11 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-20
- some commits had not been pushed in tag 19
# recompute the SFA graphs from different locations
+# AMs, at least MyPLC AMs, are boring
#BUNDLES += http://planet-lab.eu:12346/@auto-ple-am
BUNDLES += http://planet-lab.eu:12345/@auto-ple-reg
BUNDLES += http://planet-lab.eu:12347/@auto-ple-sa
BUNDLES += http://planet-lab.org:12347/@auto-plc-sa
BUNDLES += http://planet-lab.kr:12345/@auto-ppk-reg
BUNDLES += http://planet-lab.kr:12347/@auto-ppk-sa
-BUNDLES += http://www.emulab.net:443/protogeni/xmlrpc/am/@auto-emulab-am
BUNDLES += http://geni-myvini.umkc.gpeni.net:12345/@auto-gpeni-reg
BUNDLES += http://geni-myvini.umkc.gpeni.net:12347/@auto-gpeni-sa
BUNDLES += http://198.248.241.100:12345/@auto-glc-reg
BUNDLES += http://www.emanicslab.org:12345/@auto-elc-reg
BUNDLES += http://www.emanicslab.org:12347/@auto-elc-sa
-# dont output svg yet
-#EXTENSIONS := png svg
-EXTENSIONS := png
+# left to right
+BUNDLES-LR += http://www.emulab.net:443/protogeni/xmlrpc/am/@auto-emulab-am
+BUNDLES-LR += http://planet-lab.eu:12345/@auto-ple-reg
+BUNDLES-LR += http://planet-lab.eu:12347/@auto-ple-sa
+BUNDLES-LR += http://planet-lab.org:12345/@auto-plc-reg
+BUNDLES-LR += http://planet-lab.org:12347/@auto-plc-sa
+BUNDLES-LR += http://planet-lab.kr:12345/@auto-ppk-reg
+#BUNDLES-LR += http://planet-lab.kr:12347/@auto-ppk-sa
+BUNDLES-LR += http://geni-myvini.umkc.gpeni.net:12345/@auto-gpeni-reg
+BUNDLES-LR += http://geni-myvini.umkc.gpeni.net:12347/@auto-gpeni-sa
+BUNDLES-LR += http://198.248.241.100:12345/@auto-glc-reg
+BUNDLES-LR += http://198.248.241.100:12347/@auto-glc-sa
+BUNDLES-LR += http://vini-veritas.net:12345/@auto-vini-reg
+BUNDLES-LR += http://vini-veritas.net:12347/@auto-vini-sa
+BUNDLES-LR += http://www.planet-lab.jp:12345/@auto-plj-reg
+BUNDLES-LR += http://www.planet-lab.jp:12347/@auto-plj-sa
+BUNDLES-LR += http://www.emanicslab.org:12345/@auto-elc-reg
+BUNDLES-LR += http://www.emanicslab.org:12347/@auto-elc-sa
+EXTENSIONS := png svg
+
+####################
+ALL += $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))))
+ALL += $(foreach bundle,$(BUNDLES-LR),$(word 2,$(subst @, ,$(bundle)))-lr)
+
+all: $(ALL)
+
+####################
define bundle_scan_target
$(word 2,$(subst @, ,$(1))):
./sfascan.py $(foreach extension,$(EXTENSIONS),-o $(word 2,$(subst @, ,$(1))).$(extension)) $(word 1,$(subst @, ,$(1))) >& $(word 2,$(subst @, ,$(1))).out
.PHONY: $(word 2,$(subst @, ,$(1)))
endef
-ALL := $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))))
+# the actual targets
+$(foreach bundle,$(BUNDLES),$(eval $(call bundle_scan_target,$(bundle))))
-all: $(ALL)
+#################### same but left-to-right
+define bundle_scan_target_lr
+$(word 2,$(subst @, ,$(1)))-lr:
+ ./sfascan.py -l $(foreach extension,$(EXTENSIONS),-o $(word 2,$(subst @, ,$(1)))-lr.$(extension)) $(word 1,$(subst @, ,$(1))) >& $(word 2,$(subst @, ,$(1)))-lr.out
+.PHONY: $(word 2,$(subst @, ,$(1)))-lr
+endef
# the actual targets
-$(foreach bundle,$(BUNDLES),$(eval $(call bundle_scan_target,$(bundle))))
+$(foreach bundle,$(BUNDLES-LR),$(eval $(call bundle_scan_target_lr,$(bundle))))
####################
+VERSIONS := $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))).version)
+versions: $(VERSIONS)
+clean-versions:
+	rm -f $(VERSIONS)
+#################### make name.version
define bundle_version_target
$(word 2,$(subst @, ,$(1))).version:
-./sfi.py -s $(word 1,$(subst @, ,$(1))) version >& $$@
VERSIONS := $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))).version)
-versions: $(VERSIONS)
-clean-versions:
- rm -f $(VERSIONS)
-
# the actual targets
$(foreach bundle,$(BUNDLES),$(eval $(call bundle_version_target,$(bundle))))
+####################
clean:
rm -f auto-*.{out,version}
$(foreach extension,$(EXTENSIONS),rm -rf auto-*.$(extension);)
+PUBEXTENSIONS=png
+publish:
+ $(foreach extension,$(PUBEXTENSIONS),rsync -av auto-*.$(extension) tparment@srv-planete.inria.fr:/proj/planete/www/Thierry.Parmentelat/sfascan/ ;)
+
#################### convenience, for debugging only
# make +foo : prints the value of $(foo)
# make ++foo : idem but verbose, i.e. foo=$(foo)
if not isinstance(rdict, dict):
raise "%s not a dict" % rdict
- for (key, value) in rdict.items():
+ for (key, value) in rdict.iteritems():
if isinstance(value, StringTypes):
if (attributes and key in attributes) or not attributes:
print tab * counter + "%s: %s" % (key, value)
import sys
import socket
-import re
+import traceback
+from urlparse import urlparse
import pygraphviz
from sfa.util.sfalogging import sfa_logger,sfa_logger_goes_to_console
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-m_url_with_proto=re.compile("\w+://(?P<hostname>[\w\-\.]+):(?P<port>[0-9]+).*")
-m_url_without_proto=re.compile("(?P<hostname>[\w\-\.]+):(?P<port>[0-9]+).*")
-def url_to_hostname_port (url):
- match=m_url_with_proto.match(url)
- if match:
- return (match.group('hostname'),match.group('port'))
- match=m_url_without_proto.match(url)
- if match:
- return (match.group('hostname'),match.group('port'))
- return ('undefined','???')
+def url_hostname_port (url):
+ if url.find("://")<0:
+ url="http://"+url
+ parsed_url=urlparse(url)
+ # 0(scheme) returns protocol
+ default_port='80'
+ if parsed_url[0]=='https': default_port='443'
+ # 1(netloc) returns the hostname+port part
+ parts=parsed_url[1].split(":")
+ # just a hostname
+ if len(parts)==1:
+ return (url,parts[0],default_port)
+ else:
+ return (url,parts[0],parts[1])
###
class Interface:
def __init__ (self,url):
self._url=url
try:
- (self.hostname,self.port)=url_to_hostname_port(url)
+ (self._url,self.hostname,self.port)=url_hostname_port(url)
self.ip=socket.gethostbyname(self.hostname)
self.probed=False
except:
- import traceback
- traceback.print_exc()
+# traceback.print_exc()
self.hostname="unknown"
self.ip='0.0.0.0'
self.port="???"
+ # don't really try it
self.probed=True
self._version={}
def url(self):
-# return "http://%s:%s/"%(self.hostname,self.port)
return self._url
# this is used as a key for creating graph nodes and to avoid duplicates
client.read_config()
key_file = client.get_key_file()
cert_file = client.get_cert_file(key_file)
- url="http://%s:%s/"%(self.hostname,self.port)
+ url=self.url()
sfa_logger().info('issuing get version at %s'%url)
server=xmlrpcprotocol.get_server(url, key_file, cert_file, options)
self._version=server.GetVersion()
except:
+# traceback.print_exc()
self._version={}
self.probed=True
return self._version
@staticmethod
def multi_lines_label(*lines):
- return '<<TABLE BORDER="0" CELLBORDER="0"><TR><TD>' + \
+ result='<<TABLE BORDER="0" CELLBORDER="0"><TR><TD>' + \
'</TD></TR><TR><TD>'.join(lines) + \
'</TD></TR></TABLE>>'
+# print 'multilines=',result
+ return result
# default is for when we can't determine the type of the service
# typically the server is down, or we can't authenticate, or it's too old code
shapes = {"registry": "diamond", "slicemgr":"ellipse", "aggregate":"box", 'default':'plaintext'}
- abbrevs = {"registry": "REG", "slicemgr":"SA", "aggregate":"AM", 'default':'[unknown]>'}
+ abbrevs = {"registry": "REG", "slicemgr":"SA", "aggregate":"AM", 'default':'[unknown interface]'}
# return a dictionary that translates into the node's attr
def get_layout (self):
else:
label=''
try: abbrev=Interface.abbrevs[version['interface']]
- except: abbrev=['default']
+ except: abbrev=Interface.abbrevs['default']
label += abbrev
if 'hrn' in version: label += " %s"%version['hrn']
else: label += "[no hrn]"
except: shape=Interface.shapes['default']
layout['shape']=shape
### fill color to outline wrongly configured bodies
- print 'Version for %s'%self.url(),version
- if 'sfa' not in version:
+ if 'geni_api' not in version and 'sfa' not in version:
layout['style']='filled'
layout['fillcolor']='gray'
return layout
class SfaScan:
# provide the entry points (a list of interfaces)
- def __init__ (self):
- pass
+ def __init__ (self, left_to_right=False, verbose=False):
+ self.verbose=verbose
+ self.left_to_right=left_to_right
def graph (self,entry_points):
graph=pygraphviz.AGraph(directed=True)
+ if self.left_to_right:
+ graph.graph_attr['rankdir']='LR'
self.scan(entry_points,graph)
return graph
for interface in to_scan:
# performing xmlrpc call
version=interface.get_version()
- # 'sfa' is expected if the call succeeded at all
+ if self.verbose:
+ sfa_logger().info("GetVersion at interface %s"%interface.url())
+ if not version:
+ sfa_logger().info("<EMPTY GetVersion(); offline or cannot authenticate>")
+ else:
+ for (k,v) in version.iteritems():
+ if not isinstance(v,dict):
+ sfa_logger().info("\r\t%s:%s"%(k,v))
+ else:
+ sfa_logger().info(k)
+ for (k1,v1) in v.iteritems():
+ sfa_logger().info("\r\t\t%s:%s"%(k1,v1))
+ # 'geni_api' is expected if the call succeeded at all
# 'peers' is needed as well as AMs typically don't have peers
- if 'sfa' in version and 'peers' in version:
+ if 'geni_api' in version and 'peers' in version:
# proceed with neighbours
- for (next_name,next_url) in version['peers'].items():
+ for (next_name,next_url) in version['peers'].iteritems():
next_interface=Interface(next_url)
# locate or create node in graph
try:
for node in graph.nodes():
interface=node2interface.get(node,None)
if interface:
- for (k,v) in interface.get_layout().items():
+ for (k,v) in interface.get_layout().iteritems():
node.attr[k]=v
else:
sfa_logger().error("MISSED interface with node %s"%node)
parser=OptionParser(usage=usage)
parser.add_option("-o","--output",action='append',dest='outfiles',default=[],
help="output filenames (cumulative) - defaults are %r"%default_outfiles)
+ parser.add_option("-l","--left-to-right",action="store_true",dest="left_to_right",default=False,
+ help="instead of top-to-bottom")
+ parser.add_option("-v","--verbose",action='store_true',dest='verbose',default=False,
+ help="verbose")
(options,args)=parser.parse_args()
if not args:
parser.print_help()
sys.exit(1)
if not options.outfiles:
options.outfiles=default_outfiles
- scanner=SfaScan()
+ scanner=SfaScan(left_to_right=options.left_to_right, verbose=options.verbose)
entries = [ Interface(entry) for entry in args ]
g=scanner.graph(entries)
sfa_logger().info("creating layout")
from sfa.util.sfaticket import SfaTicket
from sfa.util.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, AuthorityRecord
from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.util.xmlrpcprotocol import ServerException
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
from sfa.util.config import Config
from sfa.util.version import version_core
return record
+import uuid
+def unique_call_id(): return uuid.uuid4().urn
class Sfi:
elif record['type'] in ["slice"]:
try:
cred = self.get_slice_cred(record.get_name()).save_to_string(save_parents=True)
- except ServerException, e:
+ except xmlrpcprotocol.ServerException, e:
# XXX smbaker -- once we have better error return codes, update this
# to do something better than a string compare
if "Permission error" in e.args[0]:
else:
server = self.get_server_from_opts(opts)
version=server.GetVersion()
- for (k,v) in version.items():
+ for (k,v) in version.iteritems():
print "%-20s: %s"%(k,v)
# list instantiated slices
delegated_cred = self.delegate_cred(user_cred, get_authority(self.authority))
creds.append(delegated_cred)
server = self.get_server_from_opts(opts)
- results = server.ListSlices(creds)
+ results = server.ListSlices(creds, unique_call_id())
display_list(results)
return
if opts.delegate:
delegated_cred = self.delegate_cred(cred, get_authority(self.authority))
creds.append(delegated_cred)
- result = server.ListResources(creds, call_options)
+ result = server.ListResources(creds, call_options,unique_call_id())
format = opts.format
if opts.file is None:
display_rspec(result, format)
rspec_file = self.get_rspec_file(args[1])
rspec = open(rspec_file).read()
server = self.get_server_from_opts(opts)
- result = server.CreateSliver(slice_urn, creds, rspec, [])
+ result = server.CreateSliver(slice_urn, creds, rspec, [], unique_call_id())
print result
return result
delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
creds.append(delegated_cred)
server = self.get_server_from_opts(opts)
- return server.DeleteSliver(slice_urn, creds)
+ return server.DeleteSliver(slice_urn, creds, unique_call_id())
# start named slice
def start(self, opts, args):
delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
creds.append(delegated_cred)
time = args[1]
- return server.RenewSliver(slice_urn, creds, time)
+ return server.RenewSliver(slice_urn, creds, time, unique_call_id())
def status(self, opts, args):
delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
creds.append(delegated_cred)
server = self.get_server_from_opts(opts)
- print server.SliverStatus(slice_urn, creds)
+ print server.SliverStatus(slice_urn, creds, unique_call_id())
def shutdown(self, opts, args):
#
# sfa Wraps PLCAPI into the SFA compliant API
#
-# chkconfig: 2345 5 99
+# hopefully right after plc
+# chkconfig: 2345 61 39
#
# description: Wraps PLCAPI into the SFA compliant API
#
-# $Id$
-# $URL$
-#
# Source config
[ -f /etc/sfa/sfa_config ] && . /etc/sfa/sfa_config
reload
- if [ "$SFA_REGISTRY_ENABLED" ]; then
+ if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
fi
- if [ "$SFA_AGGREGATE_ENABLED" ]; then
+ if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
fi
- if [ "$SFA_SM_ENABLED" ]; then
+ if [ "$SFA_SM_ENABLED" -eq 1 ]; then
action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
fi
- if [ "$SFA_FLASHPOLICY_ENABLED" ]; then
+ if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
fi
from sfa.trust.credential import Credential
from sfa.plc.api import SfaAPI
from sfa.util.plxrn import hrn_to_pl_slicename, slicename_to_hrn
+from sfa.util.callids import Callids
##
# The data structure used to represent a cloud.
return clusterList
-def get_rspec(api, creds, options):
+def ListResources(api, creds, options, call_id):
+ if Callids().already_handled(call_id): return ""
global cloud
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
"""
Hook called via 'sfi.py create'
"""
-def create_slice(api, xrn, creds, xml, users):
+def CreateSliver(api, xrn, creds, xml, users, call_id):
+ if Callids().already_handled(call_id): return ""
+
global cloud
hrn = urn_to_hrn(xrn)[0]
conn = getEucaConnection()
if not conn:
print >>sys.stderr, 'Error: Cannot create a connection to Eucalyptus'
- return False
+ return ""
# Validate RSpec
schemaXML = ET.parse(EUCALYPTUS_RSPEC_SCHEMA)
inst_type = instType)
eucaInst.reserveInstance(conn, pubKeys)
- return True
+ # xxx - should return altered rspec
+ # with enough data for the client to understand what's happened
+ return xml
def main():
init_server()
#theRSpec = None
#with open(sys.argv[1]) as xml:
# theRSpec = xml.read()
- #create_slice(None, 'planetcloud.pc.test', theRSpec)
+ #CreateSliver(None, 'planetcloud.pc.test', theRSpec, 'call-id-cloudtest')
- #rspec = get_rspec('euca', 'planetcloud.pc.test', 'planetcloud.pc.marcoy')
+ #rspec = ListResources('euca', 'planetcloud.pc.test', 'planetcloud.pc.marcoy', 'test_euca')
#print rspec
print getKeysForSlice('gc.gc.test1')
#!/usr/bin/python
-from sfa.util.rspec import RSpec
import sys
import pdb
+import xml.dom.minidom
+
+from sfa.util.rspec import RSpec
from sfa.util.xrn import urn_to_hrn, hrn_to_urn, get_authority
from sfa.util.plxrn import hrn_to_pl_slicename
from sfa.util.plxrn import hrn_to_pl_slicename
from sfa.server.aggregate import Aggregates
from sfa.server.registry import Registries
from sfa.util.faults import *
+from sfa.util.callids import Callids
-import xml.dom.minidom
SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
SFA_MAX_DEFAULT_RSPEC = '/etc/sfa/max_physical.xml'
return 1
-def get_rspec(api, creds, options):
+def ListResources(api, creds, options, call_id):
+ if Callids().already_handled(call_id): return ""
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
hrn, type = urn_to_hrn(xrn)
return (ret_rspec)
-def create_slice(api, xrn, creds, rspec_xml, users):
+def CreateSliver(api, xrn, creds, rspec_xml, users, call_id):
+ if Callids().already_handled(call_id): return ""
+
global topology
hrn = urn_to_hrn(xrn)[0]
topology = get_interface_map()
unlock_state_file()
- return True
+ # xxx - should return altered rspec
+ # with enough data for the client to understand what's happened
+ return rspec_xml
def rspec_to_allocations(rspec):
ifs = []
t = get_interface_map()
r = RSpec()
rspec_xml = open(sys.argv[1]).read()
- #get_rspec(None,'foo')
- create_slice(None, "plc.princeton.sap0", rspec_xml)
+ #ListResources(None,'foo')
+ CreateSliver(None, "plc.princeton.sap0", rspec_xml, 'call-id-sap0')
if __name__ == "__main__":
main()
from sfa.server.registry import Registries
from sfa.util.config import Config
from sfa.plc.nodes import *
+from sfa.util.callids import Callids
# Message IDs for all the SFA light calls
# This will be used by the aggrMgr controller
if DEBUG: print "Received stop_slice call"
return msg_aggrMgr(SFA_STOP_SLICE)
-def delete_slice(cred, xrn):
+def DeleteSliver(cred, xrn, call_id):
+ if Callids().already_handled(call_id): return ""
hrn = urn_to_hrn(xrn)[0]
- if DEBUG: print "Received delete_slice call"
+ if DEBUG: print "Received DeleteSliver call"
return msg_aggrMgr(SFA_DELETE_SLICE)
def reset_slices(cred, xrn):
if DEBUG: print "Received reset_slices call"
return msg_aggrMgr(SFA_RESET_SLICES)
-def create_slice(cred, xrn, rspec):
+### Thierry: xxx this should have api as a first arg - probably outdated
+def CreateSliver(cred, xrn, rspec, call_id):
+ if Callids().already_handled(call_id): return ""
+
hrn = urn_to_hrn(xrn)[0]
- if DEBUG: print "Received create_slice call"
+ if DEBUG: print "Received CreateSliver call"
slice_id = generate_slide_id(cred, hrn)
msg = struct.pack('> B%ds%ds' % (len(slice_id)+1, len(rspec)), SFA_CREATE_SLICE, slice_id, rspec)
aggrMgr_sock.close()
if DEBUG: print "----------------"
- return 1
+ return rspec
except socket.error, message:
print "Socket error"
except IOerror, message:
print "IO error"
- return 0
+ return ""
-def get_rspec(cred, xrn=None):
+# Thierry : xxx this would need to handle call_id like the other AMs but is outdated...
+def ListResources(cred, xrn=None):
hrn = urn_to_hrn(xrn)[0]
- if DEBUG: print "Received get_rspec call"
+ if DEBUG: print "Received ListResources call"
slice_id = generate_slide_id(cred, hrn)
msg = struct.pack('> B%ds' % len(slice_id), SFA_GET_RESOURCES, slice_id)
r = RSpec()
r.parseFile(sys.argv[1])
rspec = r.toDict()
- create_slice(None,'plc',rspec)
+ CreateSliver(None,'plc',rspec,'call-id-plc')
if __name__ == "__main__":
main()
from sfa.plc.slices import *
from sfa.util.version import version_core
from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
def GetVersion(api):
xrn=Xrn(api.hrn)
hostnames.append(node.hostname)
return hostnames
-def slice_status(api, slice_xrn, creds):
- hrn, type = urn_to_hrn(slice_xrn)
+def SliverStatus(api, slice_xrn, creds, call_id):
+ if Callids().already_handled(call_id): return {}
+
+ (hrn, type) = urn_to_hrn(slice_xrn)
# find out where this slice is currently running
api.logger.info(hrn)
slicename = hrn_to_pl_slicename(hrn)
raise Exception("Slice %s not found (used %s as slicename internally)" % slice_xrn, slicename)
slice = slices[0]
- nodes = api.plshell.GetNodes(api.plauth, slice['node_ids'],
- ['hostname', 'site_id', 'boot_state', 'last_contact'])
+ # report about the local nodes only
+ nodes = api.plshell.GetNodes(api.plauth, {'node_id':slice['node_ids'],'peer_id':None},
+ ['hostname', 'site_id', 'boot_state', 'last_contact'])
site_ids = [node['site_id'] for node in nodes]
sites = api.plshell.GetSites(api.plauth, site_ids, ['site_id', 'login_base'])
- sites_dict = {}
- for site in sites:
- sites_dict[site['site_id']] = site['login_base']
-
- # XX remove me
- #api.logger.info(slice_xrn)
- #api.logger.info(slice)
- #api.logger.info(nodes)
- # XX remove me
+ sites_dict = dict ( [ (site['site_id'],site['login_base'] ) for site in sites ] )
result = {}
top_level_status = 'unknown'
res['pl_hostname'] = node['hostname']
res['pl_boot_state'] = node['boot_state']
res['pl_last_contact'] = node['last_contact']
- if not node['last_contact'] is None:
+ if node['last_contact'] is not None:
res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
res['geni_urn'] = hostname_to_urn(api.hrn, sites_dict[node['site_id']], node['hostname'])
if node['boot_state'] == 'boot':
res['geni_status'] = 'ready'
else:
res['geni_status'] = 'failed'
- top_level_staus = failed
+ top_level_status = 'failed'
res['geni_error'] = ''
# XX remove me
return result
-def create_slice(api, slice_xrn, creds, rspec, users):
+def CreateSliver(api, slice_xrn, creds, rspec, users, call_id):
"""
Create the sliver[s] (slice) at this aggregate.
Verify HRN and initialize the slice record in PLC if necessary.
"""
+ if Callids().already_handled(call_id): return ""
reg_objects = __get_registry_objects(slice_xrn, creds, users)
- hrn, type = urn_to_hrn(slice_xrn)
+ (hrn, type) = urn_to_hrn(slice_xrn)
peer = None
slices = Slices(api)
peer = slices.get_peer(hrn)
sfa_peer = slices.get_sfa_peer(hrn)
registry = api.registries[api.hrn]
credential = api.getCredential()
- site_id, remote_site_id = slices.verify_site(registry, credential, hrn,
- peer, sfa_peer, reg_objects)
+ (site_id, remote_site_id) = slices.verify_site(registry, credential, hrn,
+ peer, sfa_peer, reg_objects)
slice_record = slices.verify_slice(registry, credential, hrn, site_id,
- remote_site_id, peer, sfa_peer, reg_objects)
+ remote_site_id, peer, sfa_peer, reg_objects)
network = Network(api)
api.plshell.BindObjectToPeer(api.plauth, 'slice', slice.id, peer,
slice.peer_id)
- # print network.toxml()
-
- return True
+ # xxx - check this holds enough data for the client to understand what's happened
+ return network.toxml()
-def renew_slice(api, xrn, creds, expiration_time):
- hrn, type = urn_to_hrn(xrn)
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+ if Callids().already_handled(call_id): return True
+ (hrn, type) = urn_to_hrn(xrn)
slicename = hrn_to_pl_slicename(hrn)
slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
if not slices:
slice = slices[0]
requested_time = utcparse(expiration_time)
record = {'expires': int(time.mktime(requested_time.timetuple()))}
- api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
- return 1
+ try:
+ api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
+ return True
+ except:
+ return False
def start_slice(api, xrn, creds):
hrn, type = urn_to_hrn(xrn)
# XX not implemented at this interface
return 1
-def delete_slice(api, xrn, creds):
- hrn, type = urn_to_hrn(xrn)
+def DeleteSliver(api, xrn, creds, call_id):
+ if Callids().already_handled(call_id): return ""
+ (hrn, type) = urn_to_hrn(xrn)
slicename = hrn_to_pl_slicename(hrn)
slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
if not slices:
api.plshell.BindObjectToPeer(api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
return 1
-def get_slices(api, creds):
+# xxx Thierry : caching at the aggregate level sounds wrong...
+caching=True
+#caching=False
+def ListSlices(api, creds, call_id):
+ if Callids().already_handled(call_id): return []
# look in cache first
- if api.cache:
+ if caching and api.cache:
slices = api.cache.get('slices')
if slices:
return slices
slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
# cache the result
- if api.cache:
+ if caching and api.cache:
api.cache.add('slices', slice_urns)
return slice_urns
-def get_rspec(api, creds, options):
+def ListResources(api, creds, options,call_id):
+ if Callids().already_handled(call_id): return ""
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
- hrn, type = urn_to_hrn(xrn)
+ (hrn, type) = urn_to_hrn(xrn)
# look in cache first
- if api.cache and not xrn:
+ if caching and api.cache and not xrn:
rspec = api.cache.get('nodes')
if rspec:
+ api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
return rspec
network = Network(api)
rspec = network.toxml()
# cache the result
- if api.cache and not xrn:
+ if caching and api.cache and not xrn:
api.cache.add('nodes', rspec)
return rspec
credential = api.getCredential()
records = registry.Resolve(xrn, credential)
- # similar to create_slice, we must verify that the required records exist
+ # similar to CreateSliver, we must verify that the required records exist
# at this aggregate before we can issue a ticket
site_id, remote_site_id = slices.verify_site(registry, credential, slice_hrn,
peer, sfa_peer, reg_objects)
def main():
api = SfaAPI()
"""
- rspec = get_rspec(api, "plc.princeton.sapan", None)
- #rspec = get_rspec(api, "plc.princeton.coblitz", None)
- #rspec = get_rspec(api, "plc.pl.sirius", None)
+ rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
+ #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
+ #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
print rspec
"""
f = open(sys.argv[1])
xml = f.read()
f.close()
- create_slice(api, "plc.princeton.sapan", xml)
+ CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
if __name__ == "__main__":
main()
from sfa.plc.slices import *
from sfa.managers.aggregate_manager_pl import __get_registry_objects, __get_hostnames
from sfa.util.version import version_core
+from sfa.util.callids import Callids
# VINI aggregate is almost identical to PLC aggregate for many operations,
# so lets just import the methods form the PLC manager
from sfa.managers.aggregate_manager_pl import (
-start_slice, stop_slice, renew_slice, reset_slice, get_slices, get_ticket, slice_status)
+start_slice, stop_slice, RenewSliver, reset_slice, ListSlices, get_ticket, SliverStatus)
def GetVersion(api):
'hrn':xrn.get_hrn(),
})
-def delete_slice(api, xrn, creds):
- hrn, type = urn_to_hrn(xrn)
+def DeleteSliver(api, xrn, creds, call_id):
+ if Callids().already_handled(call_id): return ""
+ (hrn, type) = urn_to_hrn(xrn)
slicename = hrn_to_pl_slicename(hrn)
slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
if not slices:
api.plshell.DeleteSliceFromNodes(api.plauth, slicename, slice['node_ids'])
return 1
-def create_slice(api, xrn, creds, xml, users):
+def CreateSliver(api, xrn, creds, xml, users, call_id):
"""
Verify HRN and initialize the slice record in PLC if necessary.
"""
+ if Callids().already_handled(call_id): return ""
+
hrn, type = urn_to_hrn(xrn)
peer = None
reg_objects = __get_registry_objects(xrn, creds, users)
api.plshell.DeleteSliceFromNodes(api.plauth, slice.name, deleted_nodes)
network.updateSliceTags()
- # print network.toxml()
-
- return True
+ # xxx - check this holds enough data for the client to understand what's happened
+ return network.toxml()
-def get_rspec(api, creds, options):
+def ListResources(api, creds, options,call_id):
+ if Callids().already_handled(call_id): return ""
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
hrn, type = urn_to_hrn(xrn)
def main():
api = SfaAPI()
"""
- #rspec = get_rspec(api, None, None)
- rspec = get_rspec(api, "plc.princeton.iias", None)
+ #rspec = ListResources(api, None, None,)
+ rspec = ListResources(api, "plc.princeton.iias", None, 'vini_test')
print rspec
"""
f = open(sys.argv[1])
xml = f.read()
f.close()
- create_slice(api, "plc.princeton.iias", xml)
+ CreateSliver(api, "plc.princeton.iias", xml, 'call-id-iias')
if __name__ == "__main__":
main()
def stop_slice(api, slicename):
return
-def delete_slice(api, slicename):
+def DeleteSliver(api, slicename, call_id):
return
def reset_slice(api, slicename):
return
-def get_slices(api):
- return
+def ListSlices(api):
+ return []
def reboot():
return
sfa_component_setup.get_credential(force=True)
sfa_component_setup.get_trusted_certs()
-def slice_status(api, slice_xrn, creds):
+def SliverStatus(api, slice_xrn, creds):
result = {}
result['geni_urn'] = slice_xrn
result['geni_status'] = 'unknown'
slicename = PlXrn(xrn, type='slice').pl_slicename()
api.nodemanager.Stop(slicename)
-def delete_slice(api, xrn, creds):
+def DeleteSliver(api, xrn, creds, call_id):
slicename = PlXrn(xrn, type='slice').pl_slicename()
api.nodemanager.Destroy(slicename)
raise SliverDoesNotExist(slicename)
api.nodemanager.ReCreate(slicename)
-def get_slices(api):
+# xxx outdated - this should accept a credential & call_id
+def ListSlices(api):
# this returns a tuple, the data we want is at index 1
xids = api.nodemanager.GetXIDs()
# unfortunately the data we want is given to us as
from sfa.trust.certificate import Certificate, Keypair
from sfa.trust.gid import create_uuid
from sfa.util.version import version_core
-from sfa.managers.aggregate_manager_pl import slice_status
+# Thierry - turning this off, it's a slice interface not a registry one ?!?
+#from sfa.managers.aggregate_manager_pl import SliverStatus
# The GENI GetVersion call
def GetVersion(api):
- peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.items()
+ peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems()
if peername != api.hrn])
xrn=Xrn(api.hrn)
return version_core({'interface':'registry',
def resolve(api, xrns, type=None, full=True):
- # load all know registry names into a prefix tree and attempt to find
+ # load all known registry names into a prefix tree and attempt to find
# the longest matching prefix
if not isinstance(xrns, types.ListType):
if not type:
type = Xrn(xrns).get_type()
xrns = [xrns]
hrns = [urn_to_hrn(xrn)[0] for xrn in xrns]
- # create a dict whre key is an registry hrn and its value is a
+ # create a dict where key is a registry hrn and its value is a
# hrns at that registry (determined by the known prefix tree).
xrn_dict = {}
registries = api.registries
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
import sfa.plc.peers as peers
from sfa.util.version import version_core
-
-# XX FIX ME: should merge result from multiple aggregates instead of
-# calling aggregate implementation
-from sfa.managers.aggregate_manager_pl import slice_status
+from sfa.util.callids import Callids
# we have specialized xmlrpclib.ServerProxy to remember the input url
# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
def GetVersion(api):
# peers explicitly in aggregates.xml
- peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.items()
+ peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
if peername != api.hrn])
xrn=Xrn (api.hrn)
sm_version=version_core({'interface':'slicemgr',
sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
return sm_version
-def create_slice(api, xrn, creds, rspec, users):
+def CreateSliver(api, xrn, creds, rspec, users, call_id):
+
+ if Callids().already_handled(call_id): return ""
+
hrn, type = urn_to_hrn(xrn)
# Validate the RSpec against PlanetLab's schema --disabled for now
# Just send entire RSpec to each aggregate
server = api.aggregates[aggregate]
- threads.run(server.CreateSliver, xrn, credential, rspec, users)
+ threads.run(server.CreateSliver, xrn, credential, rspec, users, call_id)
results = threads.get_results()
merged_rspec = merge_rspecs(results)
return merged_rspec
-def renew_slice(api, xrn, creds, expiration_time):
- hrn, type = urn_to_hrn(xrn)
+def RenewSliver(api, xrn, creds, expiration_time, call_id):
+ if Callids().already_handled(call_id): return True
+ (hrn, type) = urn_to_hrn(xrn)
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
continue
server = api.aggregates[aggregate]
- threads.run(server.RenewSliver, xrn, [credential], expiration_time)
- threads.get_results()
- return 1
+ threads.run(server.RenewSliver, xrn, [credential], expiration_time, call_id)
+ # 'and' the results
+ return reduce (lambda x,y: x and y, threads.get_results() , True)
def get_ticket(api, xrn, creds, rspec, users):
slice_hrn, type = urn_to_hrn(xrn)
if not credential:
credential = api.getCredential()
threads = ThreadManager()
- for aggregate, aggregate_rspec in aggregate_rspecs.items():
+ for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
# prevent infinite loop. Dont send request back to caller
# unless the caller is the aggregate's SM
if caller_hrn == aggregate and aggregate != api.hrn:
return ticket.save_to_string(save_parents=True)
-def delete_slice(api, xrn, creds):
- hrn, type = urn_to_hrn(xrn)
-
+def DeleteSliver(api, xrn, creds, call_id):
+ if Callids().already_handled(call_id): return ""
+ (hrn, type) = urn_to_hrn(xrn)
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
if caller_hrn == aggregate and aggregate != api.hrn:
continue
server = api.aggregates[aggregate]
- threads.run(server.DeleteSliver, xrn, credential)
+ threads.run(server.DeleteSliver, xrn, credential, call_id)
threads.get_results()
return 1
"""
return 1
-def get_slices(api, creds):
+# Thierry : caching at the slicemgr level makes sense to some extent
+caching=True
+#caching=False
+def ListSlices(api, creds, call_id):
+
+ if Callids().already_handled(call_id): return []
# look in cache first
- if api.cache:
+ if caching and api.cache:
slices = api.cache.get('slices')
if slices:
return slices
if caller_hrn == aggregate and aggregate != api.hrn:
continue
server = api.aggregates[aggregate]
- threads.run(server.ListSlices, credential)
+ threads.run(server.ListSlices, credential, call_id)
# combime results
results = threads.get_results()
slices.extend(result)
# cache the result
- if api.cache:
+ if caching and api.cache:
api.cache.add('slices', slices)
return slices
-
-def get_rspec(api, creds, options):
-
+
+
+def ListResources(api, creds, options, call_id):
+
+ if Callids().already_handled(call_id): return ""
+
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
- hrn, type = urn_to_hrn(xrn)
+ (hrn, type) = urn_to_hrn(xrn)
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
origin_hrn = Credential(string=creds).get_gid_caller().get_hrn()
# look in cache first
- if api.cache and not xrn:
+ if caching and api.cache and not xrn:
rspec = api.cache.get('nodes')
if rspec:
return rspec
- hrn, type = urn_to_hrn(xrn)
-
# get the callers hrn
valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
server = api.aggregates[aggregate]
my_opts = copy(options)
my_opts['geni_compressed'] = False
- threads.run(server.ListResources, credential, my_opts)
+ threads.run(server.ListResources, credential, my_opts, call_id)
#threads.run(server.get_resources, cred, xrn, origin_hrn)
results = threads.get_results()
merged_rspec = merge_rspecs(results)
# cache the result
- if api.cache and not xrn:
+ if caching and api.cache and not xrn:
api.cache.add('nodes', merged_rspec)
return merged_rspec
+# first draft at a merging SliverStatus
+def SliverStatus(api, slice_xrn, creds, call_id):
+ if Callids().already_handled(call_id): return {}
+ # attempt to use delegated credential first
+ credential = api.getDelegatedCredential(creds)
+ if not credential:
+ credential = api.getCredential()
+ threads = ThreadManager()
+ for aggregate in api.aggregates:
+ server = api.aggregates[aggregate]
+ threads.run (server.SliverStatus, slice_xrn, credential, call_id)
+ results = threads.get_results()
+
+    # get rid of any void result - e.g. when call_id was already handled, in which case by convention we return {}
+    results = [ result for result in results if result and result.get('geni_resources')]
+
+ # do not try to combine if there's no result
+ if not results : return {}
+
+ # otherwise let's merge stuff
+ overall = {}
+
+ # mmh, it is expected that all results carry the same urn
+ overall['geni_urn'] = results[0]['geni_urn']
+
+ # consolidate geni_status - simple model using max on a total order
+ states = [ 'ready', 'configuring', 'failed', 'unknown' ]
+ # hash name to index
+ shash = dict ( zip ( states, range(len(states)) ) )
+    def combine_status (x,y):
+        # shash maps name->index; map the max index back to a name via states
+        return states [ max (shash[x],shash[y]) ]
+ overall['geni_status'] = reduce (combine_status, [ result['geni_status'] for result in results], 'ready' )
+
+ # {'ready':0,'configuring':1,'failed':2,'unknown':3}
+ # append all geni_resources
+ overall['geni_resources'] = \
+ reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+
+ return overall
+
def main():
r = RSpec()
r.parseFile(sys.argv[1])
rspec = r.toDict()
- create_slice(None,'plc.princeton.tmacktestslice',rspec)
+ CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
if __name__ == "__main__":
main()
key = self.network.free_egre_key()
except:
raise InvalidRSpec("ran out of EGRE keys!")
- tag = self.update_tag('egre_key', key, None, 10)
+ tag = self.update_tag('egre_key', key, None, 'admin')
return
def turn_on_netns(self):
tag = self.get_tag('netns')
if (not tag) or (tag.value != '1'):
- tag = self.update_tag('netns', '1', None, 10)
+ tag = self.update_tag('netns', '1', None, 'admin')
return
def turn_off_netns(self):
break
else:
newcaps = "CAP_NET_ADMIN," + tag.value
- self.update_tag('capabilities', newcaps, None, 10)
+ self.update_tag('capabilities', newcaps, None, 'admin')
else:
- tag = self.add_tag('capabilities', 'CAP_NET_ADMIN', None, 10)
+ tag = self.add_tag('capabilities', 'CAP_NET_ADMIN', None, 'admin')
return
def remove_cap_net_admin(self):
newcaps.append(cap)
if newcaps:
value = ','.join(newcaps)
- self.update_tag('capabilities', value, None, 10)
+ self.update_tag('capabilities', value, None, 'admin')
else:
tag.delete()
return
def updateSliceTags(self):
slice = self.slice
- tag = slice.update_tag('vini_topo', 'manual', None, 10)
+ tag = slice.update_tag('vini_topo', 'manual', None, 'admin')
slice.assign_egre_key()
slice.turn_on_netns()
slice.add_cap_net_admin()
linkdesc.append(node.get_topo_rspec(link))
if linkdesc:
topo_str = "%s" % linkdesc
- tag = slice.update_tag('topo_rspec', topo_str, node, 10)
+ tag = slice.update_tag('topo_rspec', topo_str, node, 'admin')
# Expire the un-updated topo_rspec tags
for tag in self.getSliceTags():
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
Parameter(str, "RSpec"),
- Parameter(type([]), "List of user information")
+ Parameter(type([]), "List of user information"),
+ Parameter(str, "call_id"),
]
returns = Parameter(str, "Allocated RSpec")
- def call(self, slice_xrn, creds, rspec, users):
+ def call(self, slice_xrn, creds, rspec, users, call_id=""):
hrn, type = urn_to_hrn(slice_xrn)
self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
chain_name = 'INCOMING'
elif self.api.interface in ['slicemgr']:
chain_name = 'FORWARD-INCOMING'
+ self.api.logger.debug("CreateSliver: sfatables on chain %s"%chain_name)
rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
- allocated = manager.create_slice(self.api, slice_xrn, creds, rspec, users)
- return rspec
+ return manager.CreateSliver(self.api, slice_xrn, creds, rspec, users, call_id)
-### $Id: stop_slice.py 17732 2010-04-19 21:10:45Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/stop_slice.py $
-
from sfa.util.faults import *
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
Parameter(str, "Human readable name of slice to delete (hrn or urn)"),
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
+ Parameter(str, "call_id"),
]
returns = Parameter(int, "1 if successful")
- def call(self, xrn, creds):
- hrn, type = urn_to_hrn(xrn)
+ def call(self, xrn, creds, call_id=""):
+ (hrn, type) = urn_to_hrn(xrn)
valid_creds = self.api.auth.checkCredentials(creds, 'deletesliver', hrn)
#log the call
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
manager = self.api.get_interface_manager()
- manager.delete_slice(self.api, xrn, creds)
+ manager.DeleteSliver(self.api, xrn, creds, call_id)
return 1
Parameter(type([]), "List of user information")
]
- returns = Parameter(str, "String represeneation of a ticket object")
+ returns = Parameter(str, "String representation of a ticket object")
def call(self, xrn, creds, rspec, users):
hrn, type = urn_to_hrn(xrn)
manager = self.api.get_interface_manager()
- # flter rspec through sfatables
+ # filter rspec through sfatables
if self.api.interface in ['aggregate']:
chain_name = 'OUTGOING'
elif self.api.interface in ['slicemgr']:
accepts = [
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
- Parameter(dict, "Options")
+ Parameter(dict, "Options"),
+ Parameter(str, "call_id"),
]
returns = Parameter(str, "List of resources")
- def call(self, creds, options):
+ def call(self, creds, options, call_id=""):
self.api.logger.info("interface: %s\tmethod-name: %s" % (self.api.interface, self.name))
# get slice's hrn from options
xrn = options.get('geni_slice_urn', '')
- hrn, _ = urn_to_hrn(xrn)
+ (hrn, _) = urn_to_hrn(xrn)
# Find the valid credentials
valid_creds = self.api.auth.checkCredentials(creds, 'listnodes', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
# get manager for this interface
manager = self.api.get_interface_manager()
- rspec = manager.get_rspec(self.api, creds, options)
+ rspec = manager.ListResources(self.api, creds, options, call_id)
# filter rspec through sfatables
if self.api.interface in ['aggregate']:
chain_name = 'OUTGOING'
elif self.api.interface in ['slicemgr']:
chain_name = 'FORWARD-OUTGOING'
+ self.api.logger.debug("ListResources: sfatables on chain %s"%chain_name)
filtered_rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
if options.has_key('geni_compressed') and options['geni_compressed'] == True:
accepts = [
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
+ Parameter(str, "call_id"),
]
returns = Parameter(list, "List of slice names")
- def call(self, creds):
+ def call(self, creds, call_id=""):
valid_creds = self.api.auth.checkCredentials(creds, 'listslices')
#log the call
self.api.logger.info("interface: %s\tcaller-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, self.name))
manager = self.api.get_interface_manager()
- return manager.get_slices(self.api, creds)
+ return manager.ListSlices(self.api, creds, call_id)
accepts = [
Parameter(str, "Slice URN"),
Parameter(type([str]), "List of credentials"),
- Parameter(str, "Expiration time in RFC 3339 format")
+ Parameter(str, "Expiration time in RFC 3339 format"),
+ Parameter(str, "call_id"),
]
returns = Parameter(bool, "Success or Failure")
- def call(self, slice_xrn, creds, expiration_time):
- hrn, type = urn_to_hrn(slice_xrn)
+ def call(self, slice_xrn, creds, expiration_time, call_id=""):
+
+ (hrn, type) = urn_to_hrn(slice_xrn)
self.api.logger.info("interface: %s\ttarget-hrn: %s\tcaller-creds: %s\tmethod-name: %s"%(self.api.interface, hrn, creds, self.name))
# Validate that the time does not go beyond the credential's expiration time
requested_time = utcparse(expiration_time)
if requested_time > Credential(string=valid_creds[0]).get_expiration():
- raise InsufficientRights('SliverStatus: Credential expires before requested expiration time')
+ raise InsufficientRights('Renewsliver: Credential expires before requested expiration time')
if requested_time > datetime.datetime.utcnow() + datetime.timedelta(days=60):
raise Exception('Cannot renew > 60 days from now')
manager = self.api.get_interface_manager()
- manager.renew_slice(self.api, slice_xrn, valid_creds, expiration_time)
-
- return 1
+ return manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, call_id)
Parameter(str, "Slice URN"),
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
+ Parameter(str, "call_id"),
]
returns = Parameter(dict, "Status details")
- def call(self, slice_xrn, creds):
+ def call(self, slice_xrn, creds, call_id=""):
hrn, type = urn_to_hrn(slice_xrn)
valid_creds = self.api.auth.checkCredentials(creds, 'sliverstatus', hrn)
self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
manager = self.api.get_interface_manager()
- status = manager.slice_status(self.api, hrn, valid_creds)
+ status = manager.SliverStatus(self.api, hrn, valid_creds, call_id)
return status
from sfa.util.faults import *
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
-import sys
from sfa.methods.CreateSliver import CreateSliver
class UpdateSliver(CreateSliver):
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
Parameter(str, "RSpec"),
- Parameter(type([]), "List of user information")
+ Parameter(type([]), "List of user information"),
+ Parameter(str, "call_id"),
]
returns = Parameter(str, "Allocated RSpec")
- def call(self, slice_xrn, creds, rspec, users):
+ def call(self, slice_xrn, creds, rspec, users, call_id=""):
- return CreateSliver.call(self, slice_xrn, creds, rspec, users)
+ return CreateSliver.call(self, slice_xrn, creds, rspec, users, call_id)
-### $Id: get_slices.py 14387 2009-07-08 18:19:11Z faiyaza $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/get_registries.py $
from types import StringTypes
from sfa.util.faults import *
from sfa.util.xrn import urn_to_hrn
break
# fill in key info
if record['type'] == 'user':
- pubkeys = [keys[key_id]['key'] for key_id in record['key_ids'] if key_id in keys]
- record['keys'] = pubkeys
+ if 'key_ids' not in record:
+ self.logger.info("user record has no 'key_ids' - need to import from myplc ?")
+ else:
+ pubkeys = [keys[key_id]['key'] for key_id in record['key_ids'] if key_id in keys]
+ record['keys'] = pubkeys
# fill in record hrns
records = self.fill_record_hrns(records)
from __future__ import with_statement
+import sys
import re
import socket
+from StringIO import StringIO
+from lxml import etree
+from xmlbuilder import XMLBuilder
+
+from sfa.util.faults import *
+#from sfa.util.sfalogging import sfa_logger
from sfa.util.xrn import get_authority
from sfa.util.plxrn import hrn_to_pl_slicename, hostname_to_urn
-from sfa.util.faults import *
-from xmlbuilder import XMLBuilder
-from lxml import etree
-import sys
-from StringIO import StringIO
class Sliver:
def __init__(self, node):
try:
val = self.sites[id]
except:
+ self.api.logger.error("Invalid RSpec: site ID %s not found" % id )
raise InvalidRSpec("site ID %s not found" % id)
return val
"""
tmp = []
for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
- t = node['node_id'], Node(self, node)
- tmp.append(t)
+ try:
+ t = node['node_id'], Node(self, node)
+ tmp.append(t)
+ except:
+ self.api.logger.error("Failed to add node %s (%s) to RSpec" % (node['hostname'], node['node_id']))
+
return dict(tmp)
def get_ifaces(self, api):
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<rspec id="max_rspec_slice1" xmlns="http://geni.maxgigapop.net/aggregate/rspec/20100412/" schemaLocation="http://geni.dragon.maxgigapop.net/max-rspec.xsd"
- xmlns:CtrlPlane="http://ogf.org/schema/network/topology/ctrlPlane/20080828/" CtrlPlane:schemaLocation="http://www.controlplane.net/idcp-v1.1/nmtopo-ctrlp.xsd">
- <aggregate>geni.maxgigapop.net</aggregate>
- <description>Example MAX RSpec</description>
- <lifetime id="time-1271533930-1271563981">
- <CtrlPlane:start type="CtrlPlane:TimeContent">1279848020</CtrlPlane:start>
- <CtrlPlane:end type="CtrlPlane:TimeContent">1280712039</CtrlPlane:end>
- </lifetime>
- <computeResource id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1">
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3">
- <address>206.196.176.55</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_1">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.10.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_1</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_2">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.30.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_2</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5">
- <address>206.196.176.138</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_3">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.20.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_3</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_2">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.30.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_2</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2">
- <address>206.196.176.133</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_1">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.10.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_1</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_3">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.20.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_3</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- </computeResource>
-</rspec>
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>\r
-<tns:RSpec xmlns:tns="http://yuba.stanford.edu/egeni/rspec" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://yuba.stanford.edu/egeni/rspec stanford-rspec.xsd ">\r
- <tns:version>tns:version</tns:version>\r
- <tns:switchEntry>\r
- <tns:node>\r
- <tns:nodeId>tns:nodeId</tns:nodeId>\r
- <tns:type>0</tns:type>\r
- <tns:interfaceEntry>\r
- <tns:port>0</tns:port>\r
- <tns:remoteNodeId>tns:remoteNodeId</tns:remoteNodeId>\r
- <tns:remotePort>0</tns:remotePort>\r
- <tns:flowSpaceEntry>\r
- <tns:policy>readonly</tns:policy>\r
- <tns:dl_src>0F00</tns:dl_src>\r
- <tns:dl_dst>0F00</tns:dl_dst>\r
- <tns:dl_type>0</tns:dl_type>\r
- <tns:vlan_id>0</tns:vlan_id>\r
- <tns:ip_src>0F00</tns:ip_src>\r
- <tns:ip_dst>0F00</tns:ip_dst>\r
- <tns:ip_proto>0</tns:ip_proto>\r
- <tns:tp_src>0</tns:tp_src>\r
- <tns:tp_dst>0</tns:tp_dst>\r
- </tns:flowSpaceEntry>\r
- <tns:bandwidth>0.0</tns:bandwidth>\r
- <tns:latency>0.0</tns:latency>\r
- <tns:lossRate>0.0</tns:lossRate>\r
- </tns:interfaceEntry>\r
- </tns:node>\r
- <tns:switchFeatures>tns:switchFeatures</tns:switchFeatures>\r
- <tns:controllerUrl>tns:controllerUrl</tns:controllerUrl>\r
- </tns:switchEntry>\r
- <tns:hostEntry>\r
- <tns:node>\r
- <tns:nodeId>tns:nodeId</tns:nodeId>\r
- <tns:type>0</tns:type>\r
- <tns:interfaceEntry>\r
- <tns:port>0</tns:port>\r
- <tns:remoteNodeId>tns:remoteNodeId</tns:remoteNodeId>\r
- <tns:remotePort>0</tns:remotePort>\r
- <tns:flowSpaceEntry>\r
- <tns:policy>readonly</tns:policy>\r
- <tns:dl_src>0F00</tns:dl_src>\r
- <tns:dl_dst>0F00</tns:dl_dst>\r
- <tns:dl_type>0</tns:dl_type>\r
- <tns:vlan_id>0</tns:vlan_id>\r
- <tns:ip_src>0F00</tns:ip_src>\r
- <tns:ip_dst>0F00</tns:ip_dst>\r
- <tns:ip_proto>0</tns:ip_proto>\r
- <tns:tp_src>0</tns:tp_src>\r
- <tns:tp_dst>0</tns:tp_dst>\r
- </tns:flowSpaceEntry>\r
- <tns:bandwidth>0.0</tns:bandwidth>\r
- <tns:latency>0.0</tns:latency>\r
- <tns:lossRate>0.0</tns:lossRate>\r
- </tns:interfaceEntry>\r
- </tns:node>\r
- <tns:cpuUtil>0.0</tns:cpuUtil>\r
- <tns:memUtil>0.0</tns:memUtil>\r
- </tns:hostEntry>\r
- <tns:remoteNodeEntry>\r
- <tns:remoteURL>tns:remoteURL</tns:remoteURL>\r
- <tns:remoteType>tns:remoteType</tns:remoteType>\r
- <tns:node>\r
- <tns:nodeId>tns:nodeId</tns:nodeId>\r
- <tns:type>0</tns:type>\r
- <tns:interfaceEntry>\r
- <tns:port>0</tns:port>\r
- <tns:remoteNodeId>tns:remoteNodeId</tns:remoteNodeId>\r
- <tns:remotePort>0</tns:remotePort>\r
- <tns:flowSpaceEntry>\r
- <tns:policy>readonly</tns:policy>\r
- <tns:dl_src>0F00</tns:dl_src>\r
- <tns:dl_dst>0F00</tns:dl_dst>\r
- <tns:dl_type>0</tns:dl_type>\r
- <tns:vlan_id>0</tns:vlan_id>\r
- <tns:ip_src>0F00</tns:ip_src>\r
- <tns:ip_dst>0F00</tns:ip_dst>\r
- <tns:ip_proto>0</tns:ip_proto>\r
- <tns:tp_src>0</tns:tp_src>\r
- <tns:tp_dst>0</tns:tp_dst>\r
- </tns:flowSpaceEntry>\r
- <tns:bandwidth>0.0</tns:bandwidth>\r
- <tns:latency>0.0</tns:latency>\r
- <tns:lossRate>0.0</tns:lossRate>\r
- </tns:interfaceEntry>\r
- </tns:node>\r
- </tns:remoteNodeEntry>\r
- <tns:flowSpaceEntry>\r
- <tns:policy>readonly</tns:policy>\r
- <tns:dl_src>0F00</tns:dl_src>\r
- <tns:dl_dst>0F00</tns:dl_dst>\r
- <tns:dl_type>0</tns:dl_type>\r
- <tns:vlan_id>0</tns:vlan_id>\r
- <tns:ip_src>0F00</tns:ip_src>\r
- <tns:ip_dst>0F00</tns:ip_dst>\r
- <tns:ip_proto>0</tns:ip_proto>\r
- <tns:tp_src>0</tns:tp_src>\r
- <tns:tp_dst>0</tns:tp_dst>\r
- </tns:flowSpaceEntry>\r
-</tns:RSpec>\r
-\r
+++ /dev/null
-#!/usr/bin/python
-
-from sfa.util.rspec import RSpec
-import sys
-import pdb
-from sfa.util.xrn import get_authority
-from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.storage import *
-from sfa.util.policy import Policy
-from sfa.server.aggregate import Aggregates
-from sfa.server.registry import Registries
-from sfa.util.faults import *
-
-import xml.dom.minidom
-
-SFA_MAX_CONF_FILE = '/etc/sfa/max_allocations'
-SFA_MAX_DEFAULT_RSPEC = '/etc/sfa/max_physical.xml'
-SFA_MAX_CANNED_RSPEC = '/etc/sfa/max_physical_canned.xml'
-
-topology = {}
-
-class SfaOutOfResource(SfaFault):
- def __init__(self, interface):
- faultString = "Interface " + interface + " not available"
- SfaFault.__init__(self, 100, faultString, '')
-
-class SfaNoPairRSpec(SfaFault):
- def __init__(self, interface, interface2):
- faultString = "Interface " + interface + " should be paired with " + interface2
- SfaFault.__init__(self, 100, faultString, '')
-
-# Returns a mapping from interfaces to the nodes they lie on and their peer interfaces
-# i -> node,i_peer
-
-def get_interface_map():
- r = RSpec()
- r.parseFile(SFA_MAX_DEFAULT_RSPEC)
- rspec = r.toDict()
- capacity = rspec['rspec']['capacity']
- netspec = capacity[0]['netspec'][0]
- linkdefs = {}
- for n in netspec['nodespec']:
- ifspecs = n['ifspec']
- nodename = n['node']
- for i in ifspecs:
- ifname = i['name']
- linkid = i['linkid']
-
- if (linkdefs.has_key(linkid)):
- linkdefs[linkid].extend([(nodename,ifname)])
- else:
- linkdefs[linkid]=[(nodename,ifname)]
-
- # topology maps interface x interface -> link,node1,node2
- topology={}
-
- for k in linkdefs.keys():
- (n1,i1) = linkdefs[k][0]
- (n2,i2) = linkdefs[k][1]
-
- topology[i1] = (n1, i2)
- topology[i2] = (n2, i1)
-
-
- return topology
-
-
-def allocations_to_rspec(allocations):
- rspec = xml.dom.minidom.parse(SFA_MAX_DEFAULT_RSPEC)
- req = rspec.firstChild.appendChild(rspec.createElement("request"))
- for (iname,ip) in allocations:
- ifspec = req.appendChild(rspec.createElement("ifspec"))
- ifspec.setAttribute("name","tns:"+iname)
- ifspec.setAttribute("ip",ip)
-
- return rspec.toxml()
-
-
-def if_endpoints(ifs):
- nodes=[]
- for l in ifs:
- nodes.extend(topology[l][0])
- return nodes
-
-def lock_state_file():
- # Noop for demo
- return True
-
-def unlock_state_file():
- return True
- # Noop for demo
-
-def read_alloc_dict():
- alloc_dict={}
- rows = open(SFA_MAX_CONF_FILE).read().split('\n')
- for r in rows:
- columns = r.split(' ')
- if (len(columns)==2):
- hrn = columns[0]
- allocs = columns[1].split(',')
- ipallocs = map(lambda alloc:alloc.split('/'), allocs)
- alloc_dict[hrn]=ipallocs
- return alloc_dict
-
-def commit_alloc_dict(d):
- f = open(SFA_MAX_CONF_FILE, 'w')
- for hrn in d.keys():
- columns = d[hrn]
- ipcolumns = map(lambda x:"/".join(x), columns)
- row = hrn+' '+','.join(ipcolumns)+'\n'
- f.write(row)
- f.close()
-
-def collapse_alloc_dict(d):
- ret = []
- for k in d.keys():
- ret.extend(d[k])
- return ret
-
-
-def alloc_links(api, hrn, links_to_add, links_to_drop):
- slicename=hrn_to_pl_slicename(hrn)
- for (iface,ip) in links_to_add:
- node = topology[iface][0][0]
- try:
- api.plshell.AddSliceTag(api.plauth, slicename, "ip_addresses", ip, node)
- api.plshell.AddSliceTag(api.plauth, slicename, "vsys", "getvlan", node)
- except Exception:
- # Probably a duplicate tag. XXX July 21
- pass
- return True
-
-def alloc_nodes(api,hrn, requested_ifs):
- requested_nodes = if_endpoints(requested_ifs)
- create_slice_max_aggregate(api, hrn, requested_nodes)
-
-# Taken from slices.py
-
-def create_slice_max_aggregate(api, hrn, nodes):
- # Get the slice record from SFA
- global topology
- topology = get_interface_map()
- slice = {}
- registries = Registries(api)
- registry = registries[api.hrn]
- credential = api.getCredential()
- records = registry.resolve(credential, hrn)
- for record in records:
- if record.get_type() in ['slice']:
- slice = record.as_dict()
- if not slice:
- raise RecordNotFound(hrn)
-
- # Make sure slice exists at plc, if it doesnt add it
- slicename = hrn_to_pl_slicename(hrn)
- slices = api.plshell.GetSlices(api.plauth, [slicename], ['node_ids'])
- if not slices:
- parts = slicename.split("_")
- login_base = parts[0]
- # if site doesnt exist add it
- sites = api.plshell.GetSites(api.plauth, [login_base])
- if not sites:
- authority = get_authority(hrn)
- site_records = registry.resolve(credential, authority)
- site_record = {}
- if not site_records:
- raise RecordNotFound(authority)
- site_record = site_records[0]
- site = site_record.as_dict()
-
- # add the site
- site.pop('site_id')
- site_id = api.plshell.AddSite(api.plauth, site)
- else:
- site = sites[0]
-
- slice_fields = {}
- slice_keys = ['name', 'url', 'description']
- for key in slice_keys:
- if key in slice and slice[key]:
- slice_fields[key] = slice[key]
- api.plshell.AddSlice(api.plauth, slice_fields)
- slice = slice_fields
- slice['node_ids'] = 0
- else:
- slice = slices[0]
-
- # get the list of valid slice users from the registry and make
- # they are added to the slice
- researchers = record.get('researcher', [])
- for researcher in researchers:
- person_record = {}
- person_records = registry.resolve(credential, researcher)
- for record in person_records:
- if record.get_type() in ['user']:
- person_record = record
- if not person_record:
- pass
- person_dict = person_record.as_dict()
- persons = api.plshell.GetPersons(api.plauth, [person_dict['email']],
- ['person_id', 'key_ids'])
-
- # Create the person record
- if not persons:
- person_id=api.plshell.AddPerson(api.plauth, person_dict)
-
- # The line below enables the user account on the remote aggregate
- # soon after it is created.
- # without this the user key is not transfered to the slice
- # (as GetSlivers returns key of only enabled users),
- # which prevents the user from login to the slice.
- # We may do additional checks before enabling the user.
-
- api.plshell.UpdatePerson(api.plauth, person_id, {'enabled' : True})
- key_ids = []
- else:
- key_ids = persons[0]['key_ids']
-
- api.plshell.AddPersonToSlice(api.plauth, person_dict['email'],
- slicename)
-
- # Get this users local keys
- keylist = api.plshell.GetKeys(api.plauth, key_ids, ['key'])
- keys = [key['key'] for key in keylist]
-
- # add keys that arent already there
- for personkey in person_dict['keys']:
- if personkey not in keys:
- key = {'key_type': 'ssh', 'key': personkey}
- api.plshell.AddPersonKey(api.plauth, person_dict['email'], key)
-
- # find out where this slice is currently running
- nodelist = api.plshell.GetNodes(api.plauth, slice['node_ids'],
- ['hostname'])
- hostnames = [node['hostname'] for node in nodelist]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(hostnames).difference(nodes))
- # add nodes from rspec
- added_nodes = list(set(nodes).difference(hostnames))
-
- api.plshell.AddSliceToNodes(api.plauth, slicename, added_nodes)
- api.plshell.DeleteSliceFromNodes(api.plauth, slicename, deleted_nodes)
-
- return 1
-
-
-def get_rspec(api, hrn):
- # Eg. config line:
- # plc.princeton.sapan vlan23,vlan45
-
- allocations = read_alloc_dict()
- if (hrn and allocations.has_key(hrn)):
- ret_rspec = allocations_to_rspec(allocations[hrn])
- else:
- ret_rspec = open(SFA_MAX_CANNED_RSPEC).read()
-
- return (ret_rspec)
-
-
-def create_slice(api, hrn, rspec_xml):
- global topology
- topology = get_interface_map()
-
- # Check if everything in rspec is either allocated by hrn
- # or not allocated at all.
- r = RSpec()
- r.parseString(rspec_xml)
- rspec = r.toDict()
-
- lock_state_file()
-
- allocations = read_alloc_dict()
- requested_allocations = rspec_to_allocations (rspec)
- current_allocations = collapse_alloc_dict(allocations)
- try:
- current_hrn_allocations=allocations[hrn]
- except KeyError:
- current_hrn_allocations=[]
-
- # Check request against current allocations
- requested_interfaces = map(lambda(elt):elt[0], requested_allocations)
- current_interfaces = map(lambda(elt):elt[0], current_allocations)
- current_hrn_interfaces = map(lambda(elt):elt[0], current_hrn_allocations)
-
- for a in requested_interfaces:
- if (a not in current_hrn_interfaces and a in current_interfaces):
- raise SfaOutOfResource(a)
- if (topology[a][1] not in requested_interfaces):
- raise SfaNoPairRSpec(a,topology[a][1])
- # Request OK
-
- # Allocations to delete
- allocations_to_delete = []
- for a in current_hrn_allocations:
- if (a not in requested_allocations):
- allocations_to_delete.extend([a])
-
- # Ok, let's do our thing
- alloc_nodes(api, hrn, requested_interfaces)
- alloc_links(api, hrn, requested_allocations, allocations_to_delete)
- allocations[hrn] = requested_allocations
- commit_alloc_dict(allocations)
-
- unlock_state_file()
-
- return True
-
-def rspec_to_allocations(rspec):
- ifs = []
- try:
- ifspecs = rspec['rspec']['request'][0]['ifspec']
- for l in ifspecs:
- ifs.extend([(l['name'].replace('tns:',''),l['ip'])])
- except KeyError:
- # Bad RSpec
- pass
- return ifs
-
-def main():
- t = get_interface_map()
- r = RSpec()
- rspec_xml = open(sys.argv[1]).read()
- #get_rspec(None,'foo')
- create_slice(None, "plc.princeton.sap0", rspec_xml)
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-import sys
-
-#The following is not essential
-#from soaplib.wsgi_soap import SimpleWSGISoapApp
-#from soaplib.serializers.primitive import *
-#from soaplib.serializers.clazz import *
-
-import socket
-import struct
-
-from sfa.util.faults import *
-from sfa.util.rspec import RSpec
-from sfa.server.registry import Registries
-from sfa.util.config import Config
-from sfa.plc.nodes import *
-
-# Message IDs for all the SFA light calls
-# This will be used by the aggrMgr controller
-SFA_GET_RESOURCES = 101
-SFA_CREATE_SLICE = 102
-SFA_START_SLICE = 103
-SFA_STOP_SLICE = 104
-SFA_DELETE_SLICE = 105
-SFA_GET_SLICES = 106
-SFA_RESET_SLICES = 107
-
-DEBUG = 1
-
-def print_buffer(buf):
- for i in range(0,len(buf)):
- print('%x' % buf[i])
-
-def extract(sock):
- # Shud we first obtain the message length?
- # msg_len = socket.ntohs(sock.recv(2))
- msg = ""
-
- while (1):
- try:
- chunk = sock.recv(1)
- except socket.error, message:
- if 'timed out' in message:
- break
- else:
- sys.exit("Socket error: " + message)
-
- if len(chunk) == 0:
- break
- msg += chunk
-
- print 'Done extracting %d bytes of response from aggrMgr' % len(msg)
- return msg
-
-def connect(server, port):
- '''Connect to the Aggregate Manager module'''
- sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
- sock.connect ( ( server, port) )
- sock.settimeout(1)
- if DEBUG: print 'Connected!'
- return sock
-
-def connect_aggrMgr():
- (aggr_mgr_ip, aggr_mgr_port) = Config().get_openflow_aggrMgr_info()
- if DEBUG: print """Connecting to port %d of %s""" % (aggr_mgr_port, aggr_mgr_ip)
- return connect(aggr_mgr_ip, aggr_mgr_port)
-
-def generate_slide_id(cred, hrn):
- if cred == None:
- cred = ""
- if hrn == None:
- hrn = ""
- #return cred + '_' + hrn
- return str(hrn)
-
-def msg_aggrMgr(cred, hrn, msg_id):
- slice_id = generate_slide_id(cred, hrn)
-
- msg = struct.pack('> B%ds' % len(slice_id), msg_id, slice_id)
- buf = struct.pack('> H', len(msg)+2) + msg
-
- try:
- aggrMgr_sock = connect_aggrMgr()
- aggrMgr_sock.send(buf)
- aggrMgr_sock.close()
- return 1
- except socket.error, message:
- print "Socket error"
- except IOerror, message:
- print "IO error"
- return 0
-
-def start_slice(cred, hrn):
- if DEBUG: print "Received start_slice call"
- return msg_aggrMgr(SFA_START_SLICE)
-
-def stop_slice(cred, hrn):
- if DEBUG: print "Received stop_slice call"
- return msg_aggrMgr(SFA_STOP_SLICE)
-
-def delete_slice(cred, hrn):
- if DEBUG: print "Received delete_slice call"
- return msg_aggrMgr(SFA_DELETE_SLICE)
-
-def reset_slices(cred, hrn):
- if DEBUG: print "Received reset_slices call"
- return msg_aggrMgr(SFA_RESET_SLICES)
-
-def create_slice(cred, hrn, rspec):
- if DEBUG: print "Received create_slice call"
- slice_id = generate_slide_id(cred, hrn)
-
- msg = struct.pack('> B%ds%ds' % (len(slice_id)+1, len(rspec)), SFA_CREATE_SLICE, slice_id, rspec)
- buf = struct.pack('> H', len(msg)+2) + msg
-
- try:
- aggrMgr_sock = connect_aggrMgr()
- aggrMgr_sock.send(buf)
- if DEBUG: print "Sent %d bytes and closing connection" % len(buf)
- aggrMgr_sock.close()
-
- if DEBUG: print "----------------"
- return 1
- except socket.error, message:
- print "Socket error"
- except IOerror, message:
- print "IO error"
- return 0
-
-def get_rspec(cred, hrn=None):
- if DEBUG: print "Received get_rspec call"
- slice_id = generate_slide_id(cred, hrn)
-
- msg = struct.pack('> B%ds' % len(slice_id), SFA_GET_RESOURCES, slice_id)
- buf = struct.pack('> H', len(msg)+2) + msg
-
- try:
- aggrMgr_sock = connect_aggrMgr()
- aggrMgr_sock.send(buf)
- resource_list = extract(aggrMgr_sock);
- aggrMgr_sock.close()
-
- if DEBUG: print "----------------"
- return resource_list
- except socket.error, message:
- print "Socket error"
- except IOerror, message:
- print "IO error"
- return None
-
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
- base_context = {'sfa':{'user':{'hrn':user_hrn}}}
- return base_context
-
-def main():
- r = RSpec()
- r.parseFile(sys.argv[1])
- rspec = r.toDict()
- create_slice(None,'plc',rspec)
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
- base_context = {'sfa':{'user':{'hrn':user_hrn}}}
- return base_context
def __init__(self, api, conf_file = "/etc/sfa/aggregates.xml"):
Interfaces.__init__(self, api, conf_file)
# set up a connection to the local aggregate
- # xxx fixme ? - should not we do this only when SFA_AGGREGATE_ENABLED ?
- address = self.api.config.SFA_AGGREGATE_HOST
- port = self.api.config.SFA_AGGREGATE_PORT
- url = 'http://%(address)s:%(port)s' % locals()
- local_aggregate = {'hrn': self.api.hrn,
- 'urn': hrn_to_urn(self.api.hrn, 'authority'),
- 'addr': address,
- 'port': port,
- 'url': url}
- self.interfaces[self.api.hrn] = local_aggregate
+ if self.api.config.SFA_AGGREGATE_ENABLED:
+ address = self.api.config.SFA_AGGREGATE_HOST
+ port = self.api.config.SFA_AGGREGATE_PORT
+ url = 'http://%(address)s:%(port)s' % locals()
+ local_aggregate = {'hrn': self.api.hrn,
+ 'urn': hrn_to_urn(self.api.hrn, 'authority'),
+ 'addr': address,
+ 'port': port,
+ 'url': url}
+ self.interfaces[self.api.hrn] = local_aggregate
# get connections
self.update(self.get_connections())
+import traceback
+import os.path
+from sfa.util.sfalogging import sfa_logger
from sfa.util.faults import *
-from sfa.util.storage import *
+from sfa.util.storage import XmlStorage
from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.trust.gid import GID
from sfa.util.record import SfaRecord
-import traceback
import sfa.util.xmlrpcprotocol as xmlrpcprotocol
import sfa.util.soapprotocol as soapprotocol
+from sfa.trust.gid import GID
# GeniLight client support is optional
try:
1) Makes sure a record exist in the local registry for the each
fedeated peer
- 2) Attepts to fetch and install trusted gids
+ 2) Attempts to fetch and install trusted gids
3) Provides connections (xmlrpc or soap) to federated peers
"""
connections = {}
required_fields = self.default_fields.keys()
for interface in self.interfaces.values():
- # make sure the required fields are present and not null
-
url = interface['url']
+# sfa_logger().debug("Interfaces.get_connections - looping on neighbour %s"%url)
# check which client we should use
# sfa.util.xmlrpcprotocol is default
client_type = 'xmlrpcprotocol'
+++ /dev/null
-FAULT_UNHANDLEDSERVEREXCEPTION = 901
-FAULT_BADREQUESTHASH = 902
+++ /dev/null
-import traceback
-import xmlrpclib
-
-FAULTCODE = 900
-
-class UnhandledServerException(xmlrpclib.Fault):
- def __init__(self, type, value, tb):
- exc_str = ''.join(traceback.format_exception(type, value, tb))
- faultString = exc_str # "Unhandled exception: " + str(type) + "\n" + exc_str
- xmlrpclib.Fault.__init__(self, FAULTCODE + 1, faultString)
-
-class BadRequestHash(xmlrpclib.Fault):
- def __init__(self, hash = None):
- faultString = "bad request hash: " + str(hash)
- xmlrpclib.Fault.__init__(self, FAULTCODE + 2, faultString)
+++ /dev/null
-import xmlrpclib
-
-from BaseApi import BaseApi
-
-from sfa.trust.credential import Credential
-from sfa.trust.gid import GID
-from sfa.trust.trustedroot import TrustedRootList
-
-from ApiExceptionCodes import *
-
-class BadRequestHash(xmlrpclib.Fault):
- def __init__(self, hash = None):
- faultString = "bad request hash: " + str(hash)
- xmlrpclib.Fault.__init__(self, FAULT_BADREQUESTHASH, faultString)
-
-class AuthenticatedApi(BaseApi):
- def __init__(self, encoding = "utf-8", trustedRootsDir=None):
- BaseApi.__init__(self, encoding)
- if trustedRootsDir:
- self.trusted_cert_list = TrustedRootList(trustedRootsDir).get_list()
- self.trusted_cert_file_list = TrustedRootList(trustedRootsDir).get_file_list()
- else:
- self.trusted_cert_list = None
-
- def register_functions(self):
- BaseApi.register_functions(self)
- self.register_function(self.gidNoop)
-
- def verifyGidRequestHash(self, gid, hash, arglist):
- key = gid.get_pubkey()
- if not key.verify_string(str(arglist), hash):
- raise BadRequestHash(hash)
-
- def verifyCredRequestHash(self, cred, hash, arglist):
- gid = cred.get_gid_caller()
- self.verifyGidRequestHash(gid, hash, arglist)
-
- def validateGid(self, gid):
- if self.trusted_cert_list:
- gid.verify_chain(self.trusted_cert_list)
-
- def validateCred(self, cred):
- if self.trusted_cert_list:
- cred.verify(self.trusted_cert_file_list)
-
- def authenticateGid(self, gidStr, argList, requestHash):
- gid = GID(string = gidStr)
- self.validateGid(gid)
- self.verifyGidRequestHash(gid, requestHash, argList)
- return gid
-
- def authenticateCred(self, credStr, argList, requestHash):
- cred = Credential(string = credStr)
- self.validateCred(cred)
- self.verifyCredRequestHash(cred, requestHash, argList)
- return cred
-
- def gidNoop(self, gidStr, value, requestHash):
- self.authenticateGid(gidStr, [gidStr, value], requestHash)
- return value
-
- def credNoop(self, credStr, value, requestHash):
- self.authenticateCred(credStr, [credStr, value], requestHash)
- return value
-
-
+++ /dev/null
-from sfa.trust.certificate import Keypair
-from sfa.trust.gid import GID
-
-from BaseClient import BaseClient
-
-class AuthenticatedClient(BaseClient):
- def __init__(self, url, private_key_file, gid_file=None, cred_file=None):
- BaseClient.__init__(self, url)
- self.private_key_file = private_key_file
- self.gid_file = gid_file
- self.cred_file = cred_file
- self.private_key = Keypair(filename = self.private_key_file)
- if gid_file:
- self.gid = GID(filename = self.gid_file)
- if cred_file:
- self.cred = Credential(filename = self.cred_file)
-
- def computeRequestHash(self, argList):
- return self.private_key.sign_string(str(argList))
-
- def gidNoop(self, value):
- gidStr = self.gid.save_to_string(True)
- reqHash = self.computeRequestHash([gidStr, value])
- return self.server.gidNoop(gidStr, value, reqHash)
+++ /dev/null
-#
-# PLCAPI XML-RPC and SOAP interfaces
-#
-# Aaron Klingaman <alk@absarokasoft.com>
-# Mark Huang <mlhuang@cs.princeton.edu>
-#
-# Copyright (C) 2004-2006 The Trustees of Princeton University
-# $Id: API.py 14587 2009-07-19 13:18:50Z thierry $
-# $URL: https://svn.planet-lab.org/svn/PLCAPI/trunk/PLC/API.py $
-#
-
-import sys
-import traceback
-import string
-
-import xmlrpclib
-import logging
-import logging.handlers
-
-from ApiExceptionCodes import *
-
-# Wrapper around xmlrpc fault to include a traceback of the server to the
-# client. This is done to aid in debugging from a client perspective.
-
-class FaultWithTraceback(xmlrpclib.Fault):
- def __init__(self, code, faultString, exc_info):
- type, value, tb = exc_info
- exc_str = ''.join(traceback.format_exception(type, value, tb))
- faultString = faultString + "\nFAULT_TRACEBACK:" + exc_str
- xmlrpclib.Fault.__init__(self, code, faultString)
-
-# Exception to report to the caller when some non-XMLRPC fault occurs on the
-# server. For example a TypeError.
-
-class UnhandledServerException(FaultWithTraceback):
- def __init__(self, exc_info):
- type, value, tb = exc_info
- faultString = "Unhandled exception: " + str(type)
- FaultWithTraceback.__init__(self, FAULT_UNHANDLEDSERVEREXCEPTION, faultString, exc_info)
-
-# See "2.2 Characters" in the XML specification:
-#
-# #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
-# avoiding
-# [#x7F-#x84], [#x86-#x9F], [#xFDD0-#xFDDF]
-
-invalid_xml_ascii = map(chr, range(0x0, 0x8) + [0xB, 0xC] + range(0xE, 0x1F))
-xml_escape_table = string.maketrans("".join(invalid_xml_ascii), "?" * len(invalid_xml_ascii))
-
-def xmlrpclib_escape(s, replace = string.replace):
- """
- xmlrpclib does not handle invalid 7-bit control characters. This
- function augments xmlrpclib.escape, which by default only replaces
- '&', '<', and '>' with entities.
- """
-
- # This is the standard xmlrpclib.escape function
- s = replace(s, "&", "&")
- s = replace(s, "<", "<")
- s = replace(s, ">", ">",)
-
- # Replace invalid 7-bit control characters with '?'
- return s.translate(xml_escape_table)
-
-def xmlrpclib_dump(self, value, write):
- """
- xmlrpclib cannot marshal instances of subclasses of built-in
- types. This function overrides xmlrpclib.Marshaller.__dump so that
- any value that is an instance of one of its acceptable types is
- marshalled as that type.
-
- xmlrpclib also cannot handle invalid 7-bit control characters. See
- above.
- """
-
- # Use our escape function
- args = [self, value, write]
- if isinstance(value, (str, unicode)):
- args.append(xmlrpclib_escape)
-
- try:
- # Try for an exact match first
- f = self.dispatch[type(value)]
- except KeyError:
- # Try for an isinstance() match
- for Type, f in self.dispatch.iteritems():
- if isinstance(value, Type):
- f(*args)
- return
- raise TypeError, "cannot marshal %s objects" % type(value)
- else:
- f(*args)
-
-# You can't hide from me!
-xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump
-
-# SOAP support is optional
-try:
- import SOAPpy
- from SOAPpy.Parser import parseSOAPRPC
- from SOAPpy.Types import faultType
- from SOAPpy.NS import NS
- from SOAPpy.SOAPBuilder import buildSOAP
-except ImportError:
- SOAPpy = None
-
-def import_deep(name):
- mod = __import__(name)
- components = name.split('.')
- for comp in components[1:]:
- mod = getattr(mod, comp)
- return mod
-
-class BaseApi:
- def __init__(self, encoding = "utf-8"):
- self.encoding = encoding
- self.init_logger()
- self.funcs = {}
- self.register_functions()
-
- def init_logger(self):
- self.logger = logging.getLogger("ApiLogger")
- self.logger.setLevel(logging.INFO)
- self.logger.addHandler(logging.handlers.RotatingFileHandler(self.get_log_name(), maxBytes=100000, backupCount=5))
-
- def get_log_name(self):
- return "/tmp/apilogfile.txt"
-
- def register_functions(self):
- self.register_function(self.noop)
-
- def register_function(self, function, name = None):
- if name is None:
- name = function.__name__
- self.funcs[name] = function
-
- def call(self, source, method, *args):
- """
- Call the named method from the specified source with the
- specified arguments.
- """
-
- if not method in self.funcs:
- raise "Unknown method: " + method
-
- return self.funcs[method](*args)
-
- def handle(self, source, data):
- """
- Handle an XML-RPC or SOAP request from the specified source.
- """
-
- # Parse request into method name and arguments
- try:
- interface = xmlrpclib
- (args, method) = xmlrpclib.loads(data)
- methodresponse = True
- except Exception, e:
- if SOAPpy is not None:
- interface = SOAPpy
- (r, header, body, attrs) = parseSOAPRPC(data, header = 1, body = 1, attrs = 1)
- method = r._name
- args = r._aslist()
- # XXX Support named arguments
- else:
- raise e
-
- self.logger.debug("OP:" + str(method) + " from " + str(source))
-
- try:
- result = self.call(source, method, *args)
- except xmlrpclib.Fault, fault:
- self.logger.warning("FAULT: " + str(fault.faultCode) + " " + str(fault.faultString))
- self.logger.info(traceback.format_exc())
- # Handle expected faults
- if interface == xmlrpclib:
- result = FaultWithTraceback(fault.faultCode, fault.faultString, sys.exc_info())
- methodresponse = None
- elif interface == SOAPpy:
- result = faultParameter(NS.ENV_T + ":Server", "Method Failed", method)
- result._setDetail("Fault %d: %s" % (fault.faultCode, fault.faultString))
- self.logger.debug
- except:
- self.logger.warning("EXCEPTION: " + str(sys.exc_info()[0]))
- self.logger.info(traceback.format_exc())
- result = UnhandledServerException(sys.exc_info())
- methodresponse = None
-
- # Return result
- if interface == xmlrpclib:
- if not isinstance(result, xmlrpclib.Fault):
- result = (result,)
- data = xmlrpclib.dumps(result, methodresponse = True, encoding = self.encoding, allow_none = 1)
- elif interface == SOAPpy:
- data = buildSOAP(kw = {'%sResponse' % method: {'Result': result}}, encoding = self.encoding)
-
- return data
-
- def noop(self, value):
- return value
-
-
-
+++ /dev/null
-import xmlrpclib
-
-from ApiExceptionCodes import *
-
-class ExceptionUnmarshaller(xmlrpclib.Unmarshaller):
- def close(self):
- try:
- return xmlrpclib.Unmarshaller.close(self)
- except xmlrpclib.Fault, e:
- # if the server tagged some traceback info onto the end of the
- # exception text, then print it out on the client.
-
- if "\nFAULT_TRACEBACK:" in e.faultString:
- parts = e.faultString.split("\nFAULT_TRACEBACK:")
- e.faultString = parts[0]
- if BaseClient.VerboseExceptions:
- print "\n|Server Traceback:", "\n|".join(parts[1].split("\n"))
-
- raise e
-
-class ExceptionReportingTransport(xmlrpclib.Transport):
- def make_connection(self, host):
- import httplib
- if host.startswith("https:"):
- return httplib.HTTPS(host)
- else:
- return httplib.HTTP(host)
-
- def getparser(self):
- unmarshaller = ExceptionUnmarshaller()
- parser = xmlrpclib.ExpatParser(unmarshaller)
- return parser, unmarshaller
-
-class BaseClient():
-
- VerboseExceptions = False
-
- def __init__(self, url):
- self.url = url
- self.server = xmlrpclib.ServerProxy(self.url, ExceptionReportingTransport())
-
- def noop(self, value):
- return self.server.noop(value)
-
- @staticmethod
- def EnableVerboseExceptions(x=True):
- BaseClient.VerboseExceptions = x
-
+++ /dev/null
-#
-# Apache mod_python interface
-#
-# Aaron Klingaman <alk@absarokasoft.com>
-# Mark Huang <mlhuang@cs.princeton.edu>
-#
-# Copyright (C) 2004-2006 The Trustees of Princeton University
-#
-
-import sys
-import traceback
-import xmlrpclib
-from mod_python import apache
-
-from sfa.util.logging import sfa_logger
-from API import RemoteApi
-api = RemoteApi()
-
-def handler(req):
- try:
- if req.method != "POST":
- req.content_type = "text/html"
- req.send_http_header()
- req.write("""
-<html><head>
-<title>PLCAPI XML-RPC/SOAP Interface</title>
-</head><body>
-<h1>PLCAPI XML-RPC/SOAP Interface</h1>
-<p>Please use XML-RPC or SOAP to access the PLCAPI.</p>
-</body></html>
-""")
- return apache.OK
-
- # Read request
- request = req.read(int(req.headers_in['content-length']))
-
- # mod_python < 3.2: The IP address portion of remote_addr is
- # incorrect (always 0.0.0.0) when IPv6 is enabled.
- # http://issues.apache.org/jira/browse/MODPYTHON-64?page=all
- (remote_ip, remote_port) = req.connection.remote_addr
- remote_addr = (req.connection.remote_ip, remote_port)
-
- # Handle request
- response = api.handle(remote_addr, request)
-
- # Write response
- req.content_type = "text/xml; charset=" + api.encoding
- req.send_http_header()
- req.write(response)
-
- return apache.OK
-
- except Exception, err:
- # Log error in /var/log/httpd/(ssl_)?error_log
- sfa_logger().log_exc("%r"%err)
- return apache.HTTP_INTERNAL_SERVER_ERROR
+++ /dev/null
-from AuthenticatedApi import AuthenticatedApi, BadRequestHash
-
-class RemoteApi(AuthenticatedApi):
- def __init__(self, encoding="utf-8", trustedRootsDir="/usr/local/testapi/var/trusted_roots"):
- return AuthenticatedApi.__init__(self, encoding)
-
- def get_log_name(self):
- return "/usr/local/testapi/var/logfile.txt"
-
- def register_functions(self):
- AuthenticatedApi.register_functions(self)
- self.register_function(self.typeError)
- self.register_function(self.badRequestHash)
-
- def typeError(self):
- raise TypeError()
-
- def badRequestHash(self):
- raise BadRequestHash("somehashvalue")
+++ /dev/null
-SFA_SRC_DIR=/home/smbaker/projects/sfa/trunk
-
-mkdir -p /usr/local/testapi/bin
-mkdir -p /usr/local/testapi/bin/sfa/trust
-mkdir -p /usr/local/testapi/bin/sfa/util
-mkdir -p /usr/local/testapi/var/trusted_roots
-mkdir -p /repository/testapi
-
-# source code for the API
-cp BaseApi.py /usr/local/testapi/bin/
-cp AuthenticatedApi.py /usr/local/testapi/bin/
-cp TestApi.py /usr/local/testapi/bin/API.py
-cp ModPython.py /usr/local/testapi/bin/
-cp ApiExceptionCodes.py /usr/local/testapi/bin/
-
-# trusted root certificates that match gackstestuser.*
-cp trusted_roots/*.gid /usr/local/testapi/var/trusted_roots/
-
-# apache config file to enable the api
-cp testapi.conf /etc/httpd/conf.d/
-
-# copy over sfa stuff that we need
-echo > /usr/local/testapi/bin/sfa/__init__.py
-echo > /usr/local/testapi/bin/sfa/trust/__init__.py
-echo > /usr/local/testapi/bin/sfa/util/__init__.py
-cp $SFA_SRC_DIR/sfa/trust/gid.py /usr/local/testapi/bin/sfa/trust/
-cp $SFA_SRC_DIR/sfa/trust/certificate.py /usr/local/testapi/bin/sfa/trust/
-cp $SFA_SRC_DIR/sfa/trust/trustedroot.py /usr/local/testapi/bin/sfa/trust/
-cp $SFA_SRC_DIR/sfa/trust/credential.py /usr/local/testapi/bin/sfa/trust/
-cp $SFA_SRC_DIR/sfa/trust/rights.py /usr/local/testapi/bin/sfa/trust/
-cp $SFA_SRC_DIR/sfa/util/faults.py /usr/local/testapi/bin/sfa/util/
-
-# make everything owned by apache
-chown -R apache /usr/local/testapi
-chown apache /etc/httpd/conf.d/testapi.conf
-
-/etc/init.d/httpd restart
+++ /dev/null
-import sys
-import traceback
-
-from BaseClient import BaseClient
-from AuthenticatedClient import AuthenticatedClient
-
-BaseClient.EnableVerboseExceptions(True)
-
-HOST = "localhost"
-URL = "http://" + HOST + "/TESTAPI/"
-SURL = "https://" + HOST + "/TESTAPI/"
-
-print "*** testing some valid ops; these should print \"Hello, World\" ***"
-
-bc = BaseClient(URL)
-print "HTTP noop:", bc.noop("Hello, World")
-
-ac = AuthenticatedClient(URL, "gackstestuser.pkey", "gackstestuser.gid")
-print "HTTP gidNoop:", ac.gidNoop("Hello, World")
-
-bc = BaseClient(SURL)
-print "HTTPS noop:", bc.noop("Hello, World")
-
-ac = AuthenticatedClient(URL, "gackstestuser.pkey", "gackstestuser.gid")
-print "HTTPS gidNoop:", ac.gidNoop("Hello, World")
-
-print
-print "*** testing some exception handling: ***"
-
-bc = BaseClient(URL)
-print "HTTP typeError:",
-try:
- result = bc.server.typeError()
- print result
-except Exception, e:
- print ''.join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
-
-print "HTTP badrequesthash:",
-try:
- result = bc.server.badRequestHash()
- print result
-except:
- print ''.join(traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]))
-
+++ /dev/null
-export PYTHONPATH=/home/smbaker/projects/sfa/trunk
-
-python ./test.py
+++ /dev/null
-<Location /TESTAPI/>
- SetHandler mod_python
- PythonPath "sys.path + ['/usr/local/testapi/bin/']"
- PythonHandler ModPython
-</Location>
\ No newline at end of file
# Credentials are signed XML files that assign a subject gid privileges to an object gid
##
-### $Id$
-### $URL$
-
import os
import datetime
from tempfile import mkstemp
+import dateutil.parser
+from StringIO import StringIO
from xml.dom.minidom import Document, parseString
-from dateutil.parser import parse
from lxml import etree
-from StringIO import StringIO
+
from sfa.util.faults import *
from sfa.util.sfalogging import sfa_logger
from sfa.trust.certificate import Keypair
self.set_refid(cred.getAttribute("xml:id"))
- self.set_expiration(parse(getTextNode(cred, "expires")))
+ self.set_expiration(dateutil.parser.parse(getTextNode(cred, "expires")))
self.gidCaller = GID(string=getTextNode(cred, "owner_gid"))
self.gidObject = GID(string=getTextNode(cred, "target_gid"))
#
#
+import re
+import traceback
+import commands
+from pprint import pformat
+from types import StringTypes, NoneType
+
import psycopg2
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
# UNICODEARRAY not exported yet
psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY)
-import pgdb
-from types import StringTypes, NoneType
-import traceback
-import commands
-import re
-from pprint import pformat
+# allow running sfa2wsdl if this is missing (for mac)
+import sys
+try: import pgdb
+except: print >> sys.stderr, "WARNING, could not import pgdb"
from sfa.util.faults import *
from sfa.util.sfalogging import sfa_logger
--- /dev/null
+#!/usr/bin/python
+
+import threading
+import time
+
+from sfa.util.sfalogging import sfa_logger
+
+"""
+Callids: a simple mechanism to remember the call ids served so far
+memory-only for now - thread-safe
+implemented as a (singleton) hash 'callid'->timestamp
+"""
+
+debug=False
+
+class _call_ids_impl (dict):
+
+ _instance = None
+ # 5 minutes sounds amply enough
+ purge_timeout=5*60
+ # when trying to get a lock
+ retries=10
+ # in ms
+ wait_ms=100
+
+ def __init__(self):
+ self._lock=threading.Lock()
+
+ # the only primitive
+ # return True if the callid is unknown, False otherwise
+ def already_handled (self,call_id):
+ # if not provided in the call...
+ if not call_id: return False
+ has_lock=False
+ for attempt in range(_call_ids_impl.retries):
+ if debug: sfa_logger().debug("Waiting for lock (%d)"%attempt)
+ if self._lock.acquire(False):
+ has_lock=True
+ if debug: sfa_logger().debug("got lock (%d)"%attempt)
+ break
+ time.sleep(float(_call_ids_impl.wait_ms)/1000)
+ # in the unlikely event where we can't get the lock
+ if not has_lock:
+ sfa_logger().warning("_call_ids_impl.should_handle_call_id: could not acquire lock")
+ return False
+ # we're good to go
+ if self.has_key(call_id):
+ self._purge()
+ self._lock.release()
+ return True
+ self[call_id]=time.time()
+ self._purge()
+ self._lock.release()
+ if debug: sfa_logger().debug("released lock")
+ return False
+
+ def _purge(self):
+ now=time.time()
+ o_keys=[]
+ for (k,v) in self.iteritems():
+ if (now-v) >= _call_ids_impl.purge_timeout: o_keys.append(k)
+ for k in o_keys:
+ if debug: sfa_logger().debug("Purging call_id %r (%s)"%(k,time.strftime("%H:%M:%S",time.localtime(self[k]))))
+ del self[k]
+ if debug:
+ sfa_logger().debug("AFTER PURGE")
+ for (k,v) in self.iteritems(): sfa_logger().debug("%s -> %s"%(k,time.strftime("%H:%M:%S",time.localtime(v))))
+
+def Callids ():
+ if not _call_ids_impl._instance:
+ _call_ids_impl._instance = _call_ids_impl()
+ return _call_ids_impl._instance
-# $Id$
-# $URL$
from types import StringTypes
try:
set
set = Set
import time
-import pgdb
+try: import pgdb
+except: pass
from sfa.util.faults import *
from sfa.util.parameter import Parameter, Mixed, python_type
#
#
-### $Id$
-### $URL$
-
import os, time
from types import *
from types import StringTypes
from sfa.util.parameter import Parameter, Mixed, python_type, xmlrpc_type
from sfa.trust.auth import Auth
-# we inherit object because we use new-style classes for legacy methods
-class Method (object):
+class Method:
"""
Base class for all SfaAPI functions. At a minimum, all SfaAPI
functions must define:
def call(self, *args):
"""
Method body for all SfaAPI functions. Must override.
-
"""
-
- return True
+ return None
def __init__(self, api):
self.name = self.__class__.__name__
if not self.api.interface or self.api.interface not in self.interfaces:
raise SfaInvalidAPIMethod(methodname, self.api.interface)
- # legacy code cannot be type-checked, due to the way Method.args() works
- if not hasattr(self,"skip_typecheck"):
- (min_args, max_args, defaults) = self.args()
-
- # Check that the right number of arguments were passed in
- if len(args) < len(min_args) or len(args) > len(max_args):
- raise SfaInvalidArgumentCount(len(args), len(min_args), len(max_args))
+ (min_args, max_args, defaults) = self.args()
+
+ # Check that the right number of arguments were passed in
+ if len(args) < len(min_args) or len(args) > len(max_args):
+ raise SfaInvalidArgumentCount(len(args), len(min_args), len(max_args))
- for name, value, expected in zip(max_args, args, self.accepts):
- self.type_check(name, value, expected, args)
+ for name, value, expected in zip(max_args, args, self.accepts):
+ self.type_check(name, value, expected, args)
+ if self.api.config.SFA_API_DEBUG:
+ sfa_logger().debug("method.__call__ [%s] : BEG %s"%(self.api.interface,methodname))
result = self.call(*args, **kwds)
- runtime = time.time() - start
+ runtime = time.time() - start
if self.api.config.SFA_API_DEBUG or hasattr(self, 'message'):
- msg=getattr(self,'message',"method %s completed in %02f s"%(methodname,runtime))
- sfa_logger().debug(msg)
+ sfa_logger().debug("method.__call__ [%s] : END %s in %02f s (%s)"%\
+ (self.api.interface,methodname,runtime,getattr(self,'message',"[no-msg]")))
return result
def pl_slicename (self):
self._normalize()
leaf = self.leaf
- leaf = re.sub('[^a-zA-Z0-9]', '', leaf)
+ leaf = re.sub('[^a-zA-Z0-9_]', '', leaf)
return self.pl_login_base() + '_' + leaf
#def hrn_to_pl_authname(hrn):
# raise SfaInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__
# Validate values before committing
- for key, value in self.iteritems():
+ for (key, value) in self.iteritems():
if value is not None and hasattr(self, 'validate_' + key):
validate = getattr(self, 'validate_' + key)
self[key] = validate(value)
-### $Id$
-### $URL$
-
import sys
import pprint
import os
+from StringIO import StringIO
+from types import StringTypes, ListType
import httplib
from xml.dom import minidom
-from types import StringTypes, ListType
from lxml import etree
-from StringIO import StringIO
from sfa.util.sfalogging import sfa_logger
# the resulting tree
rspec = None
for input_rspec in rspecs:
+ # ignore empty strings as returned with used call_ids
+ if not input_rspec: continue
try:
tree = etree.parse(StringIO(input_rspec))
except etree.XMLSyntaxError:
if len(networks) == 1:
self.network = networks[0]
+ # Thierry : need this to locate hostname even if several networks
def get_node_element(self, hostname, network=None):
- if network == None:
+ if network == None and self.network:
network = self.network
- names = self.rspec.iterfind("./network[@name='%s']/site/node/hostname" % network)
+ if network != None:
+ names = self.rspec.iterfind("./network[@name='%s']/site/node/hostname" % network)
+ else:
+ names = self.rspec.iterfind("./network/site/node/hostname")
for name in names:
if name.text == hostname:
return name.getparent()
return None
+ # Thierry : need this to return all nodes in all networks
def get_node_list(self, network=None):
- if network == None:
+ if network == None and self.network:
network = self.network
- result = self.rspec.xpath("./network[@name='%s']/site/node/hostname/text()" % network)
- return result
+ if network != None:
+ return self.rspec.xpath("./network[@name='%s']/site/node/hostname/text()" % network)
+ else:
+ return self.rspec.xpath("./network/site/node/hostname/text()")
def get_network_list(self):
return self.rspec.xpath("./network[@name]/@name")
result = callable(*args, **kwds)
end = time.time()
args = map(str, args)
- args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.items()]
+ args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()]
# should probably use debug, but then debug is not always enabled
logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))
return result
# implements SFA tickets
#
-### $Id$
-### $URL$
-
import xmlrpclib
from sfa.trust.certificate import Certificate
# SOAP-specific code for SFA Client
-import pdb
-from ZSI.client import Binding
from httplib import HTTPSConnection
+from ZSI.client import Binding
def xmlrpc_like_callable (soap_callable, *x):
soap_result = soap_callable(*x)
-### $Id$
-### $URL$
-
import os
from sfa.util.rspec import RecordSpec
-### $Id$
-### $URL$
#
# implements support for SFA records stored in db tables
#
# TODO: Use existing PLC database methods? or keep this separate?
-import pgdb
-
from sfa.util.PostgreSQL import *
from sfa.trust.gid import *
from sfa.util.record import *
def db_fields(self, obj=None):
db_fields = self.db.fields(self.SFA_TABLE_PREFIX)
- return dict( [ (key,value) for (key, value) in obj.items() \
+ return dict( [ (key,value) for (key, value) in obj.iteritems() \
if key in db_fields and
self.is_writable(key, value, SfaRecord.fields)] )
def insert(self, record):
db_fields = self.db_fields(record)
keys = db_fields.keys()
- values = [self.db.param(key, value) for (key, value) in db_fields.items()]
+ values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
query_str = "INSERT INTO " + self.tablename + \
"(" + ",".join(keys) + ") " + \
"VALUES(" + ",".join(values) + ")"
def update(self, record):
db_fields = self.db_fields(record)
keys = db_fields.keys()
- values = [self.db.param(key, value) for (key, value) in db_fields.items()]
+ values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
query_str = "UPDATE %s SET %s WHERE record_id = %s" % \
(self.tablename, ", ".join(columns), record['record_id'])
verbose = False
if options and options.debug:
verbose = True
- sfa_logger().info ("Connecting to xmlrpcserver at %s (with verbose=%s)"%(url,verbose))
+# sfa_logger().debug ("xmlrpcprotocol.XMLRPCServerProxy.__init__ %s (with verbose=%s)"%(url,verbose))
xmlrpclib.ServerProxy.__init__(self, url, transport, allow_none=allow_none, verbose=verbose)
def __getattr__(self, attr):
- sfa_logger().info ("Calling xml-rpc method:%s"%attr)
+ sfa_logger().debug ("xml-rpc %s method:%s"%(self.url,attr))
return xmlrpclib.ServerProxy.__getattr__(self, attr)
import os, time
import libxml2
-import pdb
from sfatables.globals import *
from sfatables.pretty import Pretty
import sys
import os
import pdb
+from optparse import OptionParser
+
import libxml2
-from optparse import OptionParser
from sfatables import commands
from sfatables.globals import *
from sfatables.commands.List import *
import libxml2
-import libxslt
+# allow to run sfa2wsdl if this is missing (for mac)
+import sys
+try:import libxslt
+except ImportError: print >>sys.stderr, "WARNING, could not import libxslt"
+
from sfatables.globals import *
class XMLRule:
pprint(tmack_components)
#print "removing %(slicehrn)s from all nodes" % locals()
-#a.delete_slice(cred, slicehrn)
+#a.DeleteSliver(cred, slicehrn)
print "adding %(slicehrn)s back to its original nodes" % locals()
a.list_resources(cred, slicehrn)
-a.create_slice(cred, slicehrn, components)
+a.CreateSliver(cred, slicehrn, components)
a.list_resources(cred, slicehrn)
BasicTestCase.setUp(self)
def testGetSlices(self):
- self.aggregate.get_slices(self.credential)
+ self.aggregate.ListSlices(self.credential)
def testGetResources(self):
# available resources
# get availabel resources
rspec = self.aggregate.get_resources(self.credential)
slice_credential = self.client.get_credential(self.slice['hrn'], 'slice')
- self.aggregate.create_slice(slice_credential, self.slice['hrn'], rspec)
+ self.aggregate.CreateSliver(slice_credential, self.slice['hrn'], rspec)
def testDeleteSlice(self):
slice_credential = self.client.get_credential(self.slice['hrn'], 'slice')
- self.aggregate.delete_slice(slice_credential, self.slice['hrn'])
+ self.aggregate.DeleteSliver(slice_credential, self.slice['hrn'],"call-id-delete-slice")
def testGetTicket(self):
slice_credential = self.client.get_credential(self.slice['hrn'], 'slice')
self.cm.stop_slice(self.slice_cred, self.slice['hrn'])
def testDeleteSlice(self):
- self.cm.delete_slice(self.slice_cred, self.slice['hrn'])
+ self.cm.DeleteSliver(self.slice_cred, self.slice['hrn'],"call-id-delete-slice-cm")
def testRestartSlice(self):
self.cm.restart_slice(self.slice_cred, self.slice['hrn'])
def testGetSlices(self):
- self.cm.get_slices(self.slice_cred, self.slice['hrn'])
+ self.cm.ListSlices(self.slice_cred, self.slice['hrn'])
def testRedeemTicket(self):
rspec = self.aggregate.get_resources(self.credential)
def test_names(testcase):
return [name for name in dir(testcase) if name.startswith('test')]
-def create_slice(client):
+def CreateSliver(client):
# register a slice that will be used for some test
authority = get_authority(client.hrn)
auth_cred = client.get_credential(authority, 'authority')
client.registry.register(auth_cred, slice_record)
return slice_record
-def delete_slice(cleint, slice):
+def DeleteSliver(client, slice):
authority = get_authority(client.hrn)
auth_cred = client.get_credential(authority, 'authority')
if slice:
# create the test slice if necessary
if options.all or options.slicemgr or options.aggregate \
or options.component:
- test_slice = create_slice(client)
+ test_slice = CreateSliver(client)
if options.registry or options.all:
for name in test_names(RegistryTest):
unittest.TextTestRunner(verbosity=2).run(suite)
# remove teset slice
- delete_slice(client, test_slice)
+ DeleteSliver(client, test_slice)
# attempt to update at build-time
-WSDLS = sfa.wsdl registry.wsdl slice-manager.wsdl aggregate.wsdl
+INTERFACES=sfa registry slice-manager aggregate
-all: $(WSDLS)
+WSDLS = $(foreach interface,$(INTERFACES),$(interface).wsdl)
+HTMLS = $(foreach interface,$(INTERFACES),$(interface).html)
+
+all: $(WSDLS) $(HTMLS)
# temporary: turn off wsdl generation as it is failing
#TEMPORARY_OFF = yes
registry.wsdl slice-manager.wsdl aggregate.wsdl sfa.wsdl:
touch $@
else
-registry.wsdl:
+registry.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --registry > $@
-slice-manager.wsdl:
+slice-manager.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --slice-manager > $@
-aggregate.wsdl:
+aggregate.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --aggregate > $@
-sfa.wsdl:
+sfa.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --registry --slice-manager --aggregate > $@
endif
install -c -m 0755 $(wsdl) $@
#################### clean
-clean:
- rm -f *wsdl
+clean::
+ rm -f $(WSDLS)
install-clean:
rm -f $(INSTALLED)
.PHONY: all clean install install-clean
+#################### generate html document
+%.html: %.wsdl wsdl2html.xsl
+	xsltproc $(XSLTFLAGS) wsdl2html.xsl $*.wsdl > $@ || { rm -f $@; exit 1; }
+
+clean::
+ rm -f $(HTMLS)
+
+
+.PHONY: backup diff
+backup:
+	$(foreach wsdl,$(WSDLS), cp $(wsdl) $(wsdl).bak;)
+	$(foreach html,$(HTMLS), cp $(html) $(html).bak;)
+
+diff:
+ $(foreach wsdl,$(WSDLS), diff $(wsdl) $(wsdl).bak;)
+ $(foreach html,$(HTMLS), diff $(html) $(html).bak;)
--- /dev/null
+<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
+<html> <head>
+<title>The various SFA APIs in a myplc deployment</title>
+</head>
+
+<body>
+<h1>The various SFA APIs in a myplc deployment</h1>
+<ul>
+ <li> <a href="registry.html"> The registry API.</a></li>
+ <li> <a href="aggregate.html"> The aggregate API</a></li>
+ <li> <a href="slice-manager.html"> The slice manager API</a></li>
+ <li> <a href="sfa.html"> All APIs in a single page</a></li>
+ </ul>
+
+</body> </html>
def __init__(self, interface_options):
self.interface_options = interface_options
+ def interface_name (self):
+ if self.interface_options.aggregate and \
+ self.interface_options.slicemgr and \
+ self.interface_options.registry:
+ return "complete"
+ if self.interface_options.aggregate: return "aggregate"
+ elif self.interface_options.slicemgr: return "slicemgr"
+ elif self.interface_options.registry: return "registry"
+ elif self.interface_options.component: return "component"
+ else: return "unknown"
+
def filter_argname(self,argname):
if (not self.interface_options.lite or (argname!="cred")):
if (argname.find('(') != -1):
argname = inbrack
return argname
- def fold_complex_type_names(self,acc, arg):
- name = arg.doc
- if (type(acc)==list):
- acc.append(name)
- else:
- p_i_b = acc.doc
- acc = [p_i_b,name]
- return acc
-
- def fold_complex_type(self,acc, arg):
- name = self.name_complex_type(arg)
- self.complex_types[arg]=name
- if (type(acc)==list):
- acc.append(name)
- else:
- p_i_b = self.name_complex_type(acc)
- acc = [p_i_b,name]
- return acc
+# def fold_complex_type_names(self,acc, arg):
+# name = arg.doc
+# if (type(acc)==list):
+# acc.append(name)
+# else:
+# p_i_b = acc.doc
+# acc = [p_i_b,name]
+# return acc
+#
+# def fold_complex_type(self,acc, arg):
+# name = self.name_complex_type(arg)
+# self.complex_types[arg]=name
+# if (type(acc)==list):
+# acc.append(name)
+# else:
+# p_i_b = self.name_complex_type(acc)
+# acc = [p_i_b,name]
+# return acc
def name_complex_type(self,arg):
#pdb.set_trace()
if (isinstance(arg, Mixed)):
- inner_types = reduce(self.fold_complex_type, arg)
- inner_names = reduce(self.fold_complex_type_names, arg)
+# inner_types = reduce(self.fold_complex_type, arg)
+# inner_names = reduce(self.fold_complex_type_names, arg)
+ inner_types = [ self.name_complex_type(x) for x in arg ]
+ inner_names = [ x.doc for x in arg ]
if (inner_types[-1]=="none"):
inner_types=inner_types[:-1]
min_args = 0
else:
min_args = 1
- self.num_types=self.num_types+1
+ self.num_types += 1
type_name = "Type%d"%self.num_types
complex_type = types_section.appendChild(self.types.createElement("xsd:complexType"))
complex_type.setAttribute("name", type_name)
choice = complex_type.appendChild(self.types.createElement("xsd:choice"))
- for n,t in zip(inner_names,inner_types):
+ for (n,t) in zip(inner_names,inner_types):
element = choice.appendChild(self.types.createElement("element"))
n = self.filter_argname(n)
element.setAttribute("name", n)
return "xsdl:%s"%type_name
elif (isinstance(arg, Parameter)):
return (self.name_simple_type(arg.type))
- elif type(arg) == ListType or type(arg) == TupleType:
+ elif type(arg) in ( ListType , TupleType ):
inner_type = self.name_complex_type(arg[0])
self.num_types=self.num_types+1
type_name = "Type%d"%self.num_types
#print
- in_el = self.wsdl.firstChild.appendChild(self.wsdl.createElement("message"))
+ in_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("message"))
in_el.setAttribute("name", method + "_in")
for service_name in function.interfaces:
# Return type
return_type = function.returns
- out_el = self.wsdl.firstChild.appendChild(self.wsdl.createElement("message"))
+ out_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("message"))
out_el.setAttribute("name", method + "_out")
ret_part = out_el.appendChild(self.wsdl.createElement("part"))
ret_part.setAttribute("name", "Result")
# Port connecting arguments with return type
- port_el = self.wsdl.firstChild.appendChild(self.wsdl.createElement("portType"))
+ port_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("portType"))
port_el.setAttribute("name", method + "_port")
op_el = port_el.appendChild(self.wsdl.createElement("operation"))
# Bindings
- bind_el = self.wsdl.firstChild.appendChild(self.wsdl.createElement("binding"))
+ bind_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("binding"))
bind_el.setAttribute("name", method + "_binding")
bind_el.setAttribute("type", "tns:" + method + "_port")
def add_wsdl_services(self):
for service in self.services.keys():
if (getattr(self.interface_options,service)):
- service_el = self.wsdl.firstChild.appendChild(self.wsdl.createElement("service"))
+ service_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("service"))
service_el.setAttribute("name", service)
for method in self.services[service]:
def compute_wsdl_definitions(self):
wsdl_text_header = """
+ <?xml-stylesheet type="text/xsl" href="wsdl2html.xsl"?>
<wsdl:definitions
- name="sfa_autogenerated"
- targetNamespace="%s/2009/07/sfa.wsdl"
+ name="myplc-sfa-%s"
+ targetNamespace="%s/sfa.wsdl"
xmlns="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
- xmlns:xsdl="%s/2009/07/schema"
- xmlns:tns="%s/2009/07/sfa.wsdl"
+ xmlns:xsdl="%s/schema"
+ xmlns:tns="%s/sfa.wsdl"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"/>
- """ % (globals.plc_ns,globals.plc_ns,globals.plc_ns)
+ """ % (self.interface_name(),globals.plc_ns,globals.plc_ns,globals.plc_ns)
self.wsdl = xml.dom.minidom.parseString(wsdl_text_header)
def compute_wsdl_definitions_and_types(self):
wsdl_text_header = """
+ <?xml-stylesheet type="text/xsl" href="wsdl2html.xsl"?>
<wsdl:definitions
- name="sfa_autogenerated"
- targetNamespace="%s/2009/07/sfa.wsdl"
+ name="myplc-sfa-%s"
+ targetNamespace="%s/sfa.wsdl"
xmlns="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
- xmlns:xsdl="%s/2009/07/schema"
- xmlns:tns="%s/2009/07/sfa.wsdl"
+ xmlns:xsdl="%s/schema"
+ xmlns:tns="%s/sfa.wsdl"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">
<types>
- <xsd:schema xmlns="http://www.w3.org/2001/XMLSchema" targetNamespace="%s/2009/07/schema"/>
+ <xsd:schema xmlns="http://www.w3.org/2001/XMLSchema" targetNamespace="%s/schema"/>
</types>
- </wsdl:definitions> """ % (globals.plc_ns, globals.plc_ns, globals.plc_ns, globals.plc_ns)
+ </wsdl:definitions> """ % (self.interface_name(),globals.plc_ns, globals.plc_ns, globals.plc_ns, globals.plc_ns)
self.types = xml.dom.minidom.parseString(wsdl_text_header)
def add_wsdl_types(self):
wsdl_types = self.wsdl.importNode(self.types.getElementsByTagName("types")[0], True)
- self.wsdl.firstChild.appendChild(wsdl_types)
+ self.wsdl.lastChild.appendChild(wsdl_types)
def generate_wsdl(self):
self.compute_wsdl_definitions_and_types()
--- /dev/null
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<xsl:stylesheet
+version="1.0"
+xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
+xmlns:fn="http://www.w3.org/2005/02/xpath-functions"
+>
+<xsl:output method="html" />
+
+<!-- to locate the out message -->
+<xsl:key name="messages" match="wsdl:message" use="@name"/>
+
+<xsl:template match="wsdl:definitions">
+<html>
+<head> <title> <xsl:value-of select="./@name" /> </title> </head>
+<body>
+<table border='1'><tr><th>method</th><th>in</th><th>out</th></tr>
+<xsl:apply-templates mode="messages" />
+</table>
+</body>
+</html>
+</xsl:template>
+
+<xsl:template match="wsdl:message" mode="messages">
+<xsl:variable name="methodname" select="substring-before(@name,'_in')" />
+<xsl:variable name="outmessage" select="concat($methodname,'_out')" />
+<xsl:if test="contains(@name,'_in')">
+<tr>
+<td>
+<xsl:value-of select="$methodname" />
+</td>
+<td>
+<xsl:for-each select="wsdl:part">
+<xsl:value-of select="@name" />
+(<xsl:value-of select="@type" />)
+<xsl:text> </xsl:text>
+</xsl:for-each>
+</td>
+<td>
+<xsl:value-of select="key('messages',$outmessage)/wsdl:part/@type" />
+
+</td>
+</tr>
+</xsl:if>
+</xsl:template>
+
+</xsl:stylesheet>
+