Merge branch 'senslab2' of ssh://flab-git.pl.sophia.inria.fr/git/sfa into senslab2
author Sandrine Avakian <sandrine.avakian@inria.fr>
Mon, 14 Nov 2011 10:51:40 +0000 (11:51 +0100)
committer Sandrine Avakian <sandrine.avakian@inria.fr>
Mon, 14 Nov 2011 10:51:40 +0000 (11:51 +0100)
Conflicts:
sfa/managers/registry_manager_slab.py

195 files changed:
.gitignore
INSTALL.txt
Makefile
config/default_config.xml
config/topology [new file with mode: 0644]
docs/Makefile
setup.py
sfa.spec
sfa/client/Makefile
sfa/client/getNodes.py
sfa/client/getRecord.py
sfa/client/setRecord.py
sfa/client/sfadump.py
sfa/client/sfascan.py
sfa/client/sfi.py
sfa/client/sfiAddAttribute.py
sfa/client/sfiAddLinks.py [new file with mode: 0755]
sfa/client/sfiAddSliver.py
sfa/client/sfiListLinks.py [new file with mode: 0755]
sfa/client/sfi_commands.py
sfa/client/xmlrpcprotocol.py [moved from sfa/util/xmlrpcprotocol.py with 96% similarity]
sfa/generic/__init__.py [new file with mode: 0644]
sfa/generic/architecture.txt [new file with mode: 0644]
sfa/generic/pl.py [new file with mode: 0644]
sfa/generic/slab.py [new file with mode: 0644]
sfa/importer/__init__.py [moved from sfa/managers/vini/__init__.py with 100% similarity]
sfa/importer/sfa-import-plc.py [moved from sfa/plc/sfa-import-plc.py with 96% similarity]
sfa/importer/sfa-nuke-plc.py [moved from sfa/plc/sfa-nuke-plc.py with 97% similarity]
sfa/importer/sfaImport.py [moved from sfa/plc/sfaImport.py with 98% similarity]
sfa/init.d/sfa
sfa/init.d/sfa-cm
sfa/managers/aggregate_manager.py [new file with mode: 0644]
sfa/managers/aggregate_manager_eucalyptus.py
sfa/managers/aggregate_manager_max.py
sfa/managers/aggregate_manager_openflow.py [deleted file]
sfa/managers/aggregate_manager_pl.py [deleted file]
sfa/managers/aggregate_manager_slab.py
sfa/managers/aggregate_manager_vini.py [deleted file]
sfa/managers/component_manager_pl.py
sfa/managers/managerwrapper.py [new file with mode: 0644]
sfa/managers/registry_manager.py [moved from sfa/managers/registry_manager_pl.py with 83% similarity]
sfa/managers/registry_manager_slab.py
sfa/managers/slice_manager.py [new file with mode: 0644]
sfa/managers/slice_manager_pl.py [deleted file]
sfa/managers/slice_manager_slab.py
sfa/managers/vini/request.xml [deleted file]
sfa/managers/vini/topology.py [deleted file]
sfa/managers/vini/utils.py [deleted file]
sfa/managers/vini/vini.rnc [deleted file]
sfa/managers/vini/vini.rng [deleted file]
sfa/managers/vini/vini.xml [deleted file]
sfa/managers/vini/vini_network.py [deleted file]
sfa/methods/CreateGid.py
sfa/methods/CreateSliver.py
sfa/methods/DeleteSliver.py
sfa/methods/GetCredential.py
sfa/methods/GetGids.py
sfa/methods/GetSelfCredential.py
sfa/methods/GetTicket.py
sfa/methods/GetVersion.py
sfa/methods/List.py
sfa/methods/ListResources.py
sfa/methods/ListSlices.py
sfa/methods/RedeemTicket.py
sfa/methods/Register.py
sfa/methods/RegisterPeerObject.py
sfa/methods/Remove.py
sfa/methods/RemovePeerObject.py
sfa/methods/RenewSliver.py
sfa/methods/Resolve.py
sfa/methods/ResolveGENI.py
sfa/methods/Shutdown.py
sfa/methods/SliverStatus.py
sfa/methods/Start.py
sfa/methods/Stop.py
sfa/methods/Update.py
sfa/methods/UpdateSliver.py
sfa/methods/get_aggregates.py
sfa/methods/get_key.py
sfa/methods/get_registries.py
sfa/methods/get_trusted_certs.py
sfa/methods/register_peer_object.py
sfa/methods/remove_peer_object.py
sfa/methods/reset_slice.py
sfa/plc/aggregate.py
sfa/plc/network.py
sfa/plc/nodemanager.py [moved from sfa/util/nodemanager.py with 100% similarity]
sfa/plc/peers.py
sfa/plc/plcomponentdriver.py [new file with mode: 0644]
sfa/plc/pldriver.py [moved from sfa/plc/api.py with 50% similarity]
sfa/plc/plshell.py [new file with mode: 0644]
sfa/plc/slices.py
sfa/plc/vlink.py [new file with mode: 0644]
sfa/rspecs/elements/bwlimit.py [new file with mode: 0644]
sfa/rspecs/elements/component_manager.py [new file with mode: 0644]
sfa/rspecs/elements/disk_image.py [new file with mode: 0644]
sfa/rspecs/elements/element.py
sfa/rspecs/elements/execute.py [new file with mode: 0644]
sfa/rspecs/elements/hardware_type.py [new file with mode: 0644]
sfa/rspecs/elements/install.py [new file with mode: 0644]
sfa/rspecs/elements/interface.py [new file with mode: 0644]
sfa/rspecs/elements/link.py
sfa/rspecs/elements/link_type.py [new file with mode: 0644]
sfa/rspecs/elements/location.py [new file with mode: 0644]
sfa/rspecs/elements/login.py [new file with mode: 0644]
sfa/rspecs/elements/network.py
sfa/rspecs/elements/node.py
sfa/rspecs/elements/pltag.py [new file with mode: 0644]
sfa/rspecs/elements/property.py [new file with mode: 0644]
sfa/rspecs/elements/services.py [new file with mode: 0644]
sfa/rspecs/elements/sliver.py
sfa/rspecs/elements/tag.py [new file with mode: 0644]
sfa/rspecs/elements/versions/__init__.py [new file with mode: 0644]
sfa/rspecs/elements/versions/element_version.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2Link.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2Services.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2SliverType.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Network.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Sliver.py [new file with mode: 0644]
sfa/rspecs/pg_rspec_converter.py
sfa/rspecs/rspec.py
sfa/rspecs/rspec_elements.py
sfa/rspecs/sfa_rspec_converter.py
sfa/rspecs/versions/pgv2.py
sfa/rspecs/versions/sfav1.py
sfa/rspecs/xml.py [deleted file]
sfa/rspecs/xml_interface.py [deleted file]
sfa/senslab/LDAPapi.py
sfa/senslab/slabdriver.py [moved from sfa/senslab/api.py with 60% similarity]
sfa/senslab/table_slab.py
sfa/server/aggregate.py
sfa/server/component.py
sfa/server/interface.py
sfa/server/modpython/SfaAggregateModPython.py
sfa/server/modpython/SfaRegistryModPython.py
sfa/server/modpython/SfaSliceMgrModPython.py
sfa/server/registry.py
sfa/server/sfa-ca.py
sfa/server/sfa-clean-peer-records.py
sfa/server/sfa-start.py [moved from sfa/server/sfa-server.py with 86% similarity]
sfa/server/sfa_component_setup.py
sfa/server/sfaapi.py [new file with mode: 0644]
sfa/server/sfaserver.py [new file with mode: 0644]
sfa/server/slicemgr.py
sfa/server/threadedserver.py [moved from sfa/util/server.py with 80% similarity]
sfa/server/threadmanager.py [moved from sfa/util/threadmanager.py with 100% similarity]
sfa/server/xmlrpcapi.py [moved from sfa/util/api.py with 55% similarity]
sfa/trust/auth.py
sfa/trust/certificate.py
sfa/trust/credential.py
sfa/trust/credential_legacy.py
sfa/trust/gid.py
sfa/trust/hierarchy.py
sfa/trust/sfaticket.py [moved from sfa/util/sfaticket.py with 98% similarity]
sfa/util/PostgreSQL.py
sfa/util/bwlimit.py [deleted file]
sfa/util/cache.py
sfa/util/componentserver.py [deleted file]
sfa/util/config.py
sfa/util/defaultdict.py [new file with mode: 0644]
sfa/util/faults.py
sfa/util/filter.py
sfa/util/httpsProtocol.py [deleted file]
sfa/util/method.py
sfa/util/parameter.py
sfa/util/plxrn.py
sfa/util/policy.py
sfa/util/record.py
sfa/util/rspec.py [deleted file]
sfa/util/rspecHelper.py [deleted file]
sfa/util/sfalogging.py
sfa/util/soapprotocol.py [deleted file]
sfa/util/specdict.py [deleted file]
sfa/util/ssl_socket.py [deleted file]
sfa/util/storage.py
sfa/util/table.py
sfa/util/topology.py [new file with mode: 0644]
sfa/util/xml.py [new file with mode: 0755]
sfa/util/xrn.py
sfatables/commands/Add.py
sfatables/commands/Delete.py
sfatables/commands/Insert.py
sfatables/commands/List.py
sfatables/runtime.py
sfatables/sfatables
sfatables/xmlextension.py
sfatables/xmlrule.py
tests/client/README [deleted file]
tests/testInterfaces.py
tools/Makefile [new file with mode: 0644]
tools/depgraph2dot.py [new file with mode: 0755]
tools/py2depgraph.py [new file with mode: 0755]
tools/readme [new file with mode: 0644]

index 9acfb22..7e5b62e 100644 (file)
@@ -15,3 +15,4 @@ sfa/client/*.version
 *.pkey
 *.cert
 *.cred
+.DS_Store
index d748883..cc59ceb 100644 (file)
@@ -63,9 +63,9 @@ This will initialize /etc/sfa/authorities/server.key from /etc/sfa/authorities/p
 This will start Registry, Slice Manager and Aggregate Manager. Your ps command output would look like:
 
 # ps -ef | grep python
-root     24944     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-server.py -r -d
-root     24957     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-server.py -a -d
-root     24970     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-server.py -s -d
+root     24944     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-start.py -r -d
+root     24957     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-start.py -a -d
+root     24970     1  0 May11 ?        00:00:00 /usr/bin/python /usr/bin/sfa-start.py -s -d
 -------
 4) Configure SFA client:
 
index f162f5f..72fa41a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -15,10 +15,13 @@ uninstall: python-uninstall tests-uninstall
 
 .PHONY: all install clean uninstall
 
-VERSIONTAG=0.0-0-should.be-redefined-by-specfile
+##########
+rpmversion:=$(shell rpm -q --specfile sfa.spec --queryformat="%{version}\n" | head -1)
+# somehow %{taglevel} is empty, turns out %{release} has what we want
+rpmtaglevel:=$(shell rpm -q --specfile sfa.spec --queryformat="%{release}\n" 2> /dev/null | head -1)
+VERSIONTAG=$(rpmversion)-$(rpmtaglevel)
 SCMURL=should-be-redefined-by-specfile
 
-##########
 python: version
 
 version: sfa/util/version.py
@@ -87,7 +90,7 @@ force:
 
 ##########
 tags:  
-       find . -type f | egrep -v '/\.git/|/\.svn/|TAGS|\.py[co]$$|\.doc$$|\.html$$|\.pdf$$|~$$|\.png$$|\.svg$$|\.out$$|\.bak$$|\.xml$$' | xargs etags
+       find . -type f | egrep -v '/\.git/|/\.svn/|TAGS|~$$|\.(py[co]|doc|html|pdf|png|svg|out|bak|xml|dg)$$' | xargs etags
 .PHONY: tags
 
 signatures:
@@ -125,7 +128,7 @@ sfiAddAttribute.py sfiAddSliver.py sfiDeleteAttribute.py sfiDeleteSliver.py sfiL
 sfiListSlivers.py sfadump.py
 
 BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
-       ./sfa/plc/sfa-import-plc.py ./sfa/plc/sfa-nuke-plc.py ./sfa/server/sfa-server.py \
+       ./sfa/importer/sfa-import-plc.py ./sfa/importer/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
        $(foreach client,$(CLIENTS),./sfa/client/$(client))
 
 sync:
@@ -137,7 +140,9 @@ ifeq (,$(SSHURL))
 else
        +$(RSYNC) ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/sfa/
        +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
-       +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin
+       +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin/
+       +$(RSYNC) ./sfa/init.d/sfa  $(SSHURL)/etc/init.d/
+       +$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/
        $(SSHCOMMAND) exec service sfa restart
 endif
 
index 212dee4..670d6f2 100644 (file)
@@ -18,6 +18,15 @@ Thierry Parmentelat
       <description>Basic system variables.</description>
 
       <variablelist>
+       <variable id="generic_flavour" type="string">
+         <name>Generic Flavour</name>
+         <value>pl</value>
+         <description>This string refers to a class located in sfa.generic that describes 
+         which specific implementation needs to be used for api, manager and driver objects.
+         PlanetLab users do not need to change this setting.
+         </description>
+       </variable>
+
         <variable id="interface_hrn" type="string">
           <name>Human readable name</name>
           <value>plc</value>
@@ -49,9 +58,10 @@ Thierry Parmentelat
             it look like the user is the one performing the operation. Doing this requires a 
             valid key pair and credential for the user. This option defines the path where 
             key pairs and credentials are generated and stored.
-            This functionality is used by the SFA web gui 
+            This functionality is used by the SFA web GUI. 
             </description> 
         </variable>
+
       </variablelist>
     </category>
 
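Editor's note: the new generic_flavour setting is the string that the generic layer (sfa/generic/__init__.py, added further down in this commit) turns into a module and class name. A minimal sketch of that resolution, using 'pl' purely as the illustrative default value:

# Sketch of how SFA_GENERIC_FLAVOUR is resolved, following Generic.the_flavour()
# introduced later in this commit ('pl' is just the default used for illustration).
flavour = "pl".lower()                         # value of generic_flavour / SFA_GENERIC_FLAVOUR
module_path = "sfa.generic.%s" % flavour       # -> "sfa.generic.pl"
classname = flavour                            # -> class 'pl' defined in that module
module = __import__(module_path, globals(), locals(), [classname])
flavour_class = getattr(module, classname)     # e.g. sfa.generic.pl.pl, a Generic subclass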
diff --git a/config/topology b/config/topology
new file mode 100644 (file)
index 0000000..24a8e13
--- /dev/null
@@ -0,0 +1,20 @@
+# Links in the physical topology, gleaned from looking at the Internet2
+# topology map.  Link (a, b) connects sites with IDs a and b.
+#
+# 2 12  # I2 Princeton - New York
+# 11 13 # I2 Chicago - Wash DC
+# 11 15 # I2 Chicago - Atlanta
+# 11 16 # I2 Chicago - CESNET
+# 11 17 # I2 Chicago - Kansas City
+# 12 13 # I2 New York - Wash DC
+# 13 15 # I2 Wash DC - Atlanta
+# 14 15 # Ga Tech - I2 Atlanta
+# 15 19 # I2 Atlanta - Houston
+# 17 19 # I2 Kansas City - Houston
+# 17 22 # I2 Kansas City - Salt Lake City
+# 17 24 # I2 Kansas City - UMKC
+# 19 20 # I2 Houston - Los Angeles
+# 20 21 # I2 Los Angeles - Seattle
+# 20 22 # I2 Los Angeles - Salt Lake City
+# 21 22 # I2 Seattle - Salt Lake City
+
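Editor's note: the file format is one "a b" pair of site IDs per line, with '#' starting a comment (every Internet2 entry above is shipped commented out). A minimal parsing sketch under that assumption; the actual parser lives in the new sfa/util/topology.py, which is not shown in this diff:

# Hedged sketch: parse an installed /etc/sfa/topology file into a set of (a, b) site-ID links.
links = set()
for line in open("/etc/sfa/topology"):
    line = line.split('#', 1)[0].strip()   # drop comments and surrounding whitespace
    if not line:
        continue
    a, b = [int(field) for field in line.split()]
    links.add((a, b))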
index 463dbaf..5f34949 100644 (file)
@@ -3,7 +3,7 @@ doc:
        pythondoc.py ../sfa/util/certificate.py ../sfa/util/credential.py ../sfa/util/gid.py \
                      ../sfa/util/rights.py ../sfa/util/config.py ../sfa/trust/hierarchy.py \
                      ../sfa/util/record.py ../sfa/util/client.py \
-                     ../sfa/util/server.py 
+                     ../sfa/server/sfaserver.py 
 
        pythondoc.py ../sfa/registry/registry.py ../sfa/registry/import.py \
                      ../sfa/registry/nuke.py
index 921139c..c3285eb 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -12,10 +12,10 @@ from distutils.core import setup
 bins = [ 
     'config/sfa-config-tty',
     'config/gen-sfa-cm-config.py',
-    'sfa/plc/sfa-import-plc.py', 
-    'sfa/plc/sfa-nuke-plc.py', 
+    'sfa/importer/sfa-import-plc.py', 
+    'sfa/importer/sfa-nuke-plc.py', 
     'sfa/server/sfa-ca.py', 
-    'sfa/server/sfa-server.py', 
+    'sfa/server/sfa-start.py', 
     'sfa/server/sfa-clean-peer-records.py', 
     'sfa/server/sfa_component_setup.py', 
     'sfa/client/sfi.py', 
@@ -36,17 +36,19 @@ bins = [
 
 package_dirs = [
     'sfa', 
+    'sfa/trust',
+    'sfa/util', 
     'sfa/client',
+    'sfa/server',
     'sfa/methods',
+    'sfa/generic',
+    'sfa/managers',
+    'sfa/importer',
     'sfa/plc',
     'sfa/senslab',
-    'sfa/server',
-    'sfa/trust',
-    'sfa/util', 
-    'sfa/managers',
-    'sfa/managers/vini',
     'sfa/rspecs',
     'sfa/rspecs/elements',
+    'sfa/rspecs/elements/versions',
     'sfa/rspecs/versions',
     'sfatables',
     'sfatables/commands',
@@ -59,6 +61,7 @@ data_files = [('/etc/sfa/', [ 'config/aggregates.xml',
                               'config/registries.xml',
                               'config/default_config.xml',
                               'config/sfi_config',
+                              'config/topology',
                               'sfa/managers/pl/pl.rng',
                               'sfa/trust/credential.xsd',
                               'sfa/trust/top.xsd',
index 95e56a3..55b6ec3 100644 (file)
--- a/sfa.spec
+++ b/sfa.spec
@@ -1,6 +1,6 @@
 %define name sfa
-%define version 1.0
-%define taglevel 36
+%define version 1.1
+%define taglevel 2
 
 %define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
 %global python_sitearch        %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
@@ -46,13 +46,13 @@ Requires: python-dateutil
 #%endif
 
 %package cm
-Summary: the SFA wrapper around MyPLC NodeManager
+Summary: the SFA layer around MyPLC NodeManager
 Group: Applications/System
 Requires: sfa
 Requires: pyOpenSSL >= 0.6
 
 %package plc
-Summary: the SFA wrapper arounf MyPLC
+Summary: the SFA layer around MyPLC
 Group: Applications/System
 Requires: sfa
 Requires: python-psycopg2
@@ -121,7 +121,7 @@ rm -rf $RPM_BUILD_ROOT
 
 %files
 # sfa and sfatables depend each other.
-%{_bindir}/sfa-server.py*
+%{_bindir}/sfa-start.py*
 /etc/sfatables/*
 %{python_sitelib}/*
 %{_bindir}/keyconvert.py*
@@ -144,6 +144,7 @@ rm -rf $RPM_BUILD_ROOT
 /etc/sfa/sig.xsd
 /etc/sfa/xml.xsd
 /etc/sfa/protogeni-rspec-common.xsd
+/etc/sfa/topology
 %{_bindir}/sfa-config-tty
 %{_bindir}/sfa-import-plc.py*
 %{_bindir}/sfa-clean-peer-records.py*
@@ -196,6 +197,19 @@ fi
 [ "$1" -ge "1" ] && service sfa-cm restart || :
 
 %changelog
+* Mon Nov 07 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.1-2
+- checkpoint tag: use SFA_GENERIC_FLAVOUR instead of SFA_*_TYPE
+- improvements in the pgv2 rspecs
+- driver separated from api
+- code starts moving around where it belongs
+- sfascan caches getversion across invocations
+- vini topology extracted as a config file
+
+* Fri Oct 28 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.1-1
+- first support for protogeni rspecs is working
+- vini no longer needs a specific manager
+- refactoring underway towards more flexible/generic architecture
+
 * Thu Sep 15 2011 Tony Mack <tmack@cs.princeton.edu> - sfa-1.0-36
 - Unicode-friendliness for user names with accents/special chars.
 - Fix bug that could cause create the client to fail when calling CreateSliver for a slice that has the same hrn as a user.
index af366fc..061e1b2 100644 (file)
@@ -1,5 +1,6 @@
 # recompute the SFA graphs from different locations
 
+## use -vv for the full story
 SFASCAN = ./sfascan.py -v
 
 # AMs, at least MyPLC AMs, are boring
@@ -40,8 +41,8 @@ BUNDLES-LR += http://www.planet-lab.jp:12347/@auto-plj-sa
 BUNDLES-LR += http://www.emanicslab.org:12345/@auto-elc-reg 
 BUNDLES-LR += http://www.emanicslab.org:12347/@auto-elc-sa
 
-#EXTENSIONS := png svg
-EXTENSIONS := png
+EXTENSIONS := png svg
+#EXTENSIONS := png
 
 ####################
 ALL += $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))))
@@ -89,8 +90,9 @@ $(foreach bundle,$(BUNDLES),$(eval $(call bundle_version_target,$(bundle))))
 
 ####################
 clean:
-       rm -f auto-*.{out,version}
+       rm -f .auto*
        $(foreach extension,$(EXTENSIONS),rm -rf auto-*.$(extension);)
+       $(SFASCAN) -c
 
 DATE=$(shell date '+%Y-%m-%d')
 PUBEXTENSIONS=png
index 67f9a28..71d17f0 100644 (file)
@@ -6,8 +6,6 @@ from optparse import OptionParser
 from pprint import pprint
 from types import StringTypes
 
-from sfa.util.rspec import RSpec
-
 def create_parser():
     command = sys.argv[0]
     argv = sys.argv[1:]
index cb765e0..e2be593 100755 (executable)
@@ -14,9 +14,7 @@ import os
 from optparse import OptionParser
 from pprint import pprint
 from xml.parsers.expat import ExpatError
-
-from sfa.util.rspec import RecordSpec
-
+from sfa.util.xml import XML    
 
 def create_parser():
     command = sys.argv[0]
@@ -34,17 +32,17 @@ def create_parser():
     return parser    
 
 
-def printRec(record, filters, options):
+def printRec(record_dict, filters, options):
     line = ""
     if len(filters):
         for filter in filters:
             if options.DEBUG:  print "Filtering on %s" %filter
             line += "%s: %s\n" % (filter, 
-                printVal(record.dict["record"].get(filter, None)))
+                printVal(record_dict.get(filter, None)))
         print line
     else:
         # print the wole thing
-        for (key, value) in record.dict["record"].iteritems():
+        for (key, value) in record_dict.iteritems():
             if (not options.withkey and key in ('gid', 'keys')) or\
                 (not options.plinfo and key == 'pl_info'):
                 continue
@@ -69,16 +67,14 @@ def main():
 
     stdin = sys.stdin.read()
     
-    record = RecordSpec(xml = stdin)
+    record = XML(stdin)
+    record_dict = record.todict()
     
-    if not record.dict.has_key("record"):
-        raise "RecordError", "Input record does not have 'record' tag."
-
     if options.DEBUG: 
-        record.pprint()
+        pprint(record.toxml())
         print "#####################################################"
 
-    printRec(record, args, options)
+    printRec(record_dict, args, options)
 
 if __name__ == '__main__':
     try: main()
index 5f48e68..405c90d 100755 (executable)
@@ -14,9 +14,7 @@ sys.path.append('.')
 import os
 from optparse import OptionParser
 from pprint import pprint
-
-from sfa.util.rspec import RecordSpec
-
+from sfa.util.xml import XML
 
 def create_parser():
     command = sys.argv[0]
@@ -92,15 +90,14 @@ def main():
     parser = create_parser(); 
     (options, args) = parser.parse_args()
 
-    record = RecordSpec(xml = sys.stdin.read())
-
+    record = XML(sys.stdin.read())
+    record_dict = record.todict()
     if args:
-        editDict(args, record.dict["record"], options)
+        editDict(args, record_dict, options)
     if options.DEBUG:
-        print "New Record:\n%s" % record.dict
-        record.pprint()
-
-    record.parseDict(record.dict)
+        print "New Record:\n%s" % record_dict
+        
+    record.parse_dict(record_dict)
     s = record.toxml()
     sys.stdout.write(s)
 
index 54654f8..52a9105 100755 (executable)
@@ -12,7 +12,6 @@ from sfa.trust.certificate import Certificate
 from sfa.trust.credential import Credential
 from sfa.trust.gid import GID
 from sfa.util.record import SfaRecord
-from sfa.util.rspec import RSpec
 from sfa.util.sfalogging import logger
 
 def determine_sfa_filekind(fn):
index 494a727..f252378 100755 (executable)
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 
-import sys
+import sys, os.path
+import pickle
+import time
 import socket
 import traceback
 from urlparse import urlparse
@@ -11,7 +13,7 @@ from optparse import OptionParser
 
 from sfa.client.sfi import Sfi
 from sfa.util.sfalogging import logger, DEBUG
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 
 def url_hostname_port (url):
     if url.find("://")<0:
@@ -28,11 +30,82 @@ def url_hostname_port (url):
     else:
         return (url,parts[0],parts[1])
 
+### a very simple cache mechanism so that successive runs (see make) 
+### will go *much* faster
+### assuming everything is sequential, as simple as it gets
+### { url -> (timestamp,version)}
+class VersionCache:
+    def __init__ (self, filename=None, expires=60*60):
+        # default is to store cache in the same dir as argv[0]
+        if filename is None:
+            filename=os.path.join(os.path.dirname(sys.argv[0]),"sfascan-version-cache.pickle")
+        self.filename=filename
+        self.expires=expires
+        self.url2version={}
+        self.load()
+
+    def load (self):
+        try:
+            infile=file(self.filename,'r')
+            self.url2version=pickle.load(infile)
+            infile.close()
+        except:
+            logger.debug("Cannot load version cache, restarting from scratch")
+            self.url2version = {}
+        logger.debug("loaded version cache with %d entries %s"%(len(self.url2version),self.url2version.keys()))
+
+    def save (self):
+        try:
+            outfile=file(self.filename,'w')
+            pickle.dump(self.url2version,outfile)
+            outfile.close()
+        except:
+            logger.log_exc ("Cannot save version cache into %s"%self.filename)
+    def clean (self):
+        try:
+            retcod=os.unlink(self.filename)
+            logger.info("Cleaned up version cache %s, retcod=%d"%(self.filename,retcod))
+        except:
+            logger.info ("Could not unlink version cache %s"%self.filename)
+
+    def show (self):
+        entries=len(self.url2version)
+        print "version cache from file %s has %d entries"%(self.filename,entries)
+        key_values=self.url2version.items()
+        def old_first (kv1,kv2): return int(kv1[1][0]-kv2[1][0])
+        key_values.sort(old_first)
+        for key_value in key_values:
+            (url,tuple) = key_value
+            (timestamp,version) = tuple
+            how_old = time.time()-timestamp
+            if how_old<=self.expires:
+                print url,"-- %d seconds ago"%how_old
+            else:
+                print "OUTDATED",url,"(%d seconds ago, expires=%d)"%(how_old,self.expires)
+    
+    # turns out we might have trailing slashes or not
+    def normalize (self, url):
+        return url.strip("/")
+        
+    def set (self,url,version):
+        url=self.normalize(url)
+        self.url2version[url]=( time.time(), version)
+    def get (self,url):
+        url=self.normalize(url)
+        try:
+            (timestamp,version)=self.url2version[url]
+            how_old = time.time()-timestamp
+            if how_old<=self.expires: return version
+            else: return None
+        except:
+            return None
+
 ###
 class Interface:
 
-    def __init__ (self,url):
+    def __init__ (self,url,verbose=False):
         self._url=url
+        self.verbose=verbose
         try:
             (self._url,self.hostname,self.port)=url_hostname_port(url)
             self.ip=socket.gethostbyname(self.hostname)
@@ -54,13 +127,21 @@ class Interface:
 
     # connect to server and trigger GetVersion
     def get_version(self):
+        ### if we already know the answer:
         if self.probed:
             return self._version
+        ### otherwise let's look in the cache file
+        logger.debug("searching in version cache %s"%self.url())
+        cached_version = VersionCache().get(self.url())
+        if cached_version is not None:
+            logger.info("Retrieved version info from cache")
+            return cached_version
+        ### otherwise let's do the hard work
         # dummy to meet Sfi's expectations for its 'options' field
         class DummyOptions:
             pass
         options=DummyOptions()
-        options.verbose=False
+        options.verbose=self.verbose
         options.timeout=10
         try:
             client=Sfi(options)
@@ -68,13 +149,22 @@ class Interface:
             key_file = client.get_key_file()
             cert_file = client.get_cert_file(key_file)
             url=self.url()
-            logger.info('issuing get version at %s'%url)
-            logger.debug("GetVersion, using timeout=%d"%options.timeout)
-            server=xmlrpcprotocol.get_server(url, key_file, cert_file, timeout=options.timeout, verbose=options.verbose)
+            logger.info('issuing GetVersion at %s'%url)
+            # setting timeout here seems to get the call to fail - even though the response time is fast
+            #server=xmlrpcprotocol.server_proxy(url, key_file, cert_file, verbose=self.verbose, timeout=options.timeout)
+            server=xmlrpcprotocol.server_proxy(url, key_file, cert_file, verbose=self.verbose)
             self._version=server.GetVersion()
         except:
+            logger.log_exc("failed to get version")
             self._version={}
+        # so that next run from this process will find out
         self.probed=True
+        # store in version cache so next processes will remember for an hour
+        cache=VersionCache()
+        cache.set(self.url(),self._version)
+        cache.save()
+        logger.debug("Saved version for url=%s in version cache"%self.url())
+        # that's our result
         return self._version
 
     @staticmethod
@@ -155,19 +245,18 @@ class SfaScan:
         while to_scan:
             for interface in to_scan:
                 # performing xmlrpc call
+                logger.info("retrieving/fetching version at interface %s"%interface.url())
                 version=interface.get_version()
-                if self.verbose:
-                    logger.info("GetVersion at interface %s"%interface.url())
-                    if not version:
-                        logger.info("<EMPTY GetVersion(); offline or cannot authenticate>")
-                    else: 
-                        for (k,v) in version.iteritems(): 
-                            if not isinstance(v,dict):
-                                logger.info("\r\t%s:%s"%(k,v))
-                            else:
-                                logger.info(k)
-                                for (k1,v1) in v.iteritems():
-                                    logger.info("\r\t\t%s:%s"%(k1,v1))
+                if not version:
+                    logger.info("<EMPTY GetVersion(); offline or cannot authenticate>")
+                else: 
+                    for (k,v) in version.iteritems(): 
+                        if not isinstance(v,dict):
+                            logger.debug("\r\t%s:%s"%(k,v))
+                        else:
+                            logger.debug(k)
+                            for (k1,v1) in v.iteritems():
+                                logger.debug("\r\t\t%s:%s"%(k1,v1))
                 # 'geni_api' is expected if the call succeeded at all
                 # 'peers' is needed as well as AMs typically don't have peers
                 if 'geni_api' in version and 'peers' in version: 
@@ -206,21 +295,35 @@ def main():
                       help="output filenames (cumulative) - defaults are %r"%default_outfiles)
     parser.add_option("-l","--left-to-right",action="store_true",dest="left_to_right",default=False,
                       help="instead of top-to-bottom")
-    parser.add_option("-v","--verbose",action='store_true',dest='verbose',default=False,
-                      help="verbose")
-    parser.add_option("-d","--debug",action='store_true',dest='debug',default=False,
-                      help="debug")
+    parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0,
+                      help="verbose - can be repeated for more verbosity")
+    parser.add_option("-c", "--clean-cache",action='store_true',
+                      dest='clean_cache',default=False,
+                      help='clean/trash version cache and exit')
+    parser.add_option("-s","--show-cache",action='store_true',
+                      dest='show_cache',default=False,
+                      help='show/display version cache')
+    
     (options,args)=parser.parse_args()
+    logger.enable_console()
+    # apply current verbosity to logger
+    logger.setLevelFromOptVerbose(options.verbose)
+    # figure if we need to be verbose for these local classes that only have a bool flag
+    bool_verbose=logger.getBoolVerboseFromOpt(options.verbose)
+
+    if options.show_cache: 
+        VersionCache().show()
+        sys.exit(0)
+    if options.clean_cache:
+        VersionCache().clean()
+        sys.exit(0)
     if not args:
         parser.print_help()
         sys.exit(1)
+        
     if not options.outfiles:
         options.outfiles=default_outfiles
-    logger.enable_console()
-    if options.debug:
-        options.verbose=True
-        logger.setLevel(DEBUG)
-    scanner=SfaScan(left_to_right=options.left_to_right, verbose=options.verbose)
+    scanner=SfaScan(left_to_right=options.left_to_right, verbose=bool_verbose)
     entries = [ Interface(entry) for entry in args ]
     g=scanner.graph(entries)
     logger.info("creating layout")
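Editor's note: the net effect of the VersionCache added above is that successive sfascan runs (e.g. driven by the Makefile) reuse GetVersion answers for an hour. A rough usage sketch of the cache API as introduced in this diff (the URL and result dict are placeholders, not real data):

# Sketch of the VersionCache pattern used by Interface.get_version() above.
cache = VersionCache()                       # loads sfascan-version-cache.pickle next to argv[0]
url = "http://www.emanicslab.org:12345/"     # illustrative interface URL
version = cache.get(url)                     # None if absent or older than the 1h default
if version is None:
    version = {'geni_api': 1, 'peers': {}}   # placeholder for a real GetVersion() result
    cache.set(url, version)
    cache.save()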
index 83a66f9..fc6a7b4 100755 (executable)
@@ -18,12 +18,12 @@ from sfa.util.sfalogging import sfi_logger
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.gid import GID
 from sfa.trust.credential import Credential
-from sfa.util.sfaticket import SfaTicket
+from sfa.trust.sfaticket import SfaTicket
 from sfa.util.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, AuthorityRecord
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.rspec_converter import RSpecConverter
 from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 from sfa.util.config import Config
 from sfa.util.version import version_core
 from sfa.util.cache import Cache
@@ -232,9 +232,9 @@ class Sfi:
             parser.add_option("-d", "--delegate", dest="delegate", default=None, 
                              action="store_true",
                              help="Include a credential delegated to the user's root"+\
-                                  "authority in set of credentials for this call")  
-        
-        # registy filter option    
+                                  "authority in set of credentials for this call")
+
+        # registy filter option
         if command in ("list", "show", "remove"):
             parser.add_option("-t", "--type", dest="type", type="choice",
                             help="type filter ([all]|user|slice|authority|node|aggregate)",
@@ -398,9 +398,9 @@ class Sfi:
        self.cert_file = cert_file
        self.cert = GID(filename=cert_file)
        self.logger.info("Contacting Registry at: %s"%self.reg_url)
-       self.registry = xmlrpcprotocol.get_server(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)  
+       self.registry = xmlrpcprotocol.server_proxy(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)  
        self.logger.info("Contacting Slice Manager at: %s"%self.sm_url)
-       self.slicemgr = xmlrpcprotocol.get_server(self.sm_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
+       self.slicemgr = xmlrpcprotocol.server_proxy(self.sm_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
        return
 
     def get_cached_server_version(self, server):
@@ -493,7 +493,7 @@ class Sfi:
             self.logger.info("Getting Registry issued cert")
             self.read_config()
             # *hack.  need to set registyr before _get_gid() is called 
-            self.registry = xmlrpcprotocol.get_server(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
+            self.registry = xmlrpcprotocol.server_proxy(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
             gid = self._get_gid(type='user')
             self.registry = None 
             self.logger.info("Writing certificate to %s"%cert_file)
@@ -521,7 +521,7 @@ class Sfi:
         if args:
             hrn = args[0]
         gid = self._get_gid(hrn)
-        self.logger.debug("Sfi.get_gid-> %s",gid.save_to_string(save_parents=True))
+        self.logger.debug("Sfi.get_gid-> %s" % gid.save_to_string(save_parents=True))
         return gid
 
     def _get_gid(self, hrn=None, type=None):
@@ -646,7 +646,7 @@ class Sfi:
        return key_string
 
     # xxx opts undefined
-    def get_component_server_from_hrn(self, hrn):
+    def get_component_proxy_from_hrn(self, hrn):
         # direct connection to the nodes component manager interface
         user_cred = self.get_user_cred().save_to_string(save_parents=True)
         records = self.registry.Resolve(hrn, user_cred)
@@ -655,9 +655,9 @@ class Sfi:
             self.logger.warning("No such component:%r"% opts.component)
         record = records[0]
   
-        return self.get_server(record['hostname'], CM_PORT, self.key_file, self.cert_file)
+        return self.server_proxy(record['hostname'], CM_PORT, self.key_file, self.cert_file)
  
-    def get_server(self, host, port, keyfile, certfile):
+    def server_proxy(self, host, port, keyfile, certfile):
         """
         Return an instance of an xmlrpc server connection    
         """
@@ -666,10 +666,10 @@ class Sfi:
         host_parts = host.split('/')
         host_parts[0] = host_parts[0] + ":" + str(port)
         url =  "http://%s" %  "/".join(host_parts)    
-        return xmlrpcprotocol.get_server(url, keyfile, certfile, timeout=self.options.timeout, verbose=self.options.debug)
+        return xmlrpcprotocol.server_proxy(url, keyfile, certfile, timeout=self.options.timeout, verbose=self.options.debug)
 
     # xxx opts could be retrieved in self.options
-    def get_server_from_opts(self, opts):
+    def server_proxy_from_opts(self, opts):
         """
         Return instance of an xmlrpc connection to a slice manager, aggregate
         or component server depending on the specified opts
@@ -677,10 +677,10 @@ class Sfi:
         server = self.slicemgr
         # direct connection to an aggregate
         if hasattr(opts, 'aggregate') and opts.aggregate:
-            server = self.get_server(opts.aggregate, opts.port, self.key_file, self.cert_file)
+            server = self.server_proxy(opts.aggregate, opts.port, self.key_file, self.cert_file)
         # direct connection to the nodes component manager interface
         if hasattr(opts, 'component') and opts.component:
-            server = self.get_component_server_from_hrn(opts.component)    
+            server = self.get_component_proxy_from_hrn(opts.component)    
  
         return server
     #==========================================================================
@@ -911,7 +911,7 @@ class Sfi:
             if opts.version_registry:
                 server=self.registry
             else:
-                server = self.get_server_from_opts(opts)
+                server = self.server_proxy_from_opts(opts)
             version=server.GetVersion()
         for (k,v) in version.iteritems():
             print "%-20s: %s"%(k,v)
@@ -928,7 +928,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(user_cred, get_authority(self.authority))
             creds.append(delegated_cred)  
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         #results = server.ListSlices(creds, unique_call_id())
         results = server.ListSlices(creds)
         display_list(results)
@@ -939,7 +939,7 @@ class Sfi:
         user_cred = self.get_user_cred().save_to_string(save_parents=True)
         server = self.slicemgr
         call_options = {}
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         
         if args:
             cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
@@ -979,16 +979,21 @@ class Sfi:
 
     # created named slice with given rspec
     def create(self, opts, args):
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         server_version = self.get_cached_server_version(server)
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice')
         user_cred = self.get_user_cred()
         slice_cred = self.get_slice_cred(slice_hrn).save_to_string(save_parents=True)
-        # delegate the cred to the callers root authority
-        delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority)+'.slicemanager')
-        #delegated_cred = self.delegate_cred(slice_cred, get_authority(slice_hrn))
-        #creds.append(delegated_cred)
+
+        if hasattr(opts, 'aggregate') and opts.aggregate:
+            delegated_cred = None
+        else:
+            # delegate the cred to the callers root authority
+            delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority)+'.slicemanager')
+            #delegated_cred = self.delegate_cred(slice_cred, get_authority(slice_hrn))
+            #creds.append(delegated_cred)
+
         rspec_file = self.get_rspec_file(args[1])
         rspec = open(rspec_file).read()
 
@@ -1013,11 +1018,13 @@ class Sfi:
                 creds = [slice_cred]
             else:
                 users = sfa_users_arg(user_records, slice_record)
-                creds = [slice_cred, delegated_cred]
+                creds = [slice_cred]
+                if delegated_cred:
+                    creds.append(delegated_cred)
         call_args = [slice_urn, creds, rspec, users]
         if self.server_supports_call_id_arg(server):
             call_args.append(unique_call_id())
-           
+
         result = server.CreateSliver(*call_args)
         if opts.file is None:
             print result
@@ -1037,7 +1044,7 @@ class Sfi:
             creds.append(delegated_cred)
         rspec_file = self.get_rspec_file(rspec_path) 
         rspec = open(rspec_file).read()
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         ticket_string = server.GetTicket(slice_urn, creds, rspec, [])
         file = os.path.join(self.options.sfi_dir, get_leaf(slice_hrn) + ".ticket")
         self.logger.info("writing ticket to %s"%file)
@@ -1068,7 +1075,7 @@ class Sfi:
         for hostname in hostnames:
             try:
                 self.logger.info("Calling redeem_ticket at %(hostname)s " % locals())
-                server = self.get_server(hostname, CM_PORT, self.key_file, \
+                server = self.server_proxy(hostname, CM_PORT, self.key_file, \
                                          self.cert_file, self.options.debug)
                 server.RedeemTicket(ticket.save_to_string(save_parents=True), slice_cred)
                 self.logger.info("Success")
@@ -1087,7 +1094,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
 
         call_args = [slice_urn, creds]
         if self.server_supports_call_id_arg(server):
@@ -1103,7 +1110,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Start(slice_urn, creds)
     
     # stop named slice
@@ -1115,14 +1122,14 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Stop(slice_urn, creds)
     
     # reset named slice
     def reset(self, opts, args):
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice') 
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         slice_cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
         creds = [slice_cred]
         if opts.delegate:
@@ -1133,7 +1140,7 @@ class Sfi:
     def renew(self, opts, args):
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice') 
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         slice_cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
         creds = [slice_cred]
         if opts.delegate:
@@ -1155,7 +1162,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         call_args = [slice_urn, creds]
         if self.server_supports_call_id_arg(server):
             call_args.append(unique_call_id())
@@ -1173,7 +1180,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Shutdown(slice_urn, creds)         
     
     def print_help (self):
index 9c2eae5..f22e63e 100755 (executable)
@@ -1,7 +1,6 @@
 #! /usr/bin/env python
 
 import sys
-from sfa.util.rspecHelper import RSpec, Commands
 from sfa.client.sfi_commands import Commands
 from sfa.rspecs.rspec import RSpec
 
diff --git a/sfa/client/sfiAddLinks.py b/sfa/client/sfiAddLinks.py
new file mode 100755 (executable)
index 0000000..f5b2888
--- /dev/null
@@ -0,0 +1,45 @@
+#! /usr/bin/env python
+
+import sys
+from sfa.client.sfi_commands import Commands
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.version_manager import VersionManager
+
+command = Commands(usage="%prog [options] node1 node2...",
+                   description="Add links to the RSpec. " +
+                   "This command reads in an RSpec and outputs a modified " +
+                   "RSpec. Use this to add links to your slivers")
+command.add_linkfile_option()
+command.prep()
+
+if not command.opts.linkfile:
+    print "Missing link list -- exiting"
+    command.parser.print_help()
+    sys.exit(1)
+    
+if command.opts.infile:
+    infile=file(command.opts.infile)
+else:
+    infile=sys.stdin
+if command.opts.outfile:
+    outfile=file(command.opts.outfile,"w")
+else:
+    outfile=sys.stdout
+ad_rspec = RSpec(infile)
+links = file(command.opts.linkfile).read().split('\n')
+link_tuples = map(lambda x: tuple(x.split()), links)
+
+version_manager = VersionManager()
+try:
+    type = ad_rspec.version.type
+    version_num = ad_rspec.version.version
+    request_version = version_manager._get_version(type, version_num, 'request')    
+    request_rspec = RSpec(version=request_version)
+    request_rspec.version.merge(ad_rspec)
+    request_rspec.version.add_link_requests(link_tuples)
+except:
+    print >> sys.stderr, "FAILED: %s" % links
+    raise
+    sys.exit(1)
+print >>outfile, request_rspec.toxml()
+sys.exit(0)
index ef4a008..c72dee3 100755 (executable)
@@ -25,19 +25,20 @@ if command.opts.outfile:
     outfile=file(command.opts.outfile,"w")
 else:
     outfile=sys.stdout
-request_rspec = RSpec(infile)
+ad_rspec = RSpec(infile)
 nodes = file(command.opts.nodefile).read().split()
 version_manager = VersionManager()
 try:
-    type = request_rspec.version.type
-    version_num = request_rspec.version.version
-    manifest_version = version_manager._get_version(type, version_num, 'manifest')    
-    manifest_rspec = RSpec(version=manifest_version)
+    type = ad_rspec.version.type
+    version_num = ad_rspec.version.version
+    request_version = version_manager._get_version(type, version_num, 'request')    
+    request_rspec = RSpec(version=request_version)
     slivers = [{'hostname': node} for node in nodes]
-    manifest_rspec.version.merge(request_rspec)
-    manifest_rspec.version.add_slivers(slivers)
+    request_rspec.version.merge(ad_rspec)
+    request_rspec.version.add_slivers(slivers)
 except:
     print >> sys.stderr, "FAILED: %s" % nodes
+    raise
     sys.exit(1)
-print >>outfile, manifest_rspec.toxml()
+print >>outfile, request_rspec.toxml()
 sys.exit(0)
diff --git a/sfa/client/sfiListLinks.py b/sfa/client/sfiListLinks.py
new file mode 100755 (executable)
index 0000000..a4720ca
--- /dev/null
@@ -0,0 +1,26 @@
+#! /usr/bin/env python
+
+import sys
+from sfa.client.sfi_commands import Commands
+from sfa.rspecs.rspec import RSpec
+from sfa.util.xrn import Xrn 
+
+command = Commands(usage="%prog [options]",
+                   description="List all links in the RSpec. " + 
+                   "Use this to display the list of available links. " ) 
+command.prep()
+
+if command.opts.infile:
+    rspec = RSpec(command.opts.infile)
+    links = rspec.version.get_links()
+    if command.opts.outfile:
+        sys.stdout = open(command.opts.outfile, 'w')
+    
+    for link in links:
+        ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
+        ifname2 = Xrn(link['interface2']['component_id']).get_leaf()
+        print "%s %s" % (ifname1, ifname2)
+
+
+
+    
index bdcc16d..80897cd 100755 (executable)
@@ -12,6 +12,7 @@ class Commands:
         self.parser.add_option("-o", "", dest="outfile", metavar="FILE",
                                help="write output to FILE (default is stdout)")
         self.nodefile = False
+        self.linkfile = False
         self.attributes = {}
 
     def add_nodefile_option(self):
@@ -20,6 +21,12 @@ class Commands:
                                metavar="FILE",
                                help="read node list from FILE"),
 
+    def add_linkfile_option(self):
+        self.linkfile = True
+        self.parser.add_option("-l", "", dest="linkfile",
+                               metavar="FILE",
+                               help="read link list from FILE") 
+
     def add_show_attributes_option(self):
         self.parser.add_option("-s", "--show-attributes", action="store_true", 
                                dest="showatt", default=False, 
similarity index 96%
rename from sfa/util/xmlrpcprotocol.py
rename to sfa/client/xmlrpcprotocol.py
index 25e7b76..bd741a4 100644 (file)
@@ -1,9 +1,10 @@
 # XMLRPC-specific code for SFA Client
 
 import xmlrpclib
-#from sfa.util.httpsProtocol import HTTPS, HTTPSConnection
 from httplib import HTTPS, HTTPSConnection
+
 from sfa.util.sfalogging import logger
+
 ##
 # ServerException, ExceptionUnmarshaller
 #
@@ -86,7 +87,7 @@ class XMLRPCServerProxy(xmlrpclib.ServerProxy):
         logger.debug ("xml-rpc %s method:%s"%(self.url,attr))
         return xmlrpclib.ServerProxy.__getattr__(self, attr)
 
-def get_server(url, key_file, cert_file, timeout=None, verbose=False):
+def server_proxy(url, key_file, cert_file, timeout=None, verbose=False):
     transport = XMLRPCTransport(key_file, cert_file, timeout)
     return XMLRPCServerProxy(url, transport, allow_none=True, verbose=verbose)
 
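Editor's note: the renamed helper keeps the old get_server signature, so callers only change the function name. A hedged usage sketch (the key/cert paths and URL below are illustrative, not mandated by the module):

# Calling the renamed helper (formerly sfa.util.xmlrpcprotocol.get_server).
from sfa.client import xmlrpcprotocol
registry = xmlrpcprotocol.server_proxy("http://www.emanicslab.org:12345/",
                                       "user.pkey", "user.cert",
                                       timeout=10, verbose=False)
version = registry.GetVersion()   # attribute access becomes an XML-RPC call over HTTPS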
diff --git a/sfa/generic/__init__.py b/sfa/generic/__init__.py
new file mode 100644 (file)
index 0000000..ea6ce05
--- /dev/null
@@ -0,0 +1,107 @@
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+
+from sfa.managers.managerwrapper import ManagerWrapper
+
+# a bundle is the combination of 
+# (*) an api that reacts on the incoming requests to trigger the API methods
+# (*) a manager that implements the function of the service, 
+#     either aggregate, registry, or slicemgr
+# (*) a driver that controls the underlying testbed
+# 
+# 
+# The Generic class is a utility that uses the configuration to figure out 
+# which combination of these pieces need to be put together 
+# from config.
+# this extra indirection is needed to adapt to the current naming scheme
+# where we have 'pl' and 'plc' and components and the like, that does not 
+# yet follow a sensible scheme
+
+# needs refinements to cache more efficiently, esp. wrt the config
+
+class Generic:
+
+    def __init__ (self, flavour, config):
+        self.flavour=flavour
+        self.config=config
+
+    # proof of concept
+    # example flavour='pl' -> sfa.generic.pl.pl()
+    @staticmethod
+    def the_flavour (flavour=None, config=None):
+        if config is None: config=Config()
+        if flavour is None: flavour=config.SFA_GENERIC_FLAVOUR
+        flavour = flavour.lower()
+        #mixed = flavour.capitalize()
+        module_path="sfa.generic.%s"%flavour
+        classname="%s"%flavour
+        logger.info("Generic.the_flavour with flavour=%s"%flavour)
+        try:
+            module = __import__ (module_path, globals(), locals(), [classname])
+            return getattr(module, classname)(flavour,config)
+        except:
+            logger.log_exc("Cannot locate generic instance with flavour=%s"%flavour)
+
+    # in the simplest case these can be redefined to the class/module objects to be used
+    # see pl.py for an example
+    # some descendant of SfaApi
+    def api_class (self) : pass
+    # in practical terms these are modules for now
+    def registry_class (self) : pass
+    def slicemgr_class (self) : pass
+    def aggregate_class (self) : pass
+    def component_class (self) : pass
+
+
+    # build an API object
+    # insert a manager instance 
+    def make_api (self, *args, **kwargs):
+        # interface is a required arg
+        if not 'interface' in kwargs:
+            logger.critical("Generic.make_api: no interface found")
+        api = self.api_class()(*args, **kwargs)
+        manager = self.make_manager(api.interface)
+        driver = self.make_driver (api.config, api.interface)
+        ### arrange stuff together
+        # add a manager wrapper
+        manager = ManagerWrapper(manager,api.interface)
+        api.manager=manager
+        # insert driver in manager
+        manager.driver=driver
+        # add it in api as well for convenience
+        api.driver=driver
+        return api
+
+    def make_manager (self, interface):
+        """
+        interface expected in ['registry', 'aggregate', 'slice', 'component']
+        flavour is e.g. 'pl' or 'max' or whatever
+        """
+        flavour = self.flavour
+        message="Generic.make_manager for interface=%s and flavour=%s"%(interface,flavour)
+        
+        classname = "%s_manager_class"%interface
+        try:
+            module = getattr(self,classname)()
+            logger.info("%s : %s"%(message,module))
+            return module
+        except:
+            logger.log_exc_critical(message)
+        
+    # need interface to select the right driver
+    def make_driver (self, config, interface):
+        flavour = self.flavour
+        message="Generic.make_driver for flavour=%s and interface=%s"%(flavour,interface)
+        
+        if interface == "component":
+            classname = "component_driver_class"
+        else:
+            classname = "driver_class"
+        try:
+            class_obj = getattr(self,classname)()
+            logger.info("%s : %s"%(message,class_obj))
+            return class_obj(config)
+        except:
+            logger.log_exc_critical(message)
+        
+        
diff --git a/sfa/generic/architecture.txt b/sfa/generic/architecture.txt
new file mode 100644 (file)
index 0000000..ff63549
--- /dev/null
@@ -0,0 +1,40 @@
+We identify 3 layers in the server-side aspects:
+
+. api: this object reacts to an incoming SFA request
+
+. manager: this implements a given interface, either registry,
+aggregate, or slicemgr
+
+. driver: this object is in charge of actually talking to the
+underlying testbed
+
+-----
+
+the generic layer is in charge of instantiating these and linking them
+as follows:
+* the classes actually used for creating the 3 elements are
+configurable in a flavour (e.g. sfa.generic.pl.py)
+* which is then configured from sfa-config-tty as SFA_GENERIC_FLAVOUR
+
+* a call to make_api will then create the 3 elements with the
+  following layout:
+
+api.manager 
+manager.driver
+api.driver (for convenience)
+
+------
+example
+
+from sfa.generic import Generic
+generic=Generic.the_flavour()
+-> returns an instance of a Generic object with a flavour from the
+   config; by default it would thus be an instance of sfa.generic.pl
+
+api = generic.make_api (...) returns an instance of the given class
+with the arguments passed as arguments to the constructor
+
+------
+more in sfa/generic/__init__.py
+
+
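Editor's note: concretely, the example in architecture.txt boils down to a bootstrap sequence like the one below. This is a sketch only: the exact keyword arguments accepted by the SfaApi constructor are assumed here, not shown in this diff.

# Hedged sketch of the generic-layer bootstrap described above.
from sfa.util.config import Config
from sfa.generic import Generic

config = Config()
generic = Generic.the_flavour(config=config)        # picks sfa.generic.<SFA_GENERIC_FLAVOUR>
# 'interface' is the one kwarg make_api() insists on; other kwargs go to the api class (assumed)
api = generic.make_api(interface='registry', config=config)
# resulting layout, as described above:
#   api.manager        -> ManagerWrapper around the flavour's registry manager
#   api.manager.driver -> the flavour's driver (e.g. PlDriver for the 'pl' flavour)
#   api.driver         -> same driver, kept on the api for convenience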
diff --git a/sfa/generic/pl.py b/sfa/generic/pl.py
new file mode 100644 (file)
index 0000000..098a27a
--- /dev/null
@@ -0,0 +1,35 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+import sfa.plc.pldriver
+import sfa.managers.registry_manager
+import sfa.managers.slice_manager
+import sfa.managers.aggregate_manager
+
+class pl (Generic):
+    
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) : 
+        return sfa.managers.registry_manager
+    def slicemgr_manager_class (self) : 
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        return sfa.plc.pldriver.PlDriver
+
+    # for the component mode, to be run on board planetlab nodes
+    # manager class
+    def component_manager_class (self):
+        return sfa.managers.component_manager_pl
+    # driver_class
+    def component_driver_class (self):
+        return sfa.plc.plcomponentdriver.PlComponentDriver
+
+
diff --git a/sfa/generic/slab.py b/sfa/generic/slab.py
new file mode 100644 (file)
index 0000000..83e5f6f
--- /dev/null
@@ -0,0 +1,35 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+import sfa.senslab.slabdriver
+import sfa.managers.registry_manager_slab
+import sfa.managers.slice_manager_slab
+import sfa.managers.aggregate_manager_slab
+
+class slab (Generic):
+    
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) : 
+        return sfa.managers.registry_manager_slab
+    def slicemgr_manager_class (self) : 
+        return sfa.managers.slice_manager_slab
+    def aggregate_manager_class (self) :
+        return sfa.managers.aggregate_manager_slab
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
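As a hedged usage sketch for a flavour such as this one (the make_api argument list below is an assumption; see sfa/generic/__init__.py for the real signature):

    from sfa.generic import Generic

    generic = Generic.the_flavour()   # flavour taken from SFA_GENERIC_FLAVOUR, e.g. 'slab'
    api = generic.make_api()          # argument list is an assumption
    # api.manager and api.driver are now linked as described in architecture.txt;
    # with the slab flavour, api.driver would be an instance of
    # sfa.senslab.slabdriver.SlabDriver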
similarity index 96%
rename from sfa/plc/sfa-import-plc.py
rename to sfa/importer/sfa-import-plc.py
index 95793a1..6873f48 100755 (executable)
 # RSA keys at this time, not DSA keys.
 ##
 
+import os
 import getopt
 import sys
-import tempfile
 
-from sfa.util.record import *
 from sfa.util.table import SfaTable
 from sfa.util.xrn import get_leaf, get_authority
 from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
 from sfa.util.config import Config
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.trust.trustedroots import *
-from sfa.trust.hierarchy import *
 from sfa.util.xrn import Xrn
-from sfa.plc.api import *
-from sfa.trust.gid import create_uuid
-from sfa.plc.sfaImport import sfaImport, _cleanup_string
+
+from sfa.importer.sfaImport import sfaImport
 
 def process_options():
 
@@ -125,7 +120,8 @@ def main():
         sites_dict[site['login_base']] = site 
     
     # Get all plc users
-    persons = shell.GetPersons(plc_auth, {'peer_id': None, 'enabled': True}, ['person_id', 'email', 'key_ids', 'site_ids'])
+    persons = shell.GetPersons(plc_auth, {'peer_id': None, 'enabled': True}, 
+                               ['person_id', 'email', 'key_ids', 'site_ids'])
     persons_dict = {}
     for person in persons:
         persons_dict[person['person_id']] = person
similarity index 97%
rename from sfa/plc/sfa-nuke-plc.py
rename to sfa/importer/sfa-nuke-plc.py
index fb84020..be7b0c1 100755 (executable)
@@ -11,8 +11,6 @@ import sys
 import os
 from optparse import OptionParser
 
-from sfa.trust.hierarchy import *
-from sfa.util.record import *
 from sfa.util.table import SfaTable
 from sfa.util.sfalogging import logger
 
similarity index 98%
rename from sfa/plc/sfaImport.py
rename to sfa/importer/sfaImport.py
index 1effe71..4de6e1b 100644 (file)
@@ -8,20 +8,16 @@
 # RSA keys at this time, not DSA keys.
 ##
 
-import getopt
-import sys
-import tempfile
-
 from sfa.util.sfalogging import _SfaLogger
 
-from sfa.util.record import *
+from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
 from sfa.util.xrn import get_authority, hrn_to_urn
 from sfa.util.plxrn import email_to_hrn
 from sfa.util.config import Config
 from sfa.trust.certificate import convert_public_key, Keypair
 from sfa.trust.trustedroots import TrustedRoots
-from sfa.trust.hierarchy import *
+from sfa.trust.hierarchy import Hierarchy
 from sfa.trust.gid import create_uuid
 
 
@@ -234,8 +230,6 @@ class sfaImport:
 
     
     def import_site(self, hrn, site):
-        shell = self.shell
-        plc_auth = self.plc_auth
         urn = hrn_to_urn(hrn, 'authority')
         self.logger.info("Import: site %s"%hrn)
 
index e2fdb10..08975dc 100755 (executable)
@@ -62,18 +62,18 @@ start() {
     reload
 
     # install peer certs
-    action $"SFA installing peer certs" daemon /usr/bin/sfa-server.py -t -d $OPTIONS 
+    action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS 
 
     if [ "$SFA_REGISTRY_ENABLED" -eq 1 ]; then
-        action $"SFA Registry" daemon /usr/bin/sfa-server.py -r -d $OPTIONS
+        action $"SFA Registry" daemon /usr/bin/sfa-start.py -r -d $OPTIONS
     fi
 
     if [ "$SFA_AGGREGATE_ENABLED" -eq 1 ]; then
-        action $"SFA Aggregate" daemon /usr/bin/sfa-server.py -a -d $OPTIONS
+        action $"SFA Aggregate" daemon /usr/bin/sfa-start.py -a -d $OPTIONS
     fi
         
     if [ "$SFA_SM_ENABLED" -eq 1 ]; then
-        action "SFA SliceMgr" daemon /usr/bin/sfa-server.py -s -d $OPTIONS
+        action "SFA SliceMgr" daemon /usr/bin/sfa-start.py -s -d $OPTIONS
     fi
 
     if [ "$SFA_FLASHPOLICY_ENABLED" -eq 1 ]; then
@@ -81,15 +81,15 @@ start() {
     fi
 
     RETVAL=$?
-    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-server.py
+    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa-start.py
 
 }
 
 stop() {
-    action $"Shutting down SFA" killproc sfa-server.py
+    action $"Shutting down SFA" killproc sfa-start.py
     RETVAL=$?
 
-    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-server.py
+    [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa-start.py
 }
 
 
@@ -99,13 +99,13 @@ case "$1" in
     reload) reload force ;;
     restart) stop; start ;;
     condrestart)
-       if [ -f /var/lock/subsys/sfa-server.py ]; then
+       if [ -f /var/lock/subsys/sfa-start.py ]; then
             stop
             start
        fi
        ;;
     status)
-       status sfa-server.py
+       status sfa-start.py
        RETVAL=$?
        ;;
     *)
index e3bbd96..cdddf8b 100755 (executable)
@@ -6,9 +6,6 @@
 #
 # description:   Wraps PLCAPI into the SFA compliant API
 #
-# $Id: sfa 14304 2009-07-06 20:19:51Z thierry $
-# $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/init.d/sfa $
-#
 
 # Source config
 . /etc/sfa/sfa_config
@@ -30,7 +27,7 @@ start() {
             echo "Component Mgr"
             # make sure server key (nodes private key) exists first
             init_key
-            /usr/bin/sfa-server.py -c -d $OPTIONS
+            /usr/bin/sfa-start.py -c -d $OPTIONS
         fi
 
         RETVAL=$?
@@ -41,7 +38,7 @@ start() {
 
 stop() {
     echo -n $"Shutting down SFA: "
-    killproc sfa-server.py
+    killproc sfa-start.py
     RETVAL=$?
 
     echo
diff --git a/sfa/managers/aggregate_manager.py b/sfa/managers/aggregate_manager.py
new file mode 100644 (file)
index 0000000..14ec7d0
--- /dev/null
@@ -0,0 +1,390 @@
+import datetime
+import time
+import sys
+
+from sfa.util.faults import RecordNotFound, SliverDoesNotExist
+from sfa.util.record import SliceRecord
+from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id
+from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename
+from sfa.util.version import version_core
+from sfa.util.sfatime import utcparse
+from sfa.util.callids import Callids
+
+from sfa.trust.sfaticket import SfaTicket
+from sfa.trust.credential import Credential
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.server.sfaapi import SfaApi
+
+import sfa.plc.peers as peers
+from sfa.plc.aggregate import Aggregate
+from sfa.plc.slices import Slices
+
+class AggregateManager:
+
+    def __init__ (self):
+        # xxx Thierry : caching at the aggregate level sounds wrong...
+        #self.caching=True
+        self.caching=False
+    
+    def GetVersion(self, api):
+    
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn)
+        version_more = {'interface':'aggregate',
+                        'testbed':'myplc',
+                        'hrn':xrn.get_hrn(),
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
+                        }
+        return version_core(version_more)
+    
+    def _get_registry_objects(self, slice_xrn, creds, users):
+        """
+    
+        """
+        hrn, _ = urn_to_hrn(slice_xrn)
+    
+        hrn_auth = get_authority(hrn)
+    
+        # Build up objects that an SFA registry would return if SFA
+        # could contact the slice's registry directly
+        reg_objects = None
+    
+        if users:
+            # don't allow special characters in the site login base
+            #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
+            #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
+            slicename = hrn_to_pl_slicename(hrn)
+            login_base = slicename.split('_')[0]
+            reg_objects = {}
+            site = {}
+            site['site_id'] = 0
+            site['name'] = 'geni.%s' % login_base 
+            site['enabled'] = True
+            site['max_slices'] = 100
+    
+            # Note:
+            # Is it okay if this login base is the same as one already at this myplc site?
+            # Do we need uniqueness?  Should use hrn_auth instead of just the leaf perhaps?
+            site['login_base'] = login_base
+            site['abbreviated_name'] = login_base
+            site['max_slivers'] = 1000
+            reg_objects['site'] = site
+    
+            slice = {}
+            
+            # get_expiration always returns a normalized datetime - no need to utcparse
+            extime = Credential(string=creds[0]).get_expiration()
+            # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+            if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+                extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+            slice['expires'] = int(time.mktime(extime.timetuple()))
+            slice['hrn'] = hrn
+            slice['name'] = hrn_to_pl_slicename(hrn)
+            slice['url'] = hrn
+            slice['description'] = hrn
+            slice['pointer'] = 0
+            reg_objects['slice_record'] = slice
+    
+            reg_objects['users'] = {}
+            for user in users:
+                user['key_ids'] = []
+                hrn, _ = urn_to_hrn(user['urn'])
+                user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
+                user['first_name'] = hrn
+                user['last_name'] = hrn
+                reg_objects['users'][user['email']] = user
+    
+            return reg_objects
+    
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        if Callids().already_handled(call_id): return {}
+    
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        # find out where this slice is currently running
+        slicename = hrn_to_pl_slicename(hrn)
+        
+        slices = api.driver.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
+        if len(slices) == 0:        
+            raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
+        slice = slices[0]
+        
+        # report about the local nodes only
+        nodes = api.driver.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
+                                     ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+        site_ids = [node['site_id'] for node in nodes]
+    
+        result = {}
+        top_level_status = 'unknown'
+        if nodes:
+            top_level_status = 'ready'
+        slice_urn = Xrn(slice_xrn, 'slice').get_urn()
+        result['geni_urn'] = slice_urn
+        result['pl_login'] = slice['name']
+        result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+        
+        resources = []
+        for node in nodes:
+            res = {}
+            res['pl_hostname'] = node['hostname']
+            res['pl_boot_state'] = node['boot_state']
+            res['pl_last_contact'] = node['last_contact']
+            if node['last_contact'] is not None:
+                res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            res['geni_urn'] = sliver_id
+            if node['boot_state'] == 'boot':
+                res['geni_status'] = 'ready'
+            else:
+                res['geni_status'] = 'failed'
+                top_level_status = 'failed' 
+                
+            res['geni_error'] = ''
+    
+            resources.append(res)
+            
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+        return result
+    
+    def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, call_id):
+        """
+        Create the sliver[s] (slice) at this aggregate.    
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+        if Callids().already_handled(call_id): return ""
+    
+        aggregate = Aggregate(api)
+        slices = Slices(api)
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
+        
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exist
+        slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # add/remove slice from nodes
+        requested_slivers = [node['component_name'] for node in rspec.version.get_nodes_with_slivers()]
+        slices.verify_slice_nodes(slice, requested_slivers, peer) 
+   
+        # add/remove slice links
+        slices.verify_slice_links(slice, rspec.version.get_link_requests(), aggregate)
+    
+        # handle MyPLC peer association.
+        # only used by plc and ple.
+        slices.handle_peer(site, slice, persons, peer)
+        
+        return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
+    
+    
+    def RenewSliver(self, api, xrn, creds, expiration_time, call_id):
+        if Callids().already_handled(call_id): return True
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        record = {'expires': int(time.mktime(requested_time.timetuple()))}
+        try:
+            api.driver.UpdateSlice(slice['slice_id'], record)
+            return True
+        except:
+            return False
+    
+    def start_slice(self, api, xrn, creds):
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+        # just remove the tag if it exists
+        if slice_tags:
+            api.driver.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
+    
+        return 1
+     
+    def stop_slice(self, api, xrn, creds):
+        hrn, _ = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
+        if not slice_tags:
+            api.driver.AddSliceTag(slice_id, 'enabled', '0')
+        elif slice_tags[0]['value'] != "0":
+            tag_id = slice_tags[0]['slice_tag_id']
+            api.driver.UpdateSliceTag(tag_id, '0')
+        return 1
+    
+    def reset_slice(self, api, xrn):
+        # XX not implemented at this interface
+        return 1
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        if Callids().already_handled(call_id): return ""
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+    
+        # determine if this is a peer slice
+        peer = peers.get_peer(api, hrn)
+        try:
+            if peer:
+                api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            api.driver.DeleteSliceFromNodes(slicename, slice['node_ids'])
+        finally:
+            if peer:
+                api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+    
+    def ListSlices(self, api, creds, call_id):
+        if Callids().already_handled(call_id): return []
+        # look in cache first
+        if self.caching and api.cache:
+            slices = api.cache.get('slices')
+            if slices:
+                return slices
+    
+        # get data from db 
+        slices = api.driver.GetSlices({'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+    
+        # cache the result
+        if self.caching and api.cache:
+            api.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+        
+    def ListResources(self, api, creds, options, call_id):
+        if Callids().already_handled(call_id): return ""
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', None)
+        (hrn, _) = urn_to_hrn(xrn)
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        version_string = "rspec_%s" % (rspec_version.to_string())
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        if self.caching and api.cache and not xrn:
+            rspec = api.cache.get(version_string)
+            if rspec:
+                api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
+                return rspec 
+    
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = Aggregate(api, options)
+        rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version)
+    
+        # cache the result
+        if self.caching and api.cache and not xrn:
+            api.cache.add(version_string, rspec)
+    
+        return rspec
+    
+    
+    def get_ticket(self, api, xrn, creds, rspec, users):
+    
+        (slice_hrn, _) = urn_to_hrn(xrn)
+        slices = Slices(api)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+    
+        # get the slice record
+        credential = api.getCredential()
+        interface = api.registries[api.hrn]
+        registry = api.server_proxy(interface, credential)
+        records = registry.Resolve(xrn, credential)
+    
+        # make sure we get a local slice record
+        record = None
+        for tmp_record in records:
+            if tmp_record['type'] == 'slice' and \
+               not tmp_record['peer_authority']:
+                record = SliceRecord(dict=tmp_record)
+        if not record:
+            raise RecordNotFound(slice_hrn)
+        
+        # similar to CreateSliver, we must verify that the required records exist
+        # at this aggregate before we can issue a ticket
+        # parse rspec
+        rspec = RSpec(rspec)
+        requested_attributes = rspec.version.get_slice_attributes()
+    
+        # ensure site record exists
+        site = slices.verify_site(slice_hrn, record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exist
+        slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # get sliver info
+        slivers = slices.get_slivers(slice_hrn)
+    
+        if not slivers:
+            raise SliverDoesNotExist(slice_hrn)
+    
+        # get initscripts
+        initscripts = []
+        data = {
+            'timestamp': int(time.time()),
+            'initscripts': initscripts,
+            'slivers': slivers
+        }
+    
+        # create the ticket
+        object_gid = record.get_gid_object()
+        new_ticket = SfaTicket(subject = object_gid.get_subject())
+        new_ticket.set_gid_caller(api.auth.client_gid)
+        new_ticket.set_gid_object(object_gid)
+        new_ticket.set_issuer(key=api.key, subject=api.hrn)
+        new_ticket.set_pubkey(object_gid.get_pubkey())
+        new_ticket.set_attributes(data)
+        new_ticket.set_rspec(rspec)
+        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+        new_ticket.encode()
+        new_ticket.sign()
+    
+        return new_ticket.save_to_string(save_parents=True)
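As a hedged usage sketch of the class-based manager above (how the api object actually locates its manager instance is wired by the generic flavour and is not shown here; api, creds and call_id are assumed to be provided by the server):

    from sfa.managers.aggregate_manager import AggregateManager

    manager = AggregateManager()           # caching is off by default, see __init__
    version = manager.GetVersion(api)      # 'api' assumed to be a configured SfaApi instance
    slices  = manager.ListSlices(api, creds, call_id)
    # every method first checks Callids().already_handled(call_id), so a retried
    # request carrying the same call_id is only acted upon once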
index ea8f2af..3f2d512 100644 (file)
@@ -4,6 +4,8 @@ import sys
 import os, errno
 import logging
 import datetime
+from multiprocessing import Process
+from time import sleep
 
 import boto
 from boto.ec2.regioninfo import RegionInfo
@@ -13,36 +15,21 @@ from xmlbuilder import XMLBuilder
 from lxml import etree as ET
 from sqlobject import *
 
-from sfa.util.faults import *
+from sfa.util.faults import InvalidRSpec 
 from sfa.util.xrn import urn_to_hrn, Xrn
-from sfa.util.rspec import RSpec
-from sfa.server.registry import Registries
-from sfa.trust.credential import Credential
-from sfa.plc.api import SfaAPI
-from sfa.plc.aggregate import Aggregate
-from sfa.plc.slices import *
 from sfa.util.plxrn import hrn_to_pl_slicename, slicename_to_hrn
 from sfa.util.callids import Callids
-from sfa.util.sfalogging import logger
-from sfa.rspecs.sfa_rspec import sfa_rspec_version
+#comes with its own logging
+#from sfa.util.sfalogging import logger
 from sfa.util.version import version_core
-
-from multiprocessing import Process
-from time import sleep
-
-##
-# The data structure used to represent a cloud.
-# It contains the cloud name, its ip address, image information,
-# key pairs, and clusters information.
-#
-cloud = {}
-
-##
-# The location of the RelaxNG schema.
-#
-EUCALYPTUS_RSPEC_SCHEMA='/etc/sfa/eucalyptus.rng'
-
-api = SfaAPI()
+from sfa.trust.credential import Credential
+from sfa.server.sfaapi import SfaApi
+from sfa.plc.aggregate import Aggregate
+from sfa.plc.slices import Slice, Slices
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+# not sure what this used to be nor where it is now defined
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
 
 ##
 # Meta data of an instance.
@@ -81,9 +68,6 @@ class EucaInstance(SQLObject):
                     (self.image_id, self.kernel_id, self.ramdisk_id,
                     self.inst_type, self.key_pair))
 
-        # XXX The return statement is for testing. REMOVE in production
-        #return
-
         try:
             reservation = botoConn.run_instances(self.image_id,
                                                  kernel_id = self.kernel_id,
@@ -110,143 +94,6 @@ class Slice(SQLObject):
     #slice_index = DatabaseIndex('slice_hrn')
     instances = MultipleJoin('EucaInstance')
 
-##
-# Initialize the aggregate manager by reading a configuration file.
-#
-def init_server():
-    logger = logging.getLogger('EucaAggregate')
-    fileHandler = logging.FileHandler('/var/log/euca.log')
-    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
-    logger.addHandler(fileHandler)
-    fileHandler.setLevel(logging.DEBUG)
-    logger.setLevel(logging.DEBUG)
-
-    configParser = ConfigParser()
-    configParser.read(['/etc/sfa/eucalyptus_aggregate.conf', 'eucalyptus_aggregate.conf'])
-    if len(configParser.sections()) < 1:
-        logger.error('No cloud defined in the config file')
-        raise Exception('Cannot find cloud definition in configuration file.')
-
-    # Only read the first section.
-    cloudSec = configParser.sections()[0]
-    cloud['name'] = cloudSec
-    cloud['access_key'] = configParser.get(cloudSec, 'access_key')
-    cloud['secret_key'] = configParser.get(cloudSec, 'secret_key')
-    cloud['cloud_url']  = configParser.get(cloudSec, 'cloud_url')
-    cloudURL = cloud['cloud_url']
-    if cloudURL.find('https://') >= 0:
-        cloudURL = cloudURL.replace('https://', '')
-    elif cloudURL.find('http://') >= 0:
-        cloudURL = cloudURL.replace('http://', '')
-    (cloud['ip'], parts) = cloudURL.split(':')
-
-    # Create image bundles
-    images = getEucaConnection().get_all_images()
-    cloud['images'] = images
-    cloud['imageBundles'] = {}
-    for i in images:
-        if i.type != 'machine' or i.kernel_id is None: continue
-        name = os.path.dirname(i.location)
-        detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
-        cloud['imageBundles'][name] = detail
-
-    # Initialize sqlite3 database and tables.
-    dbPath = '/etc/sfa/db'
-    dbName = 'euca_aggregate.db'
-
-    if not os.path.isdir(dbPath):
-        logger.info('%s not found. Creating directory ...' % dbPath)
-        os.mkdir(dbPath)
-
-    conn = connectionForURI('sqlite://%s/%s' % (dbPath, dbName))
-    sqlhub.processConnection = conn
-    Slice.createTable(ifNotExists=True)
-    EucaInstance.createTable(ifNotExists=True)
-    Meta.createTable(ifNotExists=True)
-
-    # Start the update process to keep track of the meta data
-    # about Eucalyptus instance.
-    Process(target=updateMeta).start()
-
-    # Make sure the schema exists.
-    if not os.path.exists(EUCALYPTUS_RSPEC_SCHEMA):
-        err = 'Cannot location schema at %s' % EUCALYPTUS_RSPEC_SCHEMA
-        logger.error(err)
-        raise Exception(err)
-
-##
-# Creates a connection to Eucalytpus. This function is inspired by 
-# the make_connection() in Euca2ools.
-#
-# @return A connection object or None
-#
-def getEucaConnection():
-    global cloud
-    accessKey = cloud['access_key']
-    secretKey = cloud['secret_key']
-    eucaURL   = cloud['cloud_url']
-    useSSL    = False
-    srvPath   = '/'
-    eucaPort  = 8773
-    logger    = logging.getLogger('EucaAggregate')
-
-    if not accessKey or not secretKey or not eucaURL:
-        logger.error('Please set ALL of the required environment ' \
-                     'variables by sourcing the eucarc file.')
-        return None
-    
-    # Split the url into parts
-    if eucaURL.find('https://') >= 0:
-        useSSL  = True
-        eucaURL = eucaURL.replace('https://', '')
-    elif eucaURL.find('http://') >= 0:
-        useSSL  = False
-        eucaURL = eucaURL.replace('http://', '')
-    (eucaHost, parts) = eucaURL.split(':')
-    if len(parts) > 1:
-        parts = parts.split('/')
-        eucaPort = int(parts[0])
-        parts = parts[1:]
-        srvPath = '/'.join(parts)
-
-    return boto.connect_ec2(aws_access_key_id=accessKey,
-                            aws_secret_access_key=secretKey,
-                            is_secure=useSSL,
-                            region=RegionInfo(None, 'eucalyptus', eucaHost), 
-                            port=eucaPort,
-                            path=srvPath)
-
-##
-# Returns a string of keys that belong to the users of the given slice.
-# @param sliceHRN The hunman readable name of the slice.
-# @return sting()
-#
-# This method is no longer needed because the user keys are passed into
-# CreateSliver
-#
-def getKeysForSlice(api, sliceHRN):
-    logger   = logging.getLogger('EucaAggregate')
-    cred     = api.getCredential()
-    registry = api.registries[api.hrn]
-    keys     = []
-
-    # Get the slice record
-    records = registry.Resolve(sliceHRN, cred)
-    if not records:
-        logging.warn('Cannot find any record for slice %s' % sliceHRN)
-        return []
-
-    # Find who can log into this slice
-    persons = records[0]['persons']
-
-    # Extract the keys from persons records
-    for p in persons:
-        sliceUser = registry.Resolve(p, cred)
-        userKeys = sliceUser[0]['keys']
-        keys += userKeys
-
-    return '\n'.join(keys)
-
 ##
 # A class that builds the RSpec for Eucalyptus.
 #
@@ -423,322 +270,430 @@ class ZoneResultParser(object):
 
         return clusterList
 
-def ListResources(api, creds, options, call_id): 
-    if Callids().already_handled(call_id): return ""
-    global cloud
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
-    logger = logging.getLogger('EucaAggregate')
-
-    # get hrn of the original caller
-    origin_hrn = options.get('origin_hrn', None)
-    if not origin_hrn:
-        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
-
-    conn = getEucaConnection()
-
-    if not conn:
-        logger.error('Cannot create a connection to Eucalyptus')
-        return 'Cannot create a connection to Eucalyptus'
-
-    try:
-        # Zones
-        zones = conn.get_all_zones(['verbose'])
-        p = ZoneResultParser(zones)
-        clusters = p.parse()
-        cloud['clusters'] = clusters
-        
-        # Images
-        images = conn.get_all_images()
-        cloud['images'] = images
-        cloud['imageBundles'] = {}
+class AggregateManagerEucalyptus:
+
+    # The data structure used to represent a cloud.
+    # It contains the cloud name, its ip address, image information,
+    # key pairs, and clusters information.
+    cloud = {}
+    
+    # The location of the RelaxNG schema.
+    EUCALYPTUS_RSPEC_SCHEMA='/etc/sfa/eucalyptus.rng'
+    
+    _inited=False
+
+    # the init_server mechanism has vanished
+    def __init__ (self):
+        if AggregateManagerEucalyptus._inited: return
+        AggregateManagerEucalyptus._inited = True
+        AggregateManagerEucalyptus.init_server()
+
+    # Initialize the aggregate manager by reading a configuration file.
+    @staticmethod
+    def init_server():
+        logger = logging.getLogger('EucaAggregate')
+        fileHandler = logging.FileHandler('/var/log/euca.log')
+        fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+        logger.addHandler(fileHandler)
+        fileHandler.setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+
+        configParser = ConfigParser()
+        configParser.read(['/etc/sfa/eucalyptus_aggregate.conf', 'eucalyptus_aggregate.conf'])
+        if len(configParser.sections()) < 1:
+            logger.error('No cloud defined in the config file')
+            raise Exception('Cannot find cloud definition in configuration file.')
+    
+        # Only read the first section.
+        cloudSec = configParser.sections()[0]
+        AggregateManagerEucalyptus.cloud['name'] = cloudSec
+        AggregateManagerEucalyptus.cloud['access_key'] = configParser.get(cloudSec, 'access_key')
+        AggregateManagerEucalyptus.cloud['secret_key'] = configParser.get(cloudSec, 'secret_key')
+        AggregateManagerEucalyptus.cloud['cloud_url']  = configParser.get(cloudSec, 'cloud_url')
+        cloudURL = AggregateManagerEucalyptus.cloud['cloud_url']
+        if cloudURL.find('https://') >= 0:
+            cloudURL = cloudURL.replace('https://', '')
+        elif cloudURL.find('http://') >= 0:
+            cloudURL = cloudURL.replace('http://', '')
+        (AggregateManagerEucalyptus.cloud['ip'], parts) = cloudURL.split(':')
+    
+        # Create image bundles
+        images = self.getEucaConnection().get_all_images()
+        AggregateManagerEucalyptus.cloud['images'] = images
+        AggregateManagerEucalyptus.cloud['imageBundles'] = {}
         for i in images:
             if i.type != 'machine' or i.kernel_id is None: continue
             name = os.path.dirname(i.location)
             detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
-            cloud['imageBundles'][name] = detail
-
-        # Key Pairs
-        keyPairs = conn.get_all_key_pairs()
-        cloud['keypairs'] = keyPairs
-
-        if hrn:
-            instanceId = []
-            instances  = []
-
-            # Get the instances that belong to the given slice from sqlite3
-            # XXX use getOne() in production because the slice's hrn is supposed
-            # to be unique. For testing, uniqueness is turned off in the db.
-            # If the slice isn't found in the database, create a record for the 
-            # slice.
-            matchedSlices = list(Slice.select(Slice.q.slice_hrn == hrn))
-            if matchedSlices:
-                theSlice = matchedSlices[-1]
-            else:
-                theSlice = Slice(slice_hrn = hrn)
-            for instance in theSlice.instances:
-                instanceId.append(instance.instance_id)
-
-            # Get the information about those instances using their ids.
-            if len(instanceId) > 0:
-                reservations = conn.get_all_instances(instanceId)
-            else:
-                reservations = []
+            AggregateManagerEucalyptus.cloud['imageBundles'][name] = detail
+    
+        # Initialize sqlite3 database and tables.
+        dbPath = '/etc/sfa/db'
+        dbName = 'euca_aggregate.db'
+    
+        if not os.path.isdir(dbPath):
+            logger.info('%s not found. Creating directory ...' % dbPath)
+            os.mkdir(dbPath)
+    
+        conn = connectionForURI('sqlite://%s/%s' % (dbPath, dbName))
+        sqlhub.processConnection = conn
+        Slice.createTable(ifNotExists=True)
+        EucaInstance.createTable(ifNotExists=True)
+        Meta.createTable(ifNotExists=True)
+    
+        # Start the update process to keep track of the meta data
+        # about Eucalyptus instance.
+        Process(target=AggregateManagerEucalyptus.updateMeta).start()
+    
+        # Make sure the schema exists.
+        if not os.path.exists(AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA):
+            err = 'Cannot locate schema at %s' % AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA
+            logger.error(err)
+            raise Exception(err)
+    
+    #
+    # A separate process that will update the meta data.
+    #
+    @staticmethod    
+    def updateMeta():
+        logger = logging.getLogger('EucaMeta')
+        fileHandler = logging.FileHandler('/var/log/euca_meta.log')
+        fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+        logger.addHandler(fileHandler)
+        fileHandler.setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+    
+        while True:
+            sleep(30)
+    
+            # Get IDs of the instances that don't have IPs yet.
+            dbResults = Meta.select(
+                          AND(Meta.q.pri_addr == None,
+                              Meta.q.state    != 'deleted')
+                        )
+            dbResults = list(dbResults)
+            logger.debug('[update process] dbResults: %s' % dbResults)
+            instids = []
+            for r in dbResults:
+                if not r.instance:
+                    continue
+                instids.append(r.instance.instance_id)
+            logger.debug('[update process] Instance Id: %s' % ', '.join(instids))
+    
+            # Get instance information from Eucalyptus
+            conn = self.getEucaConnection()
+            vmInstances = []
+            reservations = conn.get_all_instances(instids)
             for reservation in reservations:
-                for instance in reservation.instances:
-                    instances.append(instance)
-
-            # Construct a dictionary for the EucaRSpecBuilder
-            instancesDict = {}
-            for instance in instances:
-                instList = instancesDict.setdefault(instance.instance_type, [])
-                instInfoDict = {} 
-
-                instInfoDict['id'] = instance.id
-                instInfoDict['public_dns'] = instance.public_dns_name
-                instInfoDict['state'] = instance.state
-                instInfoDict['key'] = instance.key_name
-
-                instList.append(instInfoDict)
-            cloud['instances'] = instancesDict
-
-    except EC2ResponseError, ec2RespErr:
-        errTree = ET.fromstring(ec2RespErr.body)
-        errMsgE = errTree.find('.//Message')
-        logger.error(errMsgE.text)
-
-    rspec = EucaRSpecBuilder(cloud).toXML()
-
-    # Remove the instances records so next time they won't 
-    # show up.
-    if 'instances' in cloud:
-        del cloud['instances']
-
-    return rspec
-
-"""
-Hook called via 'sfi.py create'
-"""
-def CreateSliver(api, slice_xrn, creds, xml, users, call_id):
-    if Callids().already_handled(call_id): return ""
-
-    global cloud
-    logger = logging.getLogger('EucaAggregate')
-    logger.debug("In CreateSliver")
-
-    aggregate = Aggregate(api)
-    slices = Slices(api)
-    (hrn, type) = urn_to_hrn(slice_xrn)
-    peer = slices.get_peer(hrn)
-    sfa_peer = slices.get_sfa_peer(hrn)
-    slice_record=None
-    if users:
-        slice_record = users[0].get('slice_record', {})
-
-    conn = getEucaConnection()
-    if not conn:
-        logger.error('Cannot create a connection to Eucalyptus')
-        return ""
-
-    # Validate RSpec
-    schemaXML = ET.parse(EUCALYPTUS_RSPEC_SCHEMA)
-    rspecValidator = ET.RelaxNG(schemaXML)
-    rspecXML = ET.XML(xml)
-    for network in rspecXML.iterfind("./network"):
-        if network.get('name') != cloud['name']:
-            # Throw away everything except my own RSpec
-            # sfa_logger().error("CreateSliver: deleting %s from rspec"%network.get('id'))
-            network.getparent().remove(network)
-    if not rspecValidator(rspecXML):
-        error = rspecValidator.error_log.last_error
-        message = '%s (line %s)' % (error.message, error.line) 
-        raise InvalidRSpec(message)
-
+                vmInstances += reservation.instances
+    
+            # Check the IPs
+            instIPs = [ {'id':i.id, 'pri_addr':i.private_dns_name, 'pub_addr':i.public_dns_name}
+                        for i in vmInstances if i.private_dns_name != '0.0.0.0' ]
+            logger.debug('[update process] IP dict: %s' % str(instIPs))
+    
+            # Update the local DB
+            for ipData in instIPs:
+                dbInst = EucaInstance.select(EucaInstance.q.instance_id == ipData['id']).getOne(None)
+                if not dbInst:
+                    logger.info('[update process] Could not find %s in DB' % ipData['id'])
+                    continue
+                dbInst.meta.pri_addr = ipData['pri_addr']
+                dbInst.meta.pub_addr = ipData['pub_addr']
+                dbInst.meta.state    = 'running'
+    
+            self.dumpinstanceInfo()
+    
+    ##
+    # Creates a connection to Eucalyptus. This function is inspired by
+    # the make_connection() in Euca2ools.
+    #
+    # @return A connection object or None
+    #
+    def getEucaConnection():
+        accessKey = AggregateManagerEucalyptus.cloud['access_key']
+        secretKey = AggregateManagerEucalyptus.cloud['secret_key']
+        eucaURL   = AggregateManagerEucalyptus.cloud['cloud_url']
+        useSSL    = False
+        srvPath   = '/'
+        eucaPort  = 8773
+        logger    = logging.getLogger('EucaAggregate')
+    
+        if not accessKey or not secretKey or not eucaURL:
+            logger.error('Please set ALL of the required environment ' \
+                         'variables by sourcing the eucarc file.')
+            return None
+        
+        # Split the url into parts
+        if eucaURL.find('https://') >= 0:
+            useSSL  = True
+            eucaURL = eucaURL.replace('https://', '')
+        elif eucaURL.find('http://') >= 0:
+            useSSL  = False
+            eucaURL = eucaURL.replace('http://', '')
+        (eucaHost, parts) = eucaURL.split(':')
+        if len(parts) > 1:
+            parts = parts.split('/')
+            eucaPort = int(parts[0])
+            parts = parts[1:]
+            srvPath = '/'.join(parts)
+    
+        return boto.connect_ec2(aws_access_key_id=accessKey,
+                                aws_secret_access_key=secretKey,
+                                is_secure=useSSL,
+                                region=RegionInfo(None, 'eucalyptus', eucaHost), 
+                                port=eucaPort,
+                                path=srvPath)
+    
+    def ListResources(api, creds, options, call_id): 
+        if Callids().already_handled(call_id): return ""
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', '')
+        hrn, type = urn_to_hrn(xrn)
+        logger = logging.getLogger('EucaAggregate')
+    
+        # get hrn of the original caller
+        origin_hrn = options.get('origin_hrn', None)
+        if not origin_hrn:
+            origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+    
+        conn = self.getEucaConnection()
+    
+        if not conn:
+            logger.error('Cannot create a connection to Eucalyptus')
+            return 'Cannot create a connection to Eucalyptus'
+    
+        try:
+            # Zones
+            zones = conn.get_all_zones(['verbose'])
+            p = ZoneResultParser(zones)
+            clusters = p.parse()
+            AggregateManagerEucalyptus.cloud['clusters'] = clusters
+            
+            # Images
+            images = conn.get_all_images()
+            AggregateManagerEucalyptus.cloud['images'] = images
+            AggregateManagerEucalyptus.cloud['imageBundles'] = {}
+            for i in images:
+                if i.type != 'machine' or i.kernel_id is None: continue
+                name = os.path.dirname(i.location)
+                detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
+                AggregateManagerEucalyptus.cloud['imageBundles'][name] = detail
+    
+            # Key Pairs
+            keyPairs = conn.get_all_key_pairs()
+            AggregateManagerEucalyptus.cloud['keypairs'] = keyPairs
+    
+            if hrn:
+                instanceId = []
+                instances  = []
+    
+                # Get the instances that belong to the given slice from sqlite3
+                # XXX use getOne() in production because the slice's hrn is supposed
+                # to be unique. For testing, uniqueness is turned off in the db.
+                # If the slice isn't found in the database, create a record for the 
+                # slice.
+                matchedSlices = list(Slice.select(Slice.q.slice_hrn == hrn))
+                if matchedSlices:
+                    theSlice = matchedSlices[-1]
+                else:
+                    theSlice = Slice(slice_hrn = hrn)
+                for instance in theSlice.instances:
+                    instanceId.append(instance.instance_id)
+    
+                # Get the information about those instances using their ids.
+                if len(instanceId) > 0:
+                    reservations = conn.get_all_instances(instanceId)
+                else:
+                    reservations = []
+                for reservation in reservations:
+                    for instance in reservation.instances:
+                        instances.append(instance)
+    
+                # Construct a dictionary for the EucaRSpecBuilder
+                instancesDict = {}
+                for instance in instances:
+                    instList = instancesDict.setdefault(instance.instance_type, [])
+                    instInfoDict = {} 
+    
+                    instInfoDict['id'] = instance.id
+                    instInfoDict['public_dns'] = instance.public_dns_name
+                    instInfoDict['state'] = instance.state
+                    instInfoDict['key'] = instance.key_name
+    
+                    instList.append(instInfoDict)
+                AggregateManagerEucalyptus.cloud['instances'] = instancesDict
+    
+        except EC2ResponseError, ec2RespErr:
+            errTree = ET.fromstring(ec2RespErr.body)
+            errMsgE = errTree.find('.//Message')
+            logger.error(errMsgE.text)
+    
+        rspec = EucaRSpecBuilder(AggregateManagerEucalyptus.cloud).toXML()
+    
+        # Remove the instances records so next time they won't 
+        # show up.
+        if 'instances' in AggregateManagerEucalyptus.cloud:
+            del AggregateManagerEucalyptus.cloud['instances']
+    
+        return rspec
+    
     """
-    Create the sliver[s] (slice) at this aggregate.
-    Verify HRN and initialize the slice record in PLC if necessary.
+    Hook called via 'sfi.py create'
     """
-
-    # ensure site record exists
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
-    # ensure slice record exists
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
-    # ensure person records exists
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-
-    # Get the slice from db or create one.
-    s = Slice.select(Slice.q.slice_hrn == hrn).getOne(None)
-    if s is None:
-        s = Slice(slice_hrn = hrn)
-
-    # Process any changes in existing instance allocation
-    pendingRmInst = []
-    for sliceInst in s.instances:
-        pendingRmInst.append(sliceInst.instance_id)
-    existingInstGroup = rspecXML.findall(".//euca_instances")
-    for instGroup in existingInstGroup:
-        for existingInst in instGroup:
-            if existingInst.get('id') in pendingRmInst:
-                pendingRmInst.remove(existingInst.get('id'))
-    for inst in pendingRmInst:
-        dbInst = EucaInstance.select(EucaInstance.q.instance_id == inst).getOne(None)
-        if dbInst.meta.state != 'deleted':
-            logger.debug('Instance %s will be terminated' % inst)
-            # Terminate instances one at a time for robustness
-            conn.terminate_instances([inst])
-            # Only change the state but do not remove the entry from the DB.
-            dbInst.meta.state = 'deleted'
-            #dbInst.destroySelf()
-
-    # Process new instance requests
-    requests = rspecXML.findall(".//request")
-    if requests:
-        # Get all the public keys associate with slice.
-        keys = []
-        for user in users:
-            keys += user['keys']
-            logger.debug("Keys: %s" % user['keys'])
-        pubKeys = '\n'.join(keys)
-        logger.debug('Passing the following keys to the instance:\n%s' % pubKeys)
-    for req in requests:
-        vmTypeElement = req.getparent()
-        instType = vmTypeElement.get('name')
-        numInst  = int(req.find('instances').text)
-        
-        bundleName = req.find('bundle').text
-        if not cloud['imageBundles'][bundleName]:
-            logger.error('Cannot find bundle %s' % bundleName)
-        bundleInfo = cloud['imageBundles'][bundleName]
-        instKernel  = bundleInfo['kernelID']
-        instDiskImg = bundleInfo['imageID']
-        instRamDisk = bundleInfo['ramdiskID']
-        instKey     = None
-
-        # Create the instances
-        for i in range(0, numInst):
-            eucaInst = EucaInstance(slice      = s,
-                                    kernel_id  = instKernel,
-                                    image_id   = instDiskImg,
-                                    ramdisk_id = instRamDisk,
-                                    key_pair   = instKey,
-                                    inst_type  = instType,
-                                    meta       = Meta(start_time=datetime.datetime.now()))
-            eucaInst.reserveInstance(conn, pubKeys)
-
-    # xxx - should return altered rspec 
-    # with enough data for the client to understand what's happened
-    return xml
-
-##
-# Return information on the IP addresses bound to each slice's instances
-#
-def dumpInstanceInfo():
-    logger = logging.getLogger('EucaMeta')
-    outdir = "/var/www/html/euca/"
-    outfile = outdir + "instances.txt"
-
-    try:
-        os.makedirs(outdir)
-    except OSError, e:
-        if e.errno != errno.EEXIST:
-            raise
-
-    dbResults = Meta.select(
-        AND(Meta.q.pri_addr != None,
-            Meta.q.state    == 'running')
-        )
-    dbResults = list(dbResults)
-    f = open(outfile, "w")
-    for r in dbResults:
-        instId = r.instance.instance_id
-        ipaddr = r.pri_addr
-        hrn = r.instance.slice.slice_hrn
-        logger.debug('[dumpInstanceInfo] %s %s %s' % (instId, ipaddr, hrn))
-        f.write("%s %s %s\n" % (instId, ipaddr, hrn))
-    f.close()
-
-##
-# A separate process that will update the meta data.
-#
-def updateMeta():
-    logger = logging.getLogger('EucaMeta')
-    fileHandler = logging.FileHandler('/var/log/euca_meta.log')
-    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
-    logger.addHandler(fileHandler)
-    fileHandler.setLevel(logging.DEBUG)
-    logger.setLevel(logging.DEBUG)
-
-    while True:
-        sleep(30)
-
-        # Get IDs of the instances that don't have IPs yet.
+    def CreateSliver(api, slice_xrn, creds, xml, users, call_id):
+        if Callids().already_handled(call_id): return ""
+    
+        logger = logging.getLogger('EucaAggregate')
+        logger.debug("In CreateSliver")
+    
+        aggregate = Aggregate(api)
+        slices = Slices(api)
+        (hrn, type) = urn_to_hrn(slice_xrn)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        conn = self.getEucaConnection()
+        if not conn:
+            logger.error('Cannot create a connection to Eucalyptus')
+            return ""
+    
+        # Validate RSpec
+        schemaXML = ET.parse(AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA)
+        rspecValidator = ET.RelaxNG(schemaXML)
+        rspecXML = ET.XML(xml)
+        for network in rspecXML.iterfind("./network"):
+            if network.get('name') != AggregateManagerEucalyptus.cloud['name']:
+                # Throw away everything except my own RSpec
+                # sfa_logger().error("CreateSliver: deleting %s from rspec"%network.get('id'))
+                network.getparent().remove(network)
+        if not rspecValidator(rspecXML):
+            error = rspecValidator.error_log.last_error
+            message = '%s (line %s)' % (error.message, error.line) 
+            raise InvalidRSpec(message)
+    
+        """
+        Create the sliver[s] (slice) at this aggregate.
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+    
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+    
+        # Get the slice from db or create one.
+        s = Slice.select(Slice.q.slice_hrn == hrn).getOne(None)
+        if s is None:
+            s = Slice(slice_hrn = hrn)
+    
+        # Process any changes in existing instance allocation
+        pendingRmInst = []
+        for sliceInst in s.instances:
+            pendingRmInst.append(sliceInst.instance_id)
+        existingInstGroup = rspecXML.findall(".//euca_instances")
+        for instGroup in existingInstGroup:
+            for existingInst in instGroup:
+                if existingInst.get('id') in pendingRmInst:
+                    pendingRmInst.remove(existingInst.get('id'))
+        for inst in pendingRmInst:
+            dbInst = EucaInstance.select(EucaInstance.q.instance_id == inst).getOne(None)
+            if dbInst.meta.state != 'deleted':
+                logger.debug('Instance %s will be terminated' % inst)
+                # Terminate instances one at a time for robustness
+                conn.terminate_instances([inst])
+                # Only change the state but do not remove the entry from the DB.
+                dbInst.meta.state = 'deleted'
+                #dbInst.destroySelf()
+    
+        # Process new instance requests
+        requests = rspecXML.findall(".//request")
+        if requests:
+            # Get all the public keys associated with the slice.
+            keys = []
+            for user in users:
+                keys += user['keys']
+                logger.debug("Keys: %s" % user['keys'])
+            pubKeys = '\n'.join(keys)
+            logger.debug('Passing the following keys to the instance:\n%s' % pubKeys)
+        for req in requests:
+            vmTypeElement = req.getparent()
+            instType = vmTypeElement.get('name')
+            numInst  = int(req.find('instances').text)
+            
+            bundleName = req.find('bundle').text
+            if not AggregateManagerEucalyptus.cloud['imageBundles'][bundleName]:
+                logger.error('Cannot find bundle %s' % bundleName)
+            bundleInfo = AggregateManagerEucalyptus.cloud['imageBundles'][bundleName]
+            instKernel  = bundleInfo['kernelID']
+            instDiskImg = bundleInfo['imageID']
+            instRamDisk = bundleInfo['ramdiskID']
+            instKey     = None
+    
+            # Create the instances
+            for i in range(0, numInst):
+                eucaInst = EucaInstance(slice      = s,
+                                        kernel_id  = instKernel,
+                                        image_id   = instDiskImg,
+                                        ramdisk_id = instRamDisk,
+                                        key_pair   = instKey,
+                                        inst_type  = instType,
+                                        meta       = Meta(start_time=datetime.datetime.now()))
+                eucaInst.reserveInstance(conn, pubKeys)
+    
+        # xxx - should return altered rspec 
+        # with enough data for the client to understand what's happened
+        return xml
+    
+    ##
+    # Return information on the IP addresses bound to each slice's instances
+    #
+    def dumpInstanceInfo():
+        logger = logging.getLogger('EucaMeta')
+        outdir = "/var/www/html/euca/"
+        outfile = outdir + "instances.txt"
+    
+        try:
+            os.makedirs(outdir)
+        except OSError, e:
+            if e.errno != errno.EEXIST:
+                raise
+    
         dbResults = Meta.select(
-                      AND(Meta.q.pri_addr == None,
-                          Meta.q.state    != 'deleted')
-                    )
+            AND(Meta.q.pri_addr != None,
+                Meta.q.state    == 'running')
+            )
         dbResults = list(dbResults)
-        logger.debug('[update process] dbResults: %s' % dbResults)
-        instids = []
+        f = open(outfile, "w")
         for r in dbResults:
-            if not r.instance:
-                continue
-            instids.append(r.instance.instance_id)
-        logger.debug('[update process] Instance Id: %s' % ', '.join(instids))
-
-        # Get instance information from Eucalyptus
-        conn = getEucaConnection()
-        vmInstances = []
-        reservations = conn.get_all_instances(instids)
-        for reservation in reservations:
-            vmInstances += reservation.instances
-
-        # Check the IPs
-        instIPs = [ {'id':i.id, 'pri_addr':i.private_dns_name, 'pub_addr':i.public_dns_name}
-                    for i in vmInstances if i.private_dns_name != '0.0.0.0' ]
-        logger.debug('[update process] IP dict: %s' % str(instIPs))
-
-        # Update the local DB
-        for ipData in instIPs:
-            dbInst = EucaInstance.select(EucaInstance.q.instance_id == ipData['id']).getOne(None)
-            if not dbInst:
-                logger.info('[update process] Could not find %s in DB' % ipData['id'])
-                continue
-            dbInst.meta.pri_addr = ipData['pri_addr']
-            dbInst.meta.pub_addr = ipData['pub_addr']
-            dbInst.meta.state    = 'running'
-
-        dumpInstanceInfo()
-
-def GetVersion(api):
-    xrn=Xrn(api.hrn)
-    request_rspec_versions = [dict(sfa_rspec_version)]
-    ad_rspec_versions = [dict(sfa_rspec_version)]
-    version_more = {'interface':'aggregate',
-                    'testbed':'myplc',
-                    'hrn':xrn.get_hrn(),
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': dict(sfa_rspec_version)
-                    }
-    return version_core(version_more)
-
-def main():
-    init_server()
-
-    #theRSpec = None
-    #with open(sys.argv[1]) as xml:
-    #    theRSpec = xml.read()
-    #CreateSliver(None, 'planetcloud.pc.test', theRSpec, 'call-id-cloudtest')
-
-    #rspec = ListResources('euca', 'planetcloud.pc.test', 'planetcloud.pc.marcoy', 'test_euca')
-    #print rspec
-
-    server_key_file = '/var/lib/sfa/authorities/server.key'
-    server_cert_file = '/var/lib/sfa/authorities/server.cert'
-    api = SfaAPI(key_file = server_key_file, cert_file = server_cert_file, interface='aggregate')
-    print getKeysForSlice(api, 'gc.gc.test1')
-
-if __name__ == "__main__":
-    main()
-
+            instId = r.instance.instance_id
+            ipaddr = r.pri_addr
+            hrn = r.instance.slice.slice_hrn
+            logger.debug('[dumpInstanceInfo] %s %s %s' % (instId, ipaddr, hrn))
+            f.write("%s %s %s\n" % (instId, ipaddr, hrn))
+        f.close()
+    
+    def GetVersion(api):
+
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn)
+        version_more = {'interface':'aggregate',
+                        'testbed':'myplc',
+                        'hrn':xrn.get_hrn(),
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
+                        }
+        return version_core(version_more)
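The dumpInstanceInfo() method added above writes one line per running instance to /var/www/html/euca/instances.txt, in the order instance id, private address, slice hrn. A minimal consumer sketch (not part of this patch; the file path and field order come from the code above, while the sample values in the comment are made up):

    # illustrative only: parse the dump produced by dumpInstanceInfo()
    with open("/var/www/html/euca/instances.txt") as dump:
        for line in dump:
            inst_id, pri_addr, slice_hrn = line.split()
            # e.g. inst_id='i-3FA6D295', pri_addr='10.0.0.12', slice_hrn='plc.site.slice1'
            print inst_id, pri_addr, slice_hrn
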
index fd0e127..09e56b7 100644 (file)
-from sfa.plc.slices import Slices\r
-from sfa.server.registry import Registries\r
-from sfa.util.xrn import urn_to_hrn, hrn_to_urn, get_authority, Xrn\r
-from sfa.util.plxrn import hrn_to_pl_slicename\r
-from sfa.util.rspec import RSpec\r
-from sfa.util.sfalogging import logger\r
-from sfa.util.faults import *\r
-from sfa.util.config import Config\r
-from sfa.util.sfatime import utcparse\r
-from sfa.util.callids import Callids\r
-from sfa.util.version import version_core\r
-from sfa.rspecs.rspec_version import RSpecVersion\r
-from sfa.rspecs.sfa_rspec import sfa_rspec_version\r
-from sfa.rspecs.rspec_parser import parse_rspec\r
-from sfa.managers.aggregate_manager_pl import __get_registry_objects, ListSlices\r
-import os\r
-import time\r
-import re\r
-\r
-RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"\r
-\r
-# execute shell command and return both exit code and text output\r
-def shell_execute(cmd, timeout):\r
-    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')\r
-    pipe = os.popen(cmd + ' 2>&1', 'r')\r
-    text = ''\r
-    while timeout:\r
-        line = pipe.read()\r
-        text += line\r
-        time.sleep(1)\r
-        timeout = timeout-1\r
-    code = pipe.close()\r
-    if code is None: code = 0\r
-    if text[-1:] == '\n': text = text[:-1]\r
-    return code, text\r
-\r
-"""\r
- call AM API client with command like in the following example:\r
- cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \\r
-      net.geni.aggregate.client.examples.CreateSliceNetworkClient \\r
-      ./repo https://geni:8443/axis2/services/AggregateGENI \\r
-      ... params ...\r
-"""\r
-\r
-def call_am_apiclient(client_app, params, timeout):\r
-    (client_path, am_url) = Config().get_max_aggrMgr_info()\r
-    sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + client_app + " ./repo " + am_url + " " + ' '.join(params)\r
-    ret = shell_execute(sys_cmd, timeout)\r
-    logger.debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))\r
-    return ret\r
-\r
-# save request RSpec xml content to a tmp file\r
-def save_rspec_to_file(rspec):\r
-    path = RSPEC_TMP_FILE_PREFIX + "_" + time.strftime('%Y%m%dT%H:%M:%S', time.gmtime(time.time())) +".xml"\r
-    file = open(path, "w")\r
-    file.write(rspec)\r
-    file.close()\r
-    return path\r
-\r
-# get stripped down slice id/name plc.maxpl.xislice1 --> maxpl_xislice1\r
-def get_plc_slice_id(cred, xrn):\r
-    (hrn, type) = urn_to_hrn(xrn)\r
-    slice_id = hrn.find(':')\r
-    sep = '.'\r
-    if hrn.find(':') != -1:\r
-        sep=':'\r
-    elif hrn.find('+') != -1:\r
-        sep='+'\r
-    else:\r
-        sep='.'\r
-    slice_id = hrn.split(sep)[-2] + '_' + hrn.split(sep)[-1]\r
-    return slice_id\r
-\r
-# extract xml \r
-def get_xml_by_tag(text, tag):\r
-    indx1 = text.find('<'+tag)\r
-    indx2 = text.find('/'+tag+'>')\r
-    xml = None\r
-    if indx1!=-1 and indx2>indx1:\r
-        xml = text[indx1:indx2+len(tag)+2]\r
-    return xml\r
-\r
-def prepare_slice(api, slice_xrn, creds, users):\r
-    reg_objects = __get_registry_objects(slice_xrn, creds, users)\r
-    (hrn, type) = urn_to_hrn(slice_xrn)\r
-    slices = Slices(api)\r
-    peer = slices.get_peer(hrn)\r
-    sfa_peer = slices.get_sfa_peer(hrn)\r
-    slice_record=None\r
-    if users:\r
-        slice_record = users[0].get('slice_record', {})\r
-    registry = api.registries[api.hrn]\r
-    credential = api.getCredential()\r
-    # ensure site record exists\r
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)\r
-    # ensure slice record exists\r
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)\r
-    # ensure person records exists\r
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)\r
-\r
-def parse_resources(text, slice_xrn):\r
-    resources = []\r
-    urn = hrn_to_urn(slice_xrn, 'sliver')\r
-    plc_slice = re.search("Slice Status => ([^\n]+)", text)\r
-    if plc_slice.group(1) != 'NONE':\r
-        res = {}\r
-        res['geni_urn'] = urn + '_plc_slice'\r
-        res['geni_error'] = ''\r
-        res['geni_status'] = 'unknown'\r
-        if plc_slice.group(1) == 'CREATED':\r
-            res['geni_status'] = 'ready'\r
-        resources.append(res)\r
-    vlans = re.findall("GRI => ([^\n]+)\n\t  Status => ([^\n]+)", text)\r
-    for vlan in vlans:\r
-        res = {}\r
-        res['geni_error'] = ''\r
-        res['geni_urn'] = urn + '_vlan_' + vlan[0]\r
-        if vlan[1] == 'ACTIVE':\r
-            res['geni_status'] = 'ready'\r
-        elif vlan[1] == 'FAILED':\r
-            res['geni_status'] = 'failed'\r
-        else:\r
-            res['geni_status'] = 'configuring'\r
-        resources.append(res)\r
-    return resources\r
-\r
-def slice_status(api, slice_xrn, creds):\r
-    urn = hrn_to_urn(slice_xrn, 'slice')\r
-    result = {}\r
-    top_level_status = 'unknown'\r
-    slice_id = get_plc_slice_id(creds, urn)\r
-    (ret, output) = call_am_apiclient("QuerySliceNetworkClient", [slice_id,], 5)\r
-    # parse output into rspec XML\r
-    if output.find("Unkown Rspec:") > 0:\r
-        top_level_staus = 'failed'\r
-        result['geni_resources'] = ''\r
-    else:\r
-        has_failure = 0\r
-        all_active = 0\r
-        if output.find("Status => FAILED") > 0:\r
-            top_level_staus = 'failed'\r
-        elif (    output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0\r
-               or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0\r
-             ):\r
-            top_level_status = 'configuring'\r
-        else:\r
-            top_level_status = 'ready'\r
-        result['geni_resources'] = parse_resources(output, slice_xrn)\r
-    result['geni_urn'] = urn\r
-    result['geni_status'] = top_level_status\r
-    return result\r
-\r
-def create_slice(api, xrn, cred, rspec, users):\r
-    indx1 = rspec.find("<RSpec")\r
-    indx2 = rspec.find("</RSpec>")\r
-    if indx1 > -1 and indx2 > indx1:\r
-        rspec = rspec[indx1+len("<RSpec type=\"SFA\">"):indx2-1]\r
-    rspec_path = save_rspec_to_file(rspec)\r
-    prepare_slice(api, xrn, cred, users)\r
-    slice_id = get_plc_slice_id(cred, xrn)\r
-    sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" +slice_id+ "/g\" " + rspec_path + ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" +slice_id+ "/g\" " + rspec_path\r
-    ret = shell_execute(sys_cmd, 1)\r
-    sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + rspec_path + "/g\""\r
-    ret = shell_execute(sys_cmd, 1)\r
-    (ret, output) = call_am_apiclient("CreateSliceNetworkClient", [rspec_path,], 3)\r
-    # parse output ?\r
-    rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"\r
-    return True\r
-\r
-def delete_slice(api, xrn, cred):\r
-    slice_id = get_plc_slice_id(cred, xrn)\r
-    (ret, output) = call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)\r
-    # parse output ?\r
-    return 1\r
-\r
-\r
-def get_rspec(api, cred, slice_urn):\r
-    logger.debug("#### called max-get_rspec")\r
-    #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1\r
-    if slice_urn == None:\r
-        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)\r
-    else:\r
-        slice_id = get_plc_slice_id(cred, slice_urn)\r
-        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)\r
-    # parse output into rspec XML\r
-    if output.find("No resouce found") > 0:\r
-        rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"\r
-    else:\r
-        comp_rspec = get_xml_by_tag(output, 'computeResource')\r
-        logger.debug("#### computeResource %s" % comp_rspec)\r
-        topo_rspec = get_xml_by_tag(output, 'topology')\r
-        logger.debug("#### topology %s" % topo_rspec)\r
-        rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">";\r
-        if comp_rspec != None:\r
-            rspec = rspec + get_xml_by_tag(output, 'computeResource')\r
-        if topo_rspec != None:\r
-            rspec = rspec + get_xml_by_tag(output, 'topology')\r
-        rspec = rspec + "</network> </RSpec>"\r
-    return (rspec)\r
-\r
-def start_slice(api, xrn, cred):\r
-    # service not supported\r
-    return None\r
-\r
-def stop_slice(api, xrn, cred):\r
-    # service not supported\r
-    return None\r
-\r
-def reset_slices(api, xrn):\r
-    # service not supported\r
-    return None\r
-\r
-"""\r
-    GENI AM API Methods\r
-"""\r
-\r
-def GetVersion(api):\r
-    xrn=Xrn(api.hrn)\r
-    request_rspec_versions = [dict(sfa_rspec_version)]\r
-    ad_rspec_versions = [dict(sfa_rspec_version)]\r
-    #TODO: MAX-AM specific\r
-    version_more = {'interface':'aggregate',\r
-                    'testbed':'myplc',\r
-                    'hrn':xrn.get_hrn(),\r
-                    'request_rspec_versions': request_rspec_versions,\r
-                    'ad_rspec_versions': ad_rspec_versions,\r
-                    'default_ad_rspec': dict(sfa_rspec_version)\r
-                    }\r
-    return version_core(version_more)\r
-\r
-def SliverStatus(api, slice_xrn, creds, call_id):\r
-    if Callids().already_handled(call_id): return {}\r
-    return slice_status(api, slice_xrn, creds)\r
-\r
-def CreateSliver(api, slice_xrn, creds, rspec_string, users, call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    #TODO: create real CreateSliver response rspec\r
-    ret = create_slice(api, slice_xrn, creds, rspec_string, users)\r
-    if ret:\r
-        return get_rspec(api, creds, slice_xrn)\r
-    else:\r
-        return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"\r
-\r
-def DeleteSliver(api, xrn, creds, call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    return delete_slice(api, xrn, creds)\r
-\r
-# no caching\r
-def ListResources(api, creds, options,call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    # version_string = "rspec_%s" % (rspec_version.get_version_name())\r
-    slice_urn = options.get('geni_slice_urn')\r
-    return get_rspec(api, creds, slice_urn)\r
-\r
-"""\r
-Returns the request context required by sfatables. At some point, this mechanism should be changed\r
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just\r
-return the basic information needed in a dict.\r
-"""\r
-def fetch_context(slice_hrn, user_hrn, contexts):\r
-    base_context = {'sfa':{'user':{'hrn':user_hrn}}}\r
-    return base_context\r
-    api = SfaAPI()\r
-    create_slice(api, "plc.maxpl.test000", None, rspec_xml, None)\r
-\r
+import os
+import time
+import re
+
+#from sfa.util.faults import *
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+from sfa.util.callids import Callids
+from sfa.util.version import version_core
+from sfa.util.xrn import urn_to_hrn, hrn_to_urn, Xrn
+
+# xxx the sfa.rspecs module is dead - this symbol is now undefined
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
+
+from sfa.managers.aggregate_manager import AggregateManager
+
+from sfa.plc.slices import Slices
+
+class AggregateManagerMax (AggregateManager):
+
+    RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"
+    
+    # execute shell command and return both exit code and text output
+    def shell_execute(self, cmd, timeout):
+        pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+        pipe = os.popen(cmd + ' 2>&1', 'r')
+        text = ''
+        while timeout:
+            line = pipe.read()
+            text += line
+            time.sleep(1)
+            timeout = timeout-1
+        code = pipe.close()
+        if code is None: code = 0
+        if text[-1:] == '\n': text = text[:-1]
+        return code, text
+    
+   
+    def call_am_apiclient(self, client_app, params, timeout):
+        """
+        call AM API client with command like in the following example:
+        cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \
+          net.geni.aggregate.client.examples.CreateSliceNetworkClient \
+          ./repo https://geni:8443/axis2/services/AggregateGENI \
+          ... params ...
+        """
+        (client_path, am_url) = Config().get_max_aggrMgr_info()
+        sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + client_app + " ./repo " + am_url + " " + ' '.join(params)
+        ret = self.shell_execute(sys_cmd, timeout)
+        logger.debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))
+        return ret
+    
+    # save request RSpec xml content to a tmp file
+    def save_rspec_to_file(self, rspec):
+        path = AggregateManagerMax.RSPEC_TMP_FILE_PREFIX + "_" + \
+            time.strftime('%Y%m%dT%H:%M:%S', time.gmtime(time.time())) +".xml"
+        file = open(path, "w")
+        file.write(rspec)
+        file.close()
+        return path
+    
+    # get stripped down slice id/name plc.maxpl.xislice1 --> maxpl_xislice1
+    def get_plc_slice_id(self, cred, xrn):
+        (hrn, type) = urn_to_hrn(xrn)
+        slice_id = hrn.find(':')
+        sep = '.'
+        if hrn.find(':') != -1:
+            sep=':'
+        elif hrn.find('+') != -1:
+            sep='+'
+        else:
+            sep='.'
+        slice_id = hrn.split(sep)[-2] + '_' + hrn.split(sep)[-1]
+        return slice_id
+    
+    # extract xml 
+    def get_xml_by_tag(self, text, tag):
+        indx1 = text.find('<'+tag)
+        indx2 = text.find('/'+tag+'>')
+        xml = None
+        if indx1!=-1 and indx2>indx1:
+            xml = text[indx1:indx2+len(tag)+2]
+        return xml
+    
+    def prepare_slice(self, api, slice_xrn, creds, users):
+        reg_objects = self._get_registry_objects(slice_xrn, creds, users)
+        (hrn, type) = urn_to_hrn(slice_xrn)
+        slices = Slices(api)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+        registry = api.registries[api.hrn]
+        credential = api.getCredential()
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+    
+    def parse_resources(self, text, slice_xrn):
+        resources = []
+        urn = hrn_to_urn(slice_xrn, 'sliver')
+        plc_slice = re.search("Slice Status => ([^\n]+)", text)
+        if plc_slice.group(1) != 'NONE':
+            res = {}
+            res['geni_urn'] = urn + '_plc_slice'
+            res['geni_error'] = ''
+            res['geni_status'] = 'unknown'
+            if plc_slice.group(1) == 'CREATED':
+                res['geni_status'] = 'ready'
+            resources.append(res)
+        vlans = re.findall("GRI => ([^\n]+)\n\t  Status => ([^\n]+)", text)
+        for vlan in vlans:
+            res = {}
+            res['geni_error'] = ''
+            res['geni_urn'] = urn + '_vlan_' + vlan[0]
+            if vlan[1] == 'ACTIVE':
+                res['geni_status'] = 'ready'
+            elif vlan[1] == 'FAILED':
+                res['geni_status'] = 'failed'
+            else:
+                res['geni_status'] = 'configuring'
+            resources.append(res)
+        return resources
+    
+    def slice_status(self, api, slice_xrn, creds):
+        urn = hrn_to_urn(slice_xrn, 'slice')
+        result = {}
+        top_level_status = 'unknown'
+        slice_id = self.get_plc_slice_id(creds, urn)
+        (ret, output) = self.call_am_apiclient("QuerySliceNetworkClient", [slice_id,], 5)
+        # parse output into rspec XML
+        if output.find("Unkown Rspec:") > 0:
+            top_level_status = 'failed'
+            result['geni_resources'] = ''
+        else:
+            has_failure = 0
+            all_active = 0
+            if output.find("Status => FAILED") > 0:
+                top_level_status = 'failed'
+            elif (    output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0
+                   or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0
+                 ):
+                top_level_status = 'configuring'
+            else:
+                top_level_status = 'ready'
+            result['geni_resources'] = self.parse_resources(output, slice_xrn)
+        result['geni_urn'] = urn
+        result['geni_status'] = top_level_status
+        return result
+    
+    def create_slice(self, api, xrn, cred, rspec, users):
+        indx1 = rspec.find("<RSpec")
+        indx2 = rspec.find("</RSpec>")
+        if indx1 > -1 and indx2 > indx1:
+            rspec = rspec[indx1+len("<RSpec type=\"SFA\">"):indx2-1]
+        rspec_path = self.save_rspec_to_file(rspec)
+        self.prepare_slice(api, xrn, cred, users)
+        slice_id = self.get_plc_slice_id(cred, xrn)
+        sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" +slice_id+ "/g\" " + rspec_path + ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" +slice_id+ "/g\" " + rspec_path
+        ret = self.shell_execute(sys_cmd, 1)
+        sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + rspec_path + "/g\""
+        ret = self.shell_execute(sys_cmd, 1)
+        (ret, output) = self.call_am_apiclient("CreateSliceNetworkClient", [rspec_path,], 3)
+        # parse output ?
+        rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"
+        return True
+    
+    def delete_slice(self, api, xrn, cred):
+        slice_id = self.get_plc_slice_id(cred, xrn)
+        (ret, output) = self.call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)
+        # parse output ?
+        return 1
+    
+    
+    def get_rspec(self, api, cred, slice_urn):
+        logger.debug("#### called max-get_rspec")
+        #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
+        if slice_urn == None:
+            (ret, output) = self.call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)
+        else:
+            slice_id = self.get_plc_slice_id(cred, slice_urn)
+            (ret, output) = self.call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)
+        # parse output into rspec XML
+        if output.find("No resouce found") > 0:
+            rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
+        else:
+            comp_rspec = self.get_xml_by_tag(output, 'computeResource')
+            logger.debug("#### computeResource %s" % comp_rspec)
+            topo_rspec = self.get_xml_by_tag(output, 'topology')
+            logger.debug("#### topology %s" % topo_rspec)
+            rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">"
+            if comp_rspec != None:
+                rspec = rspec + self.get_xml_by_tag(output, 'computeResource')
+            if topo_rspec != None:
+                rspec = rspec + self.get_xml_by_tag(output, 'topology')
+            rspec = rspec + "</network> </RSpec>"
+        return (rspec)
+    
+    def start_slice(self, api, xrn, cred):
+        # service not supported
+        return None
+    
+    def stop_slice(self, api, xrn, cred):
+        # service not supported
+        return None
+    
+    def reset_slices(self, api, xrn):
+        # service not supported
+        return None
+    
+    ### GENI AM API Methods
+    
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        if Callids().already_handled(call_id): return {}
+        return self.slice_status(api, slice_xrn, creds)
+    
+    def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, call_id):
+        if Callids().already_handled(call_id): return ""
+        #TODO: create real CreateSliver response rspec
+        ret = self.create_slice(api, slice_xrn, creds, rspec_string, users)
+        if ret:
+            return self.get_rspec(api, creds, slice_xrn)
+        else:
+            return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        if Callids().already_handled(call_id): return ""
+        return self.delete_slice(api, xrn, creds)
+    
+    # no caching
+    def ListResources(self, api, creds, options,call_id):
+        if Callids().already_handled(call_id): return ""
+        # version_string = "rspec_%s" % (rspec_version.get_version_name())
+        slice_urn = options.get('geni_slice_urn')
+        return self.get_rspec(api, creds, slice_urn)
+    
+    def fetch_context(self, slice_hrn, user_hrn, contexts):
+        """
+        Returns the request context required by sfatables. At some point, this mechanism should be changed
+        to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
+        return the basic information needed in a dict.
+        """
+        base_context = {'sfa':{'user':{'hrn':user_hrn}}}
+        return base_context
+
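For reference, shell_execute() above runs the command with stderr redirected into stdout and returns an (exit_code, output) pair, normalising a None close status to 0. A usage sketch (not part of this patch; it assumes AggregateManagerMax can be instantiated with no arguments, which is what ManagerWrapper does for class-based managers elsewhere in this commit, and the command and timeout are arbitrary examples; logger is the module-level logger imported above):

    # illustrative only
    am = AggregateManagerMax()
    code, text = am.shell_execute("ls /tmp", 2)
    if code:
        logger.error("command failed with exit code %s" % code)
    else:
        logger.debug("command output: %s" % text)
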
diff --git a/sfa/managers/aggregate_manager_openflow.py b/sfa/managers/aggregate_manager_openflow.py
deleted file mode 100755 (executable)
index 1edc90b..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-import sys
-
-import socket
-import struct
-
-#The following is not essential
-#from soaplib.wsgi_soap import SimpleWSGISoapApp
-#from soaplib.serializers.primitive import *
-#from soaplib.serializers.clazz import *
-
-from sfa.util.faults import *
-from sfa.util.xrn import urn_to_hrn
-from sfa.util.rspec import RSpec
-from sfa.server.registry import Registries
-from sfa.util.config import Config
-from sfa.plc.nodes import *
-from sfa.util.callids import Callids
-
-# Message IDs for all the SFA light calls
-# This will be used by the aggrMgr controller
-SFA_GET_RESOURCES = 101
-SFA_CREATE_SLICE = 102
-SFA_START_SLICE = 103
-SFA_STOP_SLICE = 104
-SFA_DELETE_SLICE = 105
-SFA_GET_SLICES = 106
-SFA_RESET_SLICES = 107
-
-DEBUG = 1
-
-def print_buffer(buf):
-    for i in range(0,len(buf)):
-        print('%x' % buf[i])
-
-def extract(sock):
-    # Shud we first obtain the message length?
-    # msg_len = socket.ntohs(sock.recv(2))
-    msg = ""
-
-    while (1):
-        try:
-            chunk = sock.recv(1)
-        except socket.error, message:
-            if 'timed out' in message:
-                break
-            else:
-                sys.exit("Socket error: " + message)
-
-        if len(chunk) == 0:
-            break
-        msg += chunk
-
-    print 'Done extracting %d bytes of response from aggrMgr' % len(msg)
-    return msg
-   
-def connect(server, port):
-    '''Connect to the Aggregate Manager module'''
-    sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
-    sock.connect ( ( server, port) )
-    sock.settimeout(1)
-    if DEBUG: print 'Connected!'
-    return sock
-    
-def connect_aggrMgr():
-    (aggr_mgr_ip, aggr_mgr_port) = Config().get_openflow_aggrMgr_info()
-    if DEBUG: print """Connecting to port %d of %s""" % (aggr_mgr_port, aggr_mgr_ip)
-    return connect(aggr_mgr_ip, aggr_mgr_port)
-
-def generate_slide_id(cred, hrn):
-    if cred == None:
-        cred = ""
-    if hrn == None:
-        hrn = ""
-    #return cred + '_' + hrn
-    return str(hrn)
-
-def msg_aggrMgr(cred, hrn, msg_id):
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds' % len(slice_id), msg_id, slice_id)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        aggrMgr_sock.close()
-        return 1
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return 0
-
-def start_slice(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received start_slice call"
-    return msg_aggrMgr(SFA_START_SLICE)
-
-def stop_slice(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received stop_slice call"
-    return msg_aggrMgr(SFA_STOP_SLICE)
-
-def DeleteSliver(cred, xrn, call_id):
-    if Callids().already_handled(call_id): return ""
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received DeleteSliver call"
-    return msg_aggrMgr(SFA_DELETE_SLICE)
-
-def reset_slices(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received reset_slices call"
-    return msg_aggrMgr(SFA_RESET_SLICES)
-
-### Thierry: xxx this should ahve api as a first arg - probably outdated 
-def CreateSliver(cred, xrn, rspec, call_id):
-    if Callids().already_handled(call_id): return ""
-
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received CreateSliver call"
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds%ds' % (len(slice_id)+1, len(rspec)), SFA_CREATE_SLICE, slice_id, rspec)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        if DEBUG: print "Sent %d bytes and closing connection" % len(buf)
-        aggrMgr_sock.close()
-
-        if DEBUG: print "----------------"
-        return rspec
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return ""
-
-# Thierry : xxx this would need to handle call_id like the other AMs but is outdated...
-def ListResources(cred, xrn=None):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received ListResources call"
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds' % len(slice_id), SFA_GET_RESOURCES, slice_id)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        resource_list = extract(aggrMgr_sock);
-        aggrMgr_sock.close()
-
-        if DEBUG: print "----------------"
-        return resource_list 
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return None
-
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
-    base_context = {'sfa':{'user':{'hrn':user_hrn}}}
-    return base_context
-
-def main():
-    r = RSpec()
-    r.parseFile(sys.argv[1])
-    rspec = r.toDict()
-    CreateSliver(None,'plc',rspec,'call-id-plc')
-    
-if __name__ == "__main__":
-    main()
diff --git a/sfa/managers/aggregate_manager_pl.py b/sfa/managers/aggregate_manager_pl.py
deleted file mode 100644 (file)
index e61efc7..0000000
+++ /dev/null
@@ -1,407 +0,0 @@
-import datetime
-import time
-import traceback
-import sys
-import re
-from types import StringTypes
-
-from sfa.util.faults import *
-from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id
-from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename, hostname_to_urn
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.record import SfaRecord
-from sfa.util.policy import Policy
-from sfa.util.record import *
-from sfa.util.sfaticket import SfaTicket
-from sfa.plc.slices import Slices
-from sfa.trust.credential import Credential
-import sfa.plc.peers as peers
-from sfa.plc.network import *
-from sfa.plc.api import SfaAPI
-from sfa.plc.aggregate import Aggregate
-from sfa.plc.slices import *
-from sfa.util.version import version_core
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-from sfa.util.sfatime import utcparse
-from sfa.util.callids import Callids
-
-def GetVersion(api):
-
-    version_manager = VersionManager()
-    ad_rspec_versions = []
-    request_rspec_versions = []
-    for rspec_version in version_manager.versions:
-        if rspec_version.content_type in ['*', 'ad']:
-            ad_rspec_versions.append(rspec_version.to_dict())
-        if rspec_version.content_type in ['*', 'request']:
-            request_rspec_versions.append(rspec_version.to_dict()) 
-    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
-    xrn=Xrn(api.hrn)
-    version_more = {'interface':'aggregate',
-                    'testbed':'myplc',
-                    'hrn':xrn.get_hrn(),
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': default_rspec_version
-                    }
-    return version_core(version_more)
-
-def __get_registry_objects(slice_xrn, creds, users):
-    """
-
-    """
-    hrn, type = urn_to_hrn(slice_xrn)
-
-    hrn_auth = get_authority(hrn)
-
-    # Build up objects that an SFA registry would return if SFA
-    # could contact the slice's registry directly
-    reg_objects = None
-
-    if users:
-        # dont allow special characters in the site login base
-        #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
-        #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
-        slicename = hrn_to_pl_slicename(hrn)
-        login_base = slicename.split('_')[0]
-        reg_objects = {}
-        site = {}
-        site['site_id'] = 0
-        site['name'] = 'geni.%s' % login_base 
-        site['enabled'] = True
-        site['max_slices'] = 100
-
-        # Note:
-        # Is it okay if this login base is the same as one already at this myplc site?
-        # Do we need uniqueness?  Should use hrn_auth instead of just the leaf perhaps?
-        site['login_base'] = login_base
-        site['abbreviated_name'] = login_base
-        site['max_slivers'] = 1000
-        reg_objects['site'] = site
-
-        slice = {}
-        
-        # get_expiration always returns a normalized datetime - no need to utcparse
-        extime = Credential(string=creds[0]).get_expiration()
-        # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
-        if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
-            extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
-        slice['expires'] = int(time.mktime(extime.timetuple()))
-        slice['hrn'] = hrn
-        slice['name'] = hrn_to_pl_slicename(hrn)
-        slice['url'] = hrn
-        slice['description'] = hrn
-        slice['pointer'] = 0
-        reg_objects['slice_record'] = slice
-
-        reg_objects['users'] = {}
-        for user in users:
-            user['key_ids'] = []
-            hrn, _ = urn_to_hrn(user['urn'])
-            user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
-            user['first_name'] = hrn
-            user['last_name'] = hrn
-            reg_objects['users'][user['email']] = user
-
-        return reg_objects
-
-def __get_hostnames(nodes):
-    hostnames = []
-    for node in nodes:
-        hostnames.append(node.hostname)
-    return hostnames
-
-def SliverStatus(api, slice_xrn, creds, call_id):
-    if Callids().already_handled(call_id): return {}
-
-    (hrn, type) = urn_to_hrn(slice_xrn)
-    # find out where this slice is currently running
-    slicename = hrn_to_pl_slicename(hrn)
-    
-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
-    if len(slices) == 0:        
-        raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
-    slice = slices[0]
-    
-    # report about the local nodes only
-    nodes = api.plshell.GetNodes(api.plauth, {'node_id':slice['node_ids'],'peer_id':None},
-                                 ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-    site_ids = [node['site_id'] for node in nodes]
-    sites = api.plshell.GetSites(api.plauth, site_ids, ['site_id', 'login_base'])
-    sites_dict = dict ( [ (site['site_id'],site['login_base'] ) for site in sites ] )
-
-    result = {}
-    top_level_status = 'unknown'
-    if nodes:
-        top_level_status = 'ready'
-    slice_urn = Xrn(slice_xrn, 'slice').get_urn()
-    result['geni_urn'] = slice_urn
-    result['pl_login'] = slice['name']
-    result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
-    
-    resources = []
-    for node in nodes:
-        res = {}
-        res['pl_hostname'] = node['hostname']
-        res['pl_boot_state'] = node['boot_state']
-        res['pl_last_contact'] = node['last_contact']
-        if node['last_contact'] is not None:
-            res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
-        sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
-        res['geni_urn'] = sliver_id
-        if node['boot_state'] == 'boot':
-            res['geni_status'] = 'ready'
-        else:
-            res['geni_status'] = 'failed'
-            top_level_staus = 'failed' 
-            
-        res['geni_error'] = ''
-
-        resources.append(res)
-        
-    result['geni_status'] = top_level_status
-    result['geni_resources'] = resources
-    return result
-
-def CreateSliver(api, slice_xrn, creds, rspec_string, users, call_id):
-    """
-    Create the sliver[s] (slice) at this aggregate.    
-    Verify HRN and initialize the slice record in PLC if necessary.
-    """
-    if Callids().already_handled(call_id): return ""
-
-    aggregate = Aggregate(api)
-    slices = Slices(api)
-    (hrn, type) = urn_to_hrn(slice_xrn)
-    peer = slices.get_peer(hrn)
-    sfa_peer = slices.get_sfa_peer(hrn)
-    slice_record=None    
-    if users:
-        slice_record = users[0].get('slice_record', {})
-
-    # parse rspec
-    rspec = RSpec(rspec_string)
-    requested_attributes = rspec.version.get_slice_attributes()
-    
-    # ensure site record exists
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
-    # ensure slice record exists
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
-    # ensure person records exists
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-    # ensure slice attributes exists
-    slices.verify_slice_attributes(slice, requested_attributes)
-    
-    # add/remove slice from nodes
-    requested_slivers = [str(host) for host in rspec.version.get_nodes_with_slivers()]
-    slices.verify_slice_nodes(slice, requested_slivers, peer) 
-
-    # hanlde MyPLC peer association.
-    # only used by plc and ple.
-    slices.handle_peer(site, slice, persons, peer)
-    
-    return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
-
-
-def RenewSliver(api, xrn, creds, expiration_time, call_id):
-    if Callids().already_handled(call_id): return True
-    (hrn, type) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice = slices[0]
-    requested_time = utcparse(expiration_time)
-    record = {'expires': int(time.mktime(requested_time.timetuple()))}
-    try:
-        api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
-        return True
-    except:
-        return False
-
-def start_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice_id = slices[0]['slice_id']
-    slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
-    # just remove the tag if it exists
-    if slice_tags:
-        api.plshell.DeleteSliceTag(api.plauth, slice_tags[0]['slice_tag_id'])
-
-    return 1
-def stop_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice_id = slices[0]['slice_id']
-    slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'})
-    if not slice_tags:
-        api.plshell.AddSliceTag(api.plauth, slice_id, 'enabled', '0')
-    elif slice_tags[0]['value'] != "0":
-        tag_id = attributes[0]['slice_tag_id']
-        api.plshell.UpdateSliceTag(api.plauth, tag_id, '0')
-    return 1
-
-def reset_slice(api, xrn):
-    # XX not implemented at this interface
-    return 1
-
-def DeleteSliver(api, xrn, creds, call_id):
-    if Callids().already_handled(call_id): return ""
-    (hrn, type) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
-    if not slices:
-        return 1
-    slice = slices[0]
-
-    # determine if this is a peer slice
-    peer = peers.get_peer(api, hrn)
-    try:
-        if peer:
-            api.plshell.UnBindObjectFromPeer(api.plauth, 'slice', slice['slice_id'], peer)
-        api.plshell.DeleteSliceFromNodes(api.plauth, slicename, slice['node_ids'])
-    finally:
-        if peer:
-            api.plshell.BindObjectToPeer(api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
-    return 1
-
-# xxx Thierry : caching at the aggregate level sounds wrong...
-#caching=True
-caching=False
-def ListSlices(api, creds, call_id):
-    if Callids().already_handled(call_id): return []
-    # look in cache first
-    if caching and api.cache:
-        slices = api.cache.get('slices')
-        if slices:
-            return slices
-
-    # get data from db 
-    slices = api.plshell.GetSlices(api.plauth, {'peer_id': None}, ['name'])
-    slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
-    slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
-
-    # cache the result
-    if caching and api.cache:
-        api.cache.add('slices', slice_urns) 
-
-    return slice_urns
-    
-def ListResources(api, creds, options, call_id):
-    if Callids().already_handled(call_id): return ""
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', None)
-    (hrn, type) = urn_to_hrn(xrn)
-
-    version_manager = VersionManager()
-    # get the rspec's return format from options
-    rspec_version = version_manager.get_version(options.get('rspec_version'))
-    version_string = "rspec_%s" % (rspec_version.to_string())
-
-    #panos adding the info option to the caching key (can be improved)
-    if options.get('info'):
-        version_string = version_string + "_"+options.get('info', 'default')
-
-    # look in cache first
-    if caching and api.cache and not xrn:
-        rspec = api.cache.get(version_string)
-        if rspec:
-            api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
-            return rspec 
-
-    #panos: passing user-defined options
-    #print "manager options = ",options
-    aggregate = Aggregate(api, options)
-    rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version)
-
-    # cache the result
-    if caching and api.cache and not xrn:
-        api.cache.add(version_string, rspec)
-
-    return rspec
-
-
-def get_ticket(api, xrn, creds, rspec, users):
-
-    reg_objects = __get_registry_objects(xrn, creds, users)
-
-    slice_hrn, type = urn_to_hrn(xrn)
-    slices = Slices(api)
-    peer = slices.get_peer(slice_hrn)
-    sfa_peer = slices.get_sfa_peer(slice_hrn)
-
-    # get the slice record
-    registry = api.registries[api.hrn]
-    credential = api.getCredential()
-    records = registry.Resolve(xrn, credential)
-
-    # similar to CreateSliver, we must verify that the required records exist
-    # at this aggregate before we can issue a ticket
-    site_id, remote_site_id = slices.verify_site(registry, credential, slice_hrn,
-                                                 peer, sfa_peer, reg_objects)
-    slice = slices.verify_slice(registry, credential, slice_hrn, site_id,
-                                remote_site_id, peer, sfa_peer, reg_objects)
-
-    # make sure we get a local slice record
-    record = None
-    for tmp_record in records:
-        if tmp_record['type'] == 'slice' and \
-           not tmp_record['peer_authority']:
-            record = SliceRecord(dict=tmp_record)
-    if not record:
-        raise RecordNotFound(slice_hrn)
-
-    # get sliver info
-    slivers = Slices(api).get_slivers(slice_hrn)
-    if not slivers:
-        raise SliverDoesNotExist(slice_hrn)
-
-    # get initscripts
-    initscripts = []
-    data = {
-        'timestamp': int(time.time()),
-        'initscripts': initscripts,
-        'slivers': slivers
-    }
-
-    # create the ticket
-    object_gid = record.get_gid_object()
-    new_ticket = SfaTicket(subject = object_gid.get_subject())
-    new_ticket.set_gid_caller(api.auth.client_gid)
-    new_ticket.set_gid_object(object_gid)
-    new_ticket.set_issuer(key=api.key, subject=api.hrn)
-    new_ticket.set_pubkey(object_gid.get_pubkey())
-    new_ticket.set_attributes(data)
-    new_ticket.set_rspec(rspec)
-    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-    new_ticket.encode()
-    new_ticket.sign()
-
-    return new_ticket.save_to_string(save_parents=True)
-
-
-
-def main():
-    api = SfaAPI()
-    """
-    rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
-    #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
-    #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
-    print rspec
-    """
-    f = open(sys.argv[1])
-    xml = f.read()
-    f.close()
-    CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
-
-if __name__ == "__main__":
-    main()
index e45e509..15de0ce 100644 (file)
@@ -10,20 +10,20 @@ from types import StringTypes
 from sfa.util.faults import *
 from sfa.util.xrn import get_authority, hrn_to_urn, urn_to_hrn, Xrn, urn_to_sliver_id
 from sfa.util.plxrn import slicename_to_hrn, hrn_to_pl_slicename, hostname_to_urn
-from sfa.util.rspec import *
-from sfa.util.specdict import *
+#from sfa.util.rspec import *
+#from sfa.util.specdict import *
 from sfa.util.record import SfaRecord
 from sfa.util.policy import Policy
 from sfa.util.record import *
-from sfa.util.sfaticket import SfaTicket
+#from sfa.util.sfaticket import SfaTicket
 
-from sfa.senslab.slices import *
+#from sfa.senslab.slices import *
 
 from sfa.trust.credential import Credential
 import sfa.plc.peers as peers
 from sfa.plc.network import *
 from sfa.senslab.OARrestapi import *
-from sfa.senslab.api import SfaAPI
+#from sfa.senslab.api import SfaAPI
 #from sfa.plc.aggregate import Aggregate
 #from sfa.plc.slices import *
 from sfa.util.version import version_core
diff --git a/sfa/managers/aggregate_manager_vini.py b/sfa/managers/aggregate_manager_vini.py
deleted file mode 100644 (file)
index 7f36419..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-import datetime
-import time
-import traceback
-import sys
-
-from types import StringTypes
-from sfa.util.xrn import urn_to_hrn, Xrn
-from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.record import SfaRecord
-from sfa.util.policy import Policy
-from sfa.util.record import *
-from sfa.util.sfaticket import SfaTicket
-from sfa.server.registry import Registries
-from sfa.plc.slices import Slices
-import sfa.plc.peers as peers
-from sfa.managers.vini.vini_network import *
-from sfa.plc.vini_aggregate import ViniAggregate
-from sfa.rspecs.version_manager import VersionManager
-from sfa.plc.api import SfaAPI
-from sfa.plc.slices import *
-from sfa.managers.aggregate_manager_pl import __get_registry_objects, __get_hostnames
-from sfa.util.version import version_core
-from sfa.util.callids import Callids
-
-# VINI aggregate is almost identical to PLC aggregate for many operations, 
-# so lets just import the methods form the PLC manager
-from sfa.managers.aggregate_manager_pl import (
-start_slice, stop_slice, RenewSliver, reset_slice, ListSlices, get_ticket, SliverStatus)
-
-
-def GetVersion(api):
-    xrn=Xrn(api.hrn)
-    return version_core({'interface':'aggregate',
-                         'testbed':'myplc.vini',
-                         'hrn':xrn.get_hrn(),
-                         })
-
-def DeleteSliver(api, xrn, creds, call_id):
-    if Callids().already_handled(call_id): return ""
-    (hrn, type) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
-    if not slices:
-        return 1
-    slice = slices[0]
-
-    api.plshell.DeleteSliceFromNodes(api.plauth, slicename, slice['node_ids'])
-    return 1
-
-def CreateSliver(api, xrn, creds, xml, users, call_id):
-    """
-    Verify HRN and initialize the slice record in PLC if necessary.
-    """
-
-    if Callids().already_handled(call_id): return ""
-
-    hrn, type = urn_to_hrn(xrn)
-    peer = None
-    reg_objects = __get_registry_objects(xrn, creds, users)
-    slices = Slices(api)
-    peer = slices.get_peer(hrn)
-    sfa_peer = slices.get_sfa_peer(hrn)
-    registries = Registries(api)
-    registry = registries[api.hrn]
-    credential = api.getCredential()
-    site_id, remote_site_id = slices.verify_site(registry, credential, hrn, 
-                                                 peer, sfa_peer, reg_objects)
-    slice = slices.verify_slice(registry, credential, hrn, site_id, 
-                                remote_site_id, peer, sfa_peer, reg_objects)
-
-    network = ViniNetwork(api)
-
-    slice = network.get_slice(api, hrn)
-    current = __get_hostnames(slice.get_nodes())
-
-    network.addRSpec(xml, "/var/www/html/schemas/vini.rng")
-    #network.addRSpec(xml, "/root/SVN/sfa/trunk/sfa/managers/vini/vini.rng")
-    request = __get_hostnames(network.nodesWithSlivers())
-    
-    # remove nodes not in rspec
-    deleted_nodes = list(set(current).difference(request))
-
-    # add nodes from rspec
-    added_nodes = list(set(request).difference(current))
-
-    api.plshell.AddSliceToNodes(api.plauth, slice.name, added_nodes) 
-    api.plshell.DeleteSliceFromNodes(api.plauth, slice.name, deleted_nodes)
-    network.updateSliceTags()
-
-    # xxx - check this holds enough data for the client to understand what's happened
-    return network.toxml()
-
-def ListResources(api, creds, options,call_id):
-    if Callids().already_handled(call_id): return ""
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
-
-    version_manager = VersionManager()
-    # get the rspec's return format from options
-    rspec_version = version_manager.get_version(options.get('rspec_version'))
-    version_string = "rspec_%s" % (rspec_version.to_string())
-    
-    # look in cache first
-    if api.cache and not xrn:
-        rspec = api.cache.get(version_string)
-        if rspec:
-            api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
-            return rspec
-
-    aggregate = ViniAggregate(api, options) 
-    rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version)
-           
-    # cache the result
-    if api.cache and not xrn:
-        api.cache.add('nodes', rspec)
-
-    return rspec
-
-def main():
-    api = SfaAPI()
-    """
-    #rspec = ListResources(api, None, None,)
-    rspec = ListResources(api, "plc.princeton.iias", None, 'vini_test')
-    print rspec
-    """
-    f = open(sys.argv[1])
-    xml = f.read()
-    f.close()
-    CreateSliver(api, "plc.princeton.iias", xml, 'call-id-iias')
-
-if __name__ == "__main__":
-    main()
index 6100e76..164b2d6 100644 (file)
@@ -1,9 +1,8 @@
-import os
 import xmlrpclib
 
-from sfa.util.faults import *
+from sfa.util.faults import SliverDoesNotExist
 from sfa.util.plxrn import PlXrn
-from sfa.util.sfaticket import SfaTicket
+from sfa.trust.sfaticket import SfaTicket
 from sfa.util.version import version_core
 
 def GetVersion(api):
@@ -30,26 +29,26 @@ def SliverStatus(api, slice_xrn, creds):
            
 def start_slice(api, xrn, creds):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanger.Start(slicename)
+    api.driver.nodemanager.Start(slicename)
 
 def stop_slice(api, xrn, creds):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanager.Stop(slicename)
+    api.driver.nodemanager.Stop(slicename)
 
 def DeleteSliver(api, xrn, creds, call_id):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanager.Destroy(slicename)
+    api.driver.nodemanager.Destroy(slicename)
 
 def reset_slice(api, xrn):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
     if not api.sliver_exists(slicename):
         raise SliverDoesNotExist(slicename)
-    api.nodemanager.ReCreate(slicename)
+    api.driver.nodemanager.ReCreate(slicename)
  
 # xxx outdated - this should accept a credential & call_id
 def ListSlices(api):
     # this returns a tuple, the data we want is at index 1 
-    xids = api.nodemanager.GetXIDs()
+    xids = api.driver.nodemanager.GetXIDs()
     # unfortunately the data we want is given to us as 
     # a string but we really want it as a dict
     # lets eval it
@@ -66,6 +65,6 @@ def redeem_ticket(api, ticket_string):
 
     # convert ticket to format nm is used to
     nm_ticket = xmlrpclib.dumps((ticket.attributes,), methodresponse=True)
-    api.nodemanager.AdminTicket(nm_ticket)
+    api.driver.nodemanager.AdminTicket(nm_ticket)
     
 
diff --git a/sfa/managers/managerwrapper.py b/sfa/managers/managerwrapper.py
new file mode 100644 (file)
index 0000000..b0326d6
--- /dev/null
@@ -0,0 +1,34 @@
+from types import ModuleType, ClassType
+
+from sfa.util.faults import SfaNotImplemented, SfaAPIError
+from sfa.util.sfalogging import logger
+
+####################
+class ManagerWrapper:
+    """
+    This class acts as a wrapper around an SFA interface manager module, but
+    can be used with any python module. The purpose of this class is to raise
+    an SfaNotImplemented exception if someone attempts to use an attribute
+    (which could be a callable) that is not available in the wrapped library,
+    by checking the library with hasattr. This gives users and developers a
+    clearer error message when a specific operation is not implemented by a
+    library, and is generally more helpful than the standard AttributeError.
+    """
+    def __init__(self, manager, interface):
+        if isinstance (manager, ModuleType):
+            # old-fashioned module implementation
+            self.manager = manager
+        elif isinstance (manager, ClassType):
+            # create an instance; we don't pass the api in argument as it is passed 
+            # to the actual method calls anyway
+            self.manager = manager()
+        else:
+            raise SfaAPIError,"Argument to ManagerWrapper must be a module or class"
+        self.interface = interface
+        
+    def __getattr__(self, method):
+        if not hasattr(self.manager, method):
+            raise SfaNotImplemented(method, self.interface)
+        return getattr(self.manager, method)
+        
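A short behavioural sketch of ManagerWrapper (not part of this patch). It assumes the class-based AggregateManagerMax introduced earlier in this commit, the module paths shown in the imports, and pre-existing api and creds objects; the options and call-id values are hypothetical:

    from sfa.managers.managerwrapper import ManagerWrapper
    from sfa.managers.aggregate_manager_max import AggregateManagerMax

    # wrapping a class: ManagerWrapper instantiates it with no arguments
    wrapped = ManagerWrapper(AggregateManagerMax, 'aggregate')

    # attributes that exist on the wrapped instance are forwarded as-is
    rspec = wrapped.ListResources(api, creds, {'geni_slice_urn': None}, 'call-id-1')

    # attributes that do not exist raise SfaNotImplemented('NoSuchCall', 'aggregate')
    # rather than the bare AttributeError a plain instance would give
    wrapped.NoSuchCall(api)
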
similarity index 83%
rename from sfa/managers/registry_manager_pl.py
rename to sfa/managers/registry_manager.py
index 6052eee..5888b2b 100644 (file)
@@ -1,18 +1,19 @@
 import types
 import time 
 
-from sfa.util.faults import *
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+    UnknownSfaType, ExistingRecord
 from sfa.util.prefixTree import prefixTree
 from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
-from sfa.util.record import SfaRecord
-from sfa.trust.gid import GID 
 from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn
 from sfa.util.plxrn import hrn_to_pl_login_base
+from sfa.util.version import version_core
+
+from sfa.trust.gid import GID 
 from sfa.trust.credential import Credential
-from sfa.trust.certificate import Certificate, Keypair
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
 from sfa.trust.gid import create_uuid
-from sfa.util.version import version_core
 
 # The GENI GetVersion call
 def GetVersion(api):
@@ -45,7 +46,7 @@ def get_credential(api, xrn, type, is_self=False):
 
     # verify_cancreate_credential requires that the member lists
     # (researchers, pis, etc) be filled in
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record, api.aggregates)
     if record['type']=='user':
        if not record['enabled']:
           raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
@@ -118,7 +119,9 @@ def resolve(api, xrns, type=None, full=True):
         xrns = xrn_dict[registry_hrn]
         if registry_hrn != api.hrn:
             credential = api.getCredential()
-            peer_records = registries[registry_hrn].Resolve(xrns, credential)
+            interface = api.registries[registry_hrn]
+            server = api.server_proxy(interface, credential)
+            peer_records = server.Resolve(xrns, credential)
             records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
 
     # try resolving the remaining unfound records at the local registry
@@ -128,7 +131,7 @@ def resolve(api, xrns, type=None, full=True):
     table = SfaTable()
     local_records = table.findObjects({'hrn': remaining_hrns})
     if full:
-        api.fill_record_info(local_records)
+        api.driver.fill_record_info(local_records, api.aggregates)
     
     # convert local record objects to dicts
     records.extend([dict(record) for record in local_records])
@@ -154,13 +157,14 @@ def list(api, xrn, origin_hrn=None):
     #if there was no match then this record belongs to an unknown registry
     if not registry_hrn:
         raise MissingAuthority(xrn)
-    
     # if the best match (longest matching hrn) is not the local registry,
     # forward the request
     records = []    
     if registry_hrn != api.hrn:
         credential = api.getCredential()
-        record_list = registries[registry_hrn].List(xrn, credential)
+        interface = api.registries[registry_hrn]
+        server = api.server_proxy(interface, credential)
+        record_list = server.List(xrn, credential)
         records = [SfaRecord(dict=record).as_dict() for record in record_list]
     
     # if we still have not found the record yet, try the local registry
@@ -230,10 +234,10 @@ def register(api, record):
         # get the GID from the newly created authority
         gid = auth_info.get_gid_object()
         record.set_gid(gid.save_to_string(save_parents=True))
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
-        sites = api.plshell.GetSites(api.plauth, [pl_record['login_base']])
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+        sites = api.driver.GetSites([pl_record['login_base']])
         if not sites:
-            pointer = api.plshell.AddSite(api.plauth, pl_record)
+            pointer = api.driver.AddSite(pl_record)
         else:
             pointer = sites[0]['site_id']
 
@@ -242,45 +246,45 @@ def register(api, record):
 
     elif (type == "slice"):
         acceptable_fields=['url', 'instantiation', 'name', 'description']
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
         for key in pl_record.keys():
             if key not in acceptable_fields:
                 pl_record.pop(key)
-        slices = api.plshell.GetSlices(api.plauth, [pl_record['name']])
+        slices = api.driver.GetSlices([pl_record['name']])
         if not slices:
-             pointer = api.plshell.AddSlice(api.plauth, pl_record)
+             pointer = api.driver.AddSlice(pl_record)
         else:
              pointer = slices[0]['slice_id']
         record.set_pointer(pointer)
         record['pointer'] = pointer
 
     elif  (type == "user"):
-        persons = api.plshell.GetPersons(api.plauth, [record['email']])
+        persons = api.driver.GetPersons([record['email']])
         if not persons:
-            pointer = api.plshell.AddPerson(api.plauth, dict(record))
+            pointer = api.driver.AddPerson(dict(record))
         else:
             pointer = persons[0]['person_id']
 
         if 'enabled' in record and record['enabled']:
-            api.plshell.UpdatePerson(api.plauth, pointer, {'enabled': record['enabled']})
+            api.driver.UpdatePerson(pointer, {'enabled': record['enabled']})
         # add this persons to the site only if he is being added for the first
         # time by sfa and doesont already exist in plc
         if not persons or not persons[0]['site_ids']:
             login_base = get_leaf(record['authority'])
-            api.plshell.AddPersonToSite(api.plauth, pointer, login_base)
+            api.driver.AddPersonToSite(pointer, login_base)
 
         # What roles should this user have?
-        api.plshell.AddRoleToPerson(api.plauth, 'user', pointer)
+        api.driver.AddRoleToPerson('user', pointer)
         # Add the user's key
         if pub_key:
-            api.plshell.AddPersonKey(api.plauth, pointer, {'key_type' : 'ssh', 'key' : pub_key})
+            api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
 
     elif (type == "node"):
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
         login_base = hrn_to_pl_login_base(record['authority'])
-        nodes = api.plshell.GetNodes(api.plauth, [pl_record['hostname']])
+        nodes = api.driver.GetNodes([pl_record['hostname']])
         if not nodes:
-            pointer = api.plshell.AddNode(api.plauth, login_base, pl_record)
+            pointer = api.driver.AddNode(login_base, pl_record)
         else:
             pointer = nodes[0]['node_id']
 
@@ -290,7 +294,7 @@ def register(api, record):
     record['record_id'] = record_id
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(None, record)
+    api.driver.update_membership(None, record)
 
     return record.get_gid_object().save_to_string(save_parents=True)
 
@@ -309,7 +313,7 @@ def update(api, record_dict):
 
     # Update_membership needs the membership lists in the existing record
     # filled in, so it can see if members were added or removed
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record, api.aggregates)
 
     # Use the pointer from the existing record, not the one that the user
     # gave us. This prevents the user from inserting a forged pointer
@@ -317,13 +321,13 @@ def update(api, record_dict):
     # update the PLC information that was specified with the record
 
     if (type == "authority"):
-        api.plshell.UpdateSite(api.plauth, pointer, new_record)
+        api.driver.UpdateSite(pointer, new_record)
 
     elif type == "slice":
-        pl_record=api.sfa_fields_to_pl_fields(type, hrn, new_record)
+        pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record)
         if 'name' in pl_record:
             pl_record.pop('name')
-            api.plshell.UpdateSlice(api.plauth, pointer, pl_record)
+            api.driver.UpdateSlice(pointer, pl_record)
 
     elif type == "user":
         # SMBAKER: UpdatePerson only allows a limited set of fields to be
@@ -336,14 +340,14 @@ def update(api, record_dict):
                        'password', 'phone', 'url', 'bio', 'accepted_aup',
                        'enabled']:
                 update_fields[key] = all_fields[key]
-        api.plshell.UpdatePerson(api.plauth, pointer, update_fields)
+        api.driver.UpdatePerson(pointer, update_fields)
 
         if 'key' in new_record and new_record['key']:
             # must check this key against the previous one if it exists
-            persons = api.plshell.GetPersons(api.plauth, [pointer], ['key_ids'])
+            persons = api.driver.GetPersons([pointer], ['key_ids'])
             person = persons[0]
             keys = person['key_ids']
-            keys = api.plshell.GetKeys(api.plauth, person['key_ids'])
+            keys = api.driver.GetKeys(person['key_ids'])
             key_exists = False
             if isinstance(new_record['key'], types.ListType):
                 new_key = new_record['key'][0]
@@ -353,11 +357,11 @@ def update(api, record_dict):
             # Delete all stale keys
             for key in keys:
                 if new_record['key'] != key['key']:
-                    api.plshell.DeleteKey(api.plauth, key['key_id'])
+                    api.driver.DeleteKey(key['key_id'])
                 else:
                     key_exists = True
             if not key_exists:
-                api.plshell.AddPersonKey(api.plauth, pointer, {'key_type': 'ssh', 'key': new_key})
+                api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
 
             # update the openssl key and gid
             pkey = convert_public_key(new_key)
@@ -369,13 +373,13 @@ def update(api, record_dict):
             table.update(record)
 
     elif type == "node":
-        api.plshell.UpdateNode(api.plauth, pointer, new_record)
+        api.driver.UpdateNode(pointer, new_record)
 
     else:
         raise UnknownSfaType(type)
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(record, new_record)
+    api.driver.update_membership(record, new_record)
     
     return 1 
 
@@ -407,20 +411,20 @@ def remove(api, xrn, origin_hrn=None):
                 except:
                     pass
     if type == "user":
-        persons = api.plshell.GetPersons(api.plauth, record['pointer'])
+        persons = api.driver.GetPersons(record['pointer'])
         # only delete this person if he has site ids. if he doesnt, it probably means
         # he was just removed from a site, not actually deleted
         if persons and persons[0]['site_ids']:
-            api.plshell.DeletePerson(api.plauth, record['pointer'])
+            api.driver.DeletePerson(record['pointer'])
     elif type == "slice":
-        if api.plshell.GetSlices(api.plauth, record['pointer']):
-            api.plshell.DeleteSlice(api.plauth, record['pointer'])
+        if api.driver.GetSlices(record['pointer']):
+            api.driver.DeleteSlice(record['pointer'])
     elif type == "node":
-        if api.plshell.GetNodes(api.plauth, record['pointer']):
-            api.plshell.DeleteNode(api.plauth, record['pointer'])
+        if api.driver.GetNodes(record['pointer']):
+            api.driver.DeleteNode(record['pointer'])
     elif type == "authority":
-        if api.plshell.GetSites(api.plauth, record['pointer']):
-            api.plshell.DeleteSite(api.plauth, record['pointer'])
+        if api.driver.GetSites(record['pointer']):
+            api.driver.DeleteSite(record['pointer'])
     else:
         raise UnknownSfaType(type)
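The rename is mostly mechanical: every api.plshell.X(api.plauth, ...) call becomes api.driver.X(...), and fill_record_info/update_membership move onto the driver as well. A hedged sketch of a shim that would support this calling convention, assuming the driver simply binds the PLC auth once; the class name and wiring are illustrative, not the actual driver:

    # hypothetical shim: bind api.plauth once so call sites can drop it
    class PlDriverShim:
        def __init__(self, plshell, plauth):
            self.plshell = plshell
            self.plauth = plauth
        def __getattr__(self, name):
            plc_method = getattr(self.plshell, name)
            def call(*args):
                # e.g. driver.GetSites([login_base]) -> plshell.GetSites(plauth, [login_base])
                return plc_method(self.plauth, *args)
            return call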
 
index f3f18f7..903f49c 100644 (file)
@@ -2,26 +2,27 @@ import types
 import time 
 import sys
 
-from sfa.util.faults import *
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+    UnknownSfaType, ExistingRecord
 from sfa.util.prefixTree import prefixTree
 from sfa.util.record import SfaRecord
 from sfa.senslab.table_slab import SfaTable
-from sfa.util.record import SfaRecord
-from sfa.trust.gid import GID 
 from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn
-from sfa.util.plxrn import hrn_to_pl_login_base
+from sfa.util.version import version_core
+
+from sfa.trust.gid import GID 
 from sfa.trust.credential import Credential
-from sfa.trust.certificate import Certificate, Keypair
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
 from sfa.trust.gid import create_uuid
-from sfa.util.version import version_core
-from sfa.senslab.api import *
 
 #myapi=SfaAPI()
 # The GENI GetVersion call
 def GetVersion(api):
     
-    peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems() 
-                   if peername != api.hrn])
+    # Bugfix TP 09/11/2011
+    #peers =dict ([ (peername,v._ServerProxy__host) for (peername,v) in api.registries.iteritems()
+    peers =dict ([ (peername,v.get_url()) for (peername,v) in api.registries.iteritems()
+        if peername != api.hrn])
     xrn=Xrn(api.hrn)
     return version_core({'interface':'registry',
                          'hrn':xrn.get_hrn(),
@@ -34,7 +35,7 @@ def get_credential(api, xrn, type, is_self=False):
         hrn = urn_to_hrn(xrn)[0]
     else:
         hrn, type = urn_to_hrn(xrn)
-
+        
     # Is this a root or sub authority
     auth_hrn = api.auth.get_authority(hrn)
     print>> sys.stderr , " \r\n        REGISTRY get_credential auth_hrn:" , auth_hrn,"hrn : ", hrn, " Type : ", type, "is self : " , is_self,"<<"
@@ -52,7 +53,7 @@ def get_credential(api, xrn, type, is_self=False):
 
     # verify_cancreate_credential requires that the member lists
     # (researchers, pis, etc) be filled in
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record, api.aggregates)
     record['enabled'] = True
     print>> sys.stderr , " \r\n    ++    REGISTRY get_credential hrn %s record['enabled'] %s is_self %s" %(hrn, record['enabled'], is_self)    
     if record['type']=='user':
@@ -105,7 +106,7 @@ def get_credential(api, xrn, type, is_self=False):
 def resolve(api, xrns, type=None, full=True):
 
     # load all known registry names into a prefix tree and attempt to find
-    # the longest matching prefix 
+    # the longest matching prefix
     print >>sys.stderr , '\t\t REGISTRY MANAGER : resolve=========xrns ', xrns
     if not isinstance(xrns, types.ListType):
         if not type:
@@ -130,10 +131,9 @@ def resolve(api, xrns, type=None, full=True):
         xrn_dict[registry_hrn].append(xrn)
        print >>sys.stderr, '\t\tREGISTRY MANAGER  *****xrn_dict[registry_hrn] ',xrn_dict[registry_hrn]
     records = [] 
-
     for registry_hrn in xrn_dict:
         # skip the hrn without a registry hrn
-        # XX should we let the user know the authority is unknown?   
+        # XX should we let the user know the authority is unknown?       
        print >>sys.stderr, '\t\t registry_hrn in xrn_dict ', registry_hrn    
         if not registry_hrn:
             continue
@@ -143,14 +143,15 @@ def resolve(api, xrns, type=None, full=True):
         xrns = xrn_dict[registry_hrn]
         if registry_hrn != api.hrn:
             credential = api.getCredential()
-            peer_records = registries[registry_hrn].Resolve(xrns, credential)
+            interface = api.registries[registry_hrn]
+            server = api.server_proxy(interface, credential)
+            peer_records = server.Resolve(xrns, credential)
             print >>sys.stderr , '\t\t peer_records ', peer_records
             records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
 
     print >>sys.stderr,'\t\t hrns ' , hrns
     # try resolving the remaining unfound records at the local registry
     remaining_hrns = set(hrns).difference([record['hrn'] for record in records])
-
     # convert set to list
     remaining_hrns = [hrn for hrn in remaining_hrns] 
     print >>sys.stderr, '\t\t remaining_hrns', remaining_hrns
@@ -163,9 +164,9 @@ def resolve(api, xrns, type=None, full=True):
                    
     if full:
        print >>sys.stderr, '\r\n \r\n REGISTRY:_FULL', api     
-        api.fill_record_info(local_records)
-       
   
+        api.driver.fill_record_info(local_records)
+    
     # convert local record objects to dicts
     records.extend([dict(record) for record in local_records])
     print >>sys.stderr, "\r\n \t\t records extends %s" %(records)      
@@ -191,13 +192,15 @@ def list(api, xrn, origin_hrn=None):
     #if there was no match then this record belongs to an unknow registry
     if not registry_hrn:
         raise MissingAuthority(xrn)
-    
     # if the best match (longest matching hrn) is not the local registry,
     # forward the request
     records = []    
     if registry_hrn != api.hrn:
         credential = api.getCredential()
-        record_list = registries[registry_hrn].List(xrn, credential)
+        print>>sys.stderr, "Registries : ", api.registries
+        interface = api.registries[registry_hrn]
+        server = api.server_proxy(interface, credential)
+        record_list = server.List(xrn, credential)
         records = [SfaRecord(dict=record).as_dict() for record in record_list]
     
     # if we still have not found the record yet, try the local registry
@@ -213,20 +216,18 @@ def list(api, xrn, origin_hrn=None):
 
 def register(api, record):
 
-    #hrn = str(record['hrn']).strip("['']")
-    #type_of_rec = str( record['type']).strip("['']")
-    hrn = record['hrn']
-    type_of_rec =record['type']
  
-    urn = hrn_to_urn(hrn,type_of_rec)
+    #hrn, type = record['hrn'], record['type']
+    hrn = str(record['hrn']).strip("['']")
+    type = str( record['type']).strip("['']")
+    urn = hrn_to_urn(hrn,type)
     # validate the type
-    if type_of_rec not in ['authority', 'slice', 'node', 'user']:
-        raise UnknownSfaType(type_of_rec) 
+    if type not in ['authority', 'slice', 'node', 'user']:
+        raise UnknownSfaType(type) 
     
     # check if record already exists
     table = SfaTable()
-    existing_records = table.find({'type': type_of_rec, 'hrn': hrn})
+    existing_records = table.find({'type': type, 'hrn': hrn})
     if existing_records:
         raise ExistingRecord(hrn)
        
@@ -236,11 +237,11 @@ def register(api, record):
     #record['authority'] = get_authority(record['hrn'])
     record['authority'] = get_authority(hrn)
     
-    type_of_rec = record['type']
-    hrn = record['hrn']
+    #type_of_rec = record['type']
+    #hrn = record['hrn']
     
-    api.auth.verify_object_permission(hrn)
-    #myapi.auth.verify_object_permission( record['hrn'])
+    #api.auth.verify_object_permission(hrn)
+    api.auth.verify_object_permission( record['hrn'])
     auth_info = api.auth.get_auth_info(record['authority'])
   
     
@@ -262,10 +263,9 @@ def register(api, record):
         record['gid'] = gid
         record.set_gid(gid)
        print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY   record['gid']  %s" %(record['gid'])   
-       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register type_of_rec %s"%(type_of_rec)
-       
-    if type_of_rec in ["authority"]:   
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register type %s"%(type)
 
+    if type in ["authority"]:
         # update the tree
         if not api.auth.hierarchy.auth_exists(hrn):
             api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
@@ -274,63 +274,68 @@ def register(api, record):
         gid = auth_info.get_gid_object()
         record.set_gid(gid.save_to_string(save_parents=True))
        
-        #pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
-       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register : type_of_rec in [authority ] sfa_fields_to_pl_fields FIELDS A CHANGER"    
+        #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+       print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register : type in [authority ] sfa_fields_to_pl_fields FIELDS TO CHANGE"
        
-        sites = api.oar.GetSites( [pl_record['login_base']])
+        # thierry: ideally we'd like to be able to write api.driver.GetSites
+        # in which case the code would become mostly the same as for pl
+        sites = api.driver.GetSites([pl_record['login_base']])
         if not sites:
-            pointer = api.oar.AddSite( pl_record)
+            # thierry
+            # Error (E0601, register): Using variable 'pl_record' before assignment
+            pointer = api.driver.AddSite( pl_record)
         else:
             pointer = sites[0]['site_id']
 
         record.set_pointer(pointer)
         record['pointer'] = pointer
 
-    elif (type_of_rec == "slice"):
+    elif (type == "slice"):
         acceptable_fields=['url', 'instantiation', 'name', 'description']
-        pl_record = api.sfa_fields_to_pl_fields(type_of_rec, hrn, record)
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
        print>>sys.stderr, " \r\n \r\n ----------- REGISTRY_MANAGER_SLAB.PY  register  slice pl_record %s"%(pl_record)
         for key in pl_record.keys():
             if key not in acceptable_fields:
                 pl_record.pop(key)
-        slices = api.users.GetSlices( [pl_record['name']])
+        slices = api.driver.GetSlices([pl_record['name']])
         if not slices:
-             pointer = api.users.AddSlice(pl_record)
+             pointer = api.driver.AddSlice(pl_record)
         else:
              pointer = slices[0]['slice_id']
         record.set_pointer(pointer)
         record['pointer'] = pointer
 
-    elif  (type_of_rec == "user"):
-        persons = api.users.GetPersons( [record['email']]) 
+    elif  (type == "user"):
+        persons = api.driver.GetPersons([record['email']])
        if not persons:
            print>>sys.stderr, "  \r\n \r\n ----------- registry_manager_slab  register NO PERSON ADD TO LDAP?"
       
         #if not persons:
-            #pointer = api.users.AddPerson( dict(record))
+            #pointer = api.driver.AddPerson( dict(record))
         #else:
             #pointer = persons[0]['person_id']
 
         if 'enabled' in record and record['enabled']:
-            api.users.UpdatePerson( pointer, {'enabled': record['enabled']})
+            api.driver.UpdatePerson(pointer, {'enabled': record['enabled']})
         # add this persons to the site only if he is being added for the first
         # time by sfa and doesont already exist in plc
         if not persons or not persons[0]['site_ids']:
             login_base = get_leaf(record['authority'])
-            api.users.AddPersonToSite( pointer, login_base)
+
+            api.driver.AddPersonToSite(pointer, login_base)
 
         # What roles should this user have?
-        api.users.AddRoleToPerson( 'user', pointer)
+        api.driver.AddRoleToPerson('user', pointer)
         # Add the user's key
         if pub_key:
-            api.users.AddPersonKey( pointer, {'key_type' : 'ssh', 'key' : pub_key})
+            api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
 
     #elif (type == "node"):
-        #pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
+        #pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
         #login_base = hrn_to_pl_login_base(record['authority'])
-        #nodes = api.oar.GetNodes( [pl_record['hostname']])
+        #nodes = api.driver.GetNodes([pl_record['hostname']])
         #if not nodes:
-            #pointer = api.oar.AddNode(login_base, pl_record)
+            #pointer = api.driver.AddNode(login_base, pl_record)
         #else:
             #pointer = nodes[0]['node_id']
 
@@ -340,7 +345,7 @@ def register(api, record):
     #record['record_id'] = record_id
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(None, record)
+    api.driver.update_membership(None, record)
 
     return record.get_gid_object().save_to_string(save_parents=True)
 
@@ -360,7 +365,7 @@ def update(api, record_dict):
 
     # Update_membership needs the membership lists in the existing record
     # filled in, so it can see if members were added or removed
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record)
 
     # Use the pointer from the existing record, not the one that the user
     # gave us. This prevents the user from inserting a forged pointer
@@ -368,13 +373,13 @@ def update(api, record_dict):
     # update the PLC information that was specified with the record
 
     if (type == "authority"):
-        api.oar.UpdateSite( pointer, new_record)
+        api.driver.UpdateSite(pointer, new_record)
 
     elif type == "slice":
-        pl_record=api.sfa_fields_to_pl_fields(type, hrn, new_record)
+        pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record)
         if 'name' in pl_record:
             pl_record.pop('name')
-            api.users.UpdateSlice( pointer, pl_record)
+            api.driver.UpdateSlice(pointer, pl_record)
 
     elif type == "user":
         # SMBAKER: UpdatePerson only allows a limited set of fields to be
@@ -387,14 +392,14 @@ def update(api, record_dict):
                        'password', 'phone', 'url', 'bio', 'accepted_aup',
                        'enabled']:
                 update_fields[key] = all_fields[key]
-        api.users.UpdatePerson( pointer, update_fields)
+        api.driver.UpdatePerson(pointer, update_fields)
 
         if 'key' in new_record and new_record['key']:
             # must check this key against the previous one if it exists
-            persons = api.users.GetPersons( [pointer], ['key_ids'])
+            persons = api.driver.GetPersons([pointer], ['key_ids'])
             person = persons[0]
             keys = person['key_ids']
-            keys = api.users.GetKeys( person['key_ids'])
+            keys = api.driver.GetKeys(person['key_ids'])
             key_exists = False
             if isinstance(new_record['key'], types.ListType):
                 new_key = new_record['key'][0]
@@ -404,11 +409,11 @@ def update(api, record_dict):
             # Delete all stale keys
             for key in keys:
                 if new_record['key'] != key['key']:
-                    api.users.DeleteKey( key['key_id'])
+                    api.driver.DeleteKey(key['key_id'])
                 else:
                     key_exists = True
             if not key_exists:
-                api.users.AddPersonKey( pointer, {'key_type': 'ssh', 'key': new_key})
+                api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
 
             # update the openssl key and gid
             pkey = convert_public_key(new_key)
@@ -420,13 +425,13 @@ def update(api, record_dict):
             table.update(record)
 
     elif type == "node":
-        api.oar.UpdateNode( pointer, new_record)
+        api.driver.UpdateNode(pointer, new_record)
 
     else:
         raise UnknownSfaType(type)
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(record, new_record)
+    api.driver.update_membership(record, new_record)
     
     return 1 
 
@@ -458,20 +463,20 @@ def remove(api, xrn, origin_hrn=None):
                 except:
                     pass
     if type == "user":
-        persons = api.users.GetPersons(record['pointer'])
+        persons = api.driver.GetPersons(record['pointer'])
         # only delete this person if he has site ids. if he doesnt, it probably means
         # he was just removed from a site, not actually deleted
         if persons and persons[0]['site_ids']:
-            api.users.DeletePerson(record['pointer'])
+            api.driver.DeletePerson(record['pointer'])
     elif type == "slice":
-        if api.users.GetSlices( record['pointer']):
-            api.users.DeleteSlice( record['pointer'])
+        if api.driver.GetSlices(record['pointer']):
+            api.driver.DeleteSlice(record['pointer'])
     elif type == "node":
-        if api.oar.GetNodes( record['pointer']):
-            api.oar.DeleteNode( record['pointer'])
+        if api.driver.GetNodes(record['pointer']):
+            api.driver.DeleteNode(record['pointer'])
     elif type == "authority":
-        if api.oar.GetSites( record['pointer']):
-            api.oar.DeleteSite( record['pointer'])
+        if api.driver.GetSites(record['pointer']):
+            api.driver.DeleteSite(record['pointer'])
     else:
         raise UnknownSfaType(type)
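Both registry managers now reach peer registries the same way: look the peer up in api.registries, build a proxy with api.server_proxy(interface, credential), and call Resolve or List over XML-RPC. A condensed sketch of that forwarding step (the helper name is illustrative):

    # hedged sketch of the peer-forwarding step used by resolve() and list()
    def forward_resolve(api, registry_hrn, xrns):
        credential = api.getCredential()
        interface = api.registries[registry_hrn]
        server = api.server_proxy(interface, credential)
        # XML-RPC call against the peer registry
        return server.Resolve(xrns, credential)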
 
diff --git a/sfa/managers/slice_manager.py b/sfa/managers/slice_manager.py
new file mode 100644 (file)
index 0000000..3d6c0a6
--- /dev/null
@@ -0,0 +1,550 @@
+import sys
+import time
+import traceback
+from StringIO import StringIO
+from copy import copy
+from lxml import etree
+
+from sfa.trust.sfaticket import SfaTicket
+from sfa.trust.credential import Credential
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, urn_to_hrn
+from sfa.util.version import version_core
+from sfa.util.callids import Callids
+
+from sfa.server.threadmanager import ThreadManager
+
+from sfa.rspecs.rspec_converter import RSpecConverter
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec 
+from sfa.client.client_helper import sfa_to_pg_users_arg
+
+class SliceManager:
+    def __init__ (self):
+    #    self.caching=False
+        self.caching=True
+        
+    
+    def _call_id_supported(self, api, server):
+        """
+        Returns true if server support the optional call_id arg, false otherwise.
+        """
+        server_version = api.get_cached_server_version(server)
+    
+        if 'sfa' in server_version:
+            code_tag = server_version['code_tag']
+            code_tag_parts = code_tag.split("-")
+    
+            version_parts = code_tag_parts[0].split(".")
+            major, minor = version_parts[0:2]
+            rev = code_tag_parts[1]
+            if int(major) > 1:
+                if int(minor) > 0 or int(rev) > 20:
+                    return True
+        return False
+    
+    # we have specialized xmlrpclib.ServerProxy to remember the input url
+    # OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+    def get_serverproxy_url (self, server):
+        try:
+            return server.get_url()
+        except:
+            logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+            return server._ServerProxy__host + server._ServerProxy__handler
+    
+    def GetVersion(self, api):
+        # peers explicitly in aggregates.xml
+        peers =dict ([ (peername,self.get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+                       if peername != api.hrn])
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn, 'authority+sa')
+        version_more = {'interface':'slicemgr',
+                        'hrn' : xrn.get_hrn(),
+                        'urn' : xrn.get_urn(),
+                        'peers': peers,
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
+                    }
+        sm_version=version_core(version_more)
+        # local aggregate if present needs to have localhost resolved
+        if api.hrn in api.aggregates:
+            local_am_url=self.get_serverproxy_url(api.aggregates[api.hrn])
+            sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+        return sm_version
+    
+    def drop_slicemgr_stats(self, rspec):
+        try:
+            stats_elements = rspec.xml.xpath('//statistics')
+            for node in stats_elements:
+                node.getparent().remove(node)
+        except Exception, e:
+            logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
+    
+    def add_slicemgr_stat(self, rspec, callname, aggname, elapsed, status, exc_info=None):
+        try:
+            stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
+            if stats_tags:
+                stats_tag = stats_tags[0]
+            else:
+                stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname)
+
+            stat_tag = etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status))
+
+            if exc_info:
+                exc_tag = etree.SubElement(stat_tag, "exc_info", name=str(exc_info[1]))
+
+                # formats the traceback as one big text blob
+                #exc_tag.text = "\n".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
+
+                # formats the traceback as a set of xml elements
+                tb = traceback.extract_tb(exc_info[2])
+                for item in tb:
+                    exc_frame = etree.SubElement(exc_tag, "tb_frame", filename=str(item[0]), line=str(item[1]), func=str(item[2]), code=str(item[3]))
+
+        except Exception, e:
+            logger.warn("add_slicemgr_stat failed on  %s: %s" %(aggname, str(e)))
+    
+    def ListResources(self, api, creds, options, call_id):
+        version_manager = VersionManager()
+        def _ListResources(aggregate, server, credential, opts, call_id):
+    
+            my_opts = copy(opts)
+            args = [credential, my_opts]
+            tStart = time.time()
+            try:
+                if self._call_id_supported(api, server):
+                    args.append(call_id)
+                version = api.get_cached_server_version(server)
+                # force ProtoGENI aggregates to give us a v2 RSpec
+                if 'sfa' not in version.keys():
+                    my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+                rspec = server.ListResources(*args)
+                return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+            except Exception, e:
+                api.logger.log_exc("ListResources failed at %s" %(server.url))
+                return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception", "exc_info": sys.exc_info()}
+    
+        if Callids().already_handled(call_id): return ""
+    
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', '')
+        (hrn, type) = urn_to_hrn(xrn)
+        if 'geni_compressed' in options:
+            del(options['geni_compressed'])
+    
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        version_string = "rspec_%s" % (rspec_version.to_string())
+    
+        # look in cache first
+        if self.caching and api.cache and not xrn:
+            rspec =  api.cache.get(version_string)
+            if rspec:
+                return rspec
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+    
+            # get the rspec from the aggregate
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_ListResources, aggregate, server, [cred], options, call_id)
+    
+    
+        results = threads.get_results()
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        if xrn:    
+            result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
+        else: 
+            result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
+        rspec = RSpec(version=result_version)
+        for result in results:
+            self.add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"], result.get("exc_info",None))
+            if result["status"]=="success":
+                try:
+                    rspec.version.merge(result["rspec"])
+                except:
+                    api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
+    
+        # cache the result
+        if self.caching and api.cache and not xrn:
+            api.cache.add(version_string, rspec.toxml())
+    
+        return rspec.toxml()
+    
+    
+    def CreateSliver(self, api, xrn, creds, rspec_str, users, call_id):
+    
+        version_manager = VersionManager()
+        def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
+            tStart = time.time()
+            try:
+                # Need to call GetVersion at an aggregate to determine the supported
+                # rspec type/format before calling CreateSliver at an aggregate.
+                server_version = api.get_cached_server_version(server)
+                requested_users = users
+                if 'sfa' not in server_version and 'geni_api' in server_version:
+                    # sfa aggregates support both sfa and pg rspecs, so no conversion is needed
+                    # if the aggregate supports sfa rspecs; otherwise convert to a pg rspec
+                    rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
+                    filter = {'component_manager_id': server_version['urn']}
+                    rspec.filter(filter)
+                    rspec = rspec.toxml()
+                    requested_users = sfa_to_pg_users_arg(users)
+                args = [xrn, credential, rspec, requested_users]
+                if self._call_id_supported(api, server):
+                    args.append(call_id)
+                rspec = server.CreateSliver(*args)
+                return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+            except:
+                logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url)
+                return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception", "exc_info": sys.exc_info()}
+    
+        if Callids().already_handled(call_id): return ""
+        # Validate the RSpec against PlanetLab's schema --disabled for now
+        # The schema used here needs to aggregate the PL and VINI schemas
+        # schema = "/var/www/html/schemas/pl.rng"
+        rspec = RSpec(rspec_str)
+    #    schema = None
+    #    if schema:
+    #        rspec.validate(schema)
+    
+        # if there is a <statistics> section, the aggregates don't care about it,
+        # so delete it.
+        self.drop_slicemgr_stats(rspec)
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+    
+        # get the caller's hrn
+        hrn, type = urn_to_hrn(xrn)
+        valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM 
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            # Just send entire RSpec to each aggregate
+            threads.run(_CreateSliver, aggregate, server, xrn, [cred], rspec.toxml(), users, call_id)
+                
+        results = threads.get_results()
+        manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
+        result_rspec = RSpec(version=manifest_version)
+        for result in results:
+            self.add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"], result.get("exc_info",None))
+            if result["status"]=="success":
+                try:
+                    result_rspec.version.merge(result["rspec"])
+                except:
+                    api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec")
+        return result_rspec.toxml()
+    
+    def RenewSliver(self, api, xrn, creds, expiration_time, call_id):
+        def _RenewSliver(server, xrn, creds, expiration_time, call_id):
+            server_version = api.get_cached_server_version(server)
+        args = [xrn, creds, expiration_time]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.RenewSliver(*args)
+    
+        if Callids().already_handled(call_id): return True
+    
+        (hrn, type) = urn_to_hrn(xrn)
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_RenewSliver, server, xrn, [cred], expiration_time, call_id)
+        # 'and' the results
+        return reduce (lambda x,y: x and y, threads.get_results() , True)
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        def _DeleteSliver(server, xrn, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [xrn, creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.DeleteSliver(*args)
+    
+        if Callids().already_handled(call_id): return ""
+        (hrn, type) = urn_to_hrn(xrn)
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_DeleteSliver, server, xrn, [cred], call_id)
+        threads.get_results()
+        return 1
+    
+    
+    # first draft of a merging SliverStatus
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        def _SliverStatus(server, xrn, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [xrn, creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.SliverStatus(*args)
+        
+        if Callids().already_handled(call_id): return {}
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run (_SliverStatus, server, slice_xrn, [cred], call_id)
+        results = threads.get_results()
+    
+        # get rid of any void result - e.g. when the call_id was already handled, where by convention we return {}
+        results = [ result for result in results if result and result['geni_resources']]
+    
+        # do not try to combine if there's no result
+        if not results : return {}
+    
+        # otherwise let's merge stuff
+        overall = {}
+    
+        # mmh, it is expected that all results carry the same urn
+        overall['geni_urn'] = results[0]['geni_urn']
+        overall['pl_login'] = results[0]['pl_login']
+        # append all geni_resources
+        overall['geni_resources'] = \
+            reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+        overall['status'] = 'unknown'
+        if overall['geni_resources']:
+            overall['status'] = 'ready'
+    
+        return overall
+    
+    def ListSlices(self, api, creds, call_id):
+        def _ListSlices(server, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.ListSlices(*args)
+    
+        if Callids().already_handled(call_id): return []
+    
+        # look in cache first
+        if self.caching and api.cache:
+            slices = api.cache.get('slices')
+            if slices:
+                return slices
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred= api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        # fetch from aggregates
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_ListSlices, server, [cred], call_id)
+    
+        # combine results
+        results = threads.get_results()
+        slices = []
+        for result in results:
+            slices.extend(result)
+    
+        # cache the result
+        if self.caching and api.cache:
+            api.cache.add('slices', slices)
+    
+        return slices
+    
+    
+    def get_ticket(self, api, xrn, creds, rspec, users):
+        slice_hrn, type = urn_to_hrn(xrn)
+        # get the netspecs contained within the client's rspec
+        aggregate_rspecs = {}
+        tree= etree.parse(StringIO(rspec))
+        elements = tree.findall('./network')
+        for element in elements:
+            aggregate_hrn = element.values()[0]
+            aggregate_rspecs[aggregate_hrn] = rspec 
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential() 
+        threads = ThreadManager()
+        for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(server.GetTicket, xrn, [cred], aggregate_rspec, users)
+    
+        results = threads.get_results()
+        
+        # gather information from each ticket 
+        rspec = None
+        initscripts = []
+        slivers = [] 
+        object_gid = None  
+        for result in results:
+            agg_ticket = SfaTicket(string=result)
+            attrs = agg_ticket.get_attributes()
+            if not object_gid:
+                object_gid = agg_ticket.get_gid_object()
+            if not rspec:
+                rspec = RSpec(agg_ticket.get_rspec())
+            else:
+                rspec.version.merge(agg_ticket.get_rspec())
+            initscripts.extend(attrs.get('initscripts', [])) 
+            slivers.extend(attrs.get('slivers', [])) 
+        
+        # merge info
+        attributes = {'initscripts': initscripts,
+                     'slivers': slivers}
+        
+        # create a new ticket
+        ticket = SfaTicket(subject = slice_hrn)
+        ticket.set_gid_caller(api.auth.client_gid)
+        ticket.set_issuer(key=api.key, subject=api.hrn)
+        ticket.set_gid_object(object_gid)
+        ticket.set_pubkey(object_gid.get_pubkey())
+        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+        ticket.set_attributes(attributes)
+        ticket.set_rspec(rspec.toxml())
+        ticket.encode()
+        ticket.sign()          
+        return ticket.save_to_string(save_parents=True)
+    
+    def start_slice(self, api, xrn, creds):
+        hrn, type = urn_to_hrn(xrn)
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)    
+            threads.run(server.Start, xrn, cred)
+        threads.get_results()    
+        return 1
+     
+    def stop_slice(self, api, xrn, creds):
+        hrn, type = urn_to_hrn(xrn)
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Don't send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(server.Stop, xrn, cred)
+        threads.get_results()    
+        return 1
+    
+    def reset_slice(self, api, xrn):
+        """
+        Not implemented
+        """
+        return 1
+    
+    def shutdown(self, api, xrn, creds):
+        """
+        Not implemented   
+        """
+        return 1
+    
+    def status(self, api, xrn, creds):
+        """
+        Not implemented 
+        """
+        return 1
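Every SliceManager call above follows the same fan-out skeleton: prefer a delegated credential, skip the caller's own aggregate to avoid request loops, run the call against every other aggregate through ThreadManager, then merge the per-aggregate results. A condensed sketch of that skeleton (the helper name is illustrative, not part of the module):

    from sfa.server.threadmanager import ThreadManager

    # hedged sketch of the per-call fan-out used throughout SliceManager
    def fan_out(api, creds, caller_hrn, remote_call):
        cred = api.getDelegatedCredential(creds)
        if not cred:
            cred = api.getCredential()
        threads = ThreadManager()
        for aggregate in api.aggregates:
            # don't send the request back to the caller unless it is this SM
            if caller_hrn == aggregate and aggregate != api.hrn:
                continue
            server = api.server_proxy(api.aggregates[aggregate], cred)
            threads.run(remote_call, server, cred)
        return threads.get_results()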
+
diff --git a/sfa/managers/slice_manager_pl.py b/sfa/managers/slice_manager_pl.py
deleted file mode 100644 (file)
index 8d5a695..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-#
-import sys
-import time,datetime
-from StringIO import StringIO
-from types import StringTypes
-from copy import deepcopy
-from copy import copy
-from lxml import etree
-
-from sfa.util.sfalogging import logger
-from sfa.util.rspecHelper import merge_rspecs
-from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
-from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.record import SfaRecord
-from sfa.rspecs.rspec_converter import RSpecConverter
-from sfa.client.client_helper import sfa_to_pg_users_arg
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec 
-from sfa.util.policy import Policy
-from sfa.util.prefixTree import prefixTree
-from sfa.util.sfaticket import *
-from sfa.trust.credential import Credential
-from sfa.util.threadmanager import ThreadManager
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol     
-import sfa.plc.peers as peers
-from sfa.util.version import version_core
-from sfa.util.callids import Callids
-
-
-def _call_id_supported(api, server):
-    """
-    Returns true if server support the optional call_id arg, false otherwise.
-    """
-    server_version = api.get_cached_server_version(server)
-
-    if 'sfa' in server_version:
-        code_tag = server_version['code_tag']
-        code_tag_parts = code_tag.split("-")
-
-        version_parts = code_tag_parts[0].split(".")
-        major, minor = version_parts[0:2]
-        rev = code_tag_parts[1]
-        if int(major) > 1:
-            if int(minor) > 0 or int(rev) > 20:
-                return True
-    return False
-
-# we have specialized xmlrpclib.ServerProxy to remember the input url
-# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
-def get_serverproxy_url (server):
-    try:
-        return server.get_url()
-    except:
-        logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
-        return server._ServerProxy__host + server._ServerProxy__handler
-
-def GetVersion(api):
-    # peers explicitly in aggregates.xml
-    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
-                   if peername != api.hrn])
-    version_manager = VersionManager()
-    ad_rspec_versions = []
-    request_rspec_versions = []
-    for rspec_version in version_manager.versions:
-        if rspec_version.content_type in ['*', 'ad']:
-            ad_rspec_versions.append(rspec_version.to_dict())
-        if rspec_version.content_type in ['*', 'request']:
-            request_rspec_versions.append(rspec_version.to_dict())
-    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
-    xrn=Xrn(api.hrn, 'authority+sa')
-    version_more = {'interface':'slicemgr',
-                    'hrn' : xrn.get_hrn(),
-                    'urn' : xrn.get_urn(),
-                    'peers': peers,
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': default_rspec_version
-                    }
-    sm_version=version_core(version_more)
-    # local aggregate if present needs to have localhost resolved
-    if api.hrn in api.aggregates:
-        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
-        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
-    return sm_version
-
-def drop_slicemgr_stats(rspec):
-    try:
-        stats_elements = rspec.xml.xpath('//statistics')
-        for node in stats_elements:
-            node.getparent().remove(node)
-    except Exception, e:
-        api.logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
-
-def add_slicemgr_stat(rspec, callname, aggname, elapsed, status):
-    try:
-        stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
-        if stats_tags:
-            stats_tag = stats_tags[0]
-        else:
-            stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname)
-
-        etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status))
-    except Exception, e:
-        api.logger.warn("add_slicemgr_stat failed on  %s: %s" %(aggname, str(e)))
-
-def ListResources(api, creds, options, call_id):
-    version_manager = VersionManager()
-    def _ListResources(aggregate, server, credential, opts, call_id):
-
-        my_opts = copy(opts)
-        args = [credential, my_opts]
-        tStart = time.time()
-        try:
-            if _call_id_supported(api, server):
-                args.append(call_id)
-            version = api.get_cached_server_version(server)
-            # force ProtoGENI aggregates to give us a v2 RSpec
-            if 'sfa' not in version.keys():
-                my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
-            rspec = server.ListResources(*args)
-            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
-        except Exception, e:
-            api.logger.log_exc("ListResources failed at %s" %(server.url))
-            return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
-
-    if Callids().already_handled(call_id): return ""
-
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', '')
-    (hrn, type) = urn_to_hrn(xrn)
-    if 'geni_compressed' in options:
-        del(options['geni_compressed'])
-
-    # get the rspec's return format from options
-    rspec_version = version_manager.get_version(options.get('rspec_version'))
-    version_string = "rspec_%s" % (rspec_version.to_string())
-
-    # look in cache first
-    if caching and api.cache and not xrn:
-        rspec =  api.cache.get(version_string)
-        if rspec:
-            return rspec
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Dont send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-
-        # get the rspec from the aggregate
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(_ListResources, aggregate, server, [cred], options, call_id)
-
-
-    results = threads.get_results()
-    rspec_version = version_manager.get_version(options.get('rspec_version'))
-    if xrn:    
-        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
-    else: 
-        result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
-    rspec = RSpec(version=result_version)
-    for result in results:
-        add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"])
-        if result["status"]=="success":
-            try:
-                rspec.version.merge(result["rspec"])
-            except:
-                api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
-
-    # cache the result
-    if caching and api.cache and not xrn:
-        api.cache.add(version_string, rspec.toxml())
-
-    return rspec.toxml()
-
-
-def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
-
-    version_manager = VersionManager()
-    def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
-        tStart = time.time()
-        try:
-            # Need to call GetVersion at an aggregate to determine the supported
-            # rspec type/format before calling CreateSliver at an aggregate.
-            server_version = api.get_cached_server_version(server)
-            requested_users = users
-            if 'sfa' not in server_version and 'geni_api' in server_version:
-                # sfa aggregates support both sfa and pg rspecs, no need to convert
-                # if aggregate supports sfa rspecs. otherwise convert to pg rspec
-                rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
-                filter = {'component_manager_id': server_version['urn']}
-                rspec.filter(filter)
-                rspec = rspec.toxml()
-                requested_users = sfa_to_pg_users_arg(users)
-            args = [xrn, credential, rspec, requested_users]
-            if _call_id_supported(api, server):
-                args.append(call_id)
-            rspec = server.CreateSliver(*args)
-            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
-        except: 
-            logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url)
-            return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
-
-    if Callids().already_handled(call_id): return ""
-    # Validate the RSpec against PlanetLab's schema --disabled for now
-    # The schema used here needs to aggregate the PL and VINI schemas
-    # schema = "/var/www/html/schemas/pl.rng"
-    rspec = RSpec(rspec_str)
-    schema = None
-    if schema:
-        rspec.validate(schema)
-
-    # if there is a <statistics> section, the aggregates don't care about it,
-    # so delete it.
-    drop_slicemgr_stats(rspec)
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-
-    # get the callers hrn
-    hrn, type = urn_to_hrn(xrn)
-    valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM 
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        # Just send entire RSpec to each aggregate
-        threads.run(_CreateSliver, aggregate, server, xrn, [cred], rspec.toxml(), users, call_id)
-            
-    results = threads.get_results()
-    manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
-    result_rspec = RSpec(version=manifest_version)
-    for result in results:
-        add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"])
-        if result["status"]=="success":
-            try:
-                result_rspec.version.merge(result["rspec"])
-            except:
-                api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec")
-    return result_rspec.toxml()
-
-def RenewSliver(api, xrn, creds, expiration_time, call_id):
-    def _RenewSliver(server, xrn, creds, expiration_time, call_id):
-        server_version = api.get_cached_server_version(server)
-        args =  [xrn, creds, expiration_time]
-        if _call_id_supported(api, server):
-            args.append(call_id)
-        return server.RenewSliver(*args)
-
-    if Callids().already_handled(call_id): return True
-
-    (hrn, type) = urn_to_hrn(xrn)
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(_RenewSliver, server, xrn, [cred], expiration_time, call_id)
-    # 'and' the results
-    return reduce (lambda x,y: x and y, threads.get_results() , True)
-
-def DeleteSliver(api, xrn, creds, call_id):
-    def _DeleteSliver(server, xrn, creds, call_id):
-        server_version = api.get_cached_server_version(server)
-        args =  [xrn, creds]
-        if _call_id_supported(api, server):
-            args.append(call_id)
-        return server.DeleteSliver(*args)
-
-    if Callids().already_handled(call_id): return ""
-    (hrn, type) = urn_to_hrn(xrn)
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(_DeleteSliver, server, xrn, [cred], call_id)
-    threads.get_results()
-    return 1
-
-
-# first draft of a merging SliverStatus
-def SliverStatus(api, slice_xrn, creds, call_id):
-    def _SliverStatus(server, xrn, creds, call_id):
-        server_version = api.get_cached_server_version(server)
-        args =  [xrn, creds]
-        if _call_id_supported(api, server):
-            args.append(call_id)
-        return server.SliverStatus(*args)
-    
-    if Callids().already_handled(call_id): return {}
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run (_SliverStatus, server, slice_xrn, [cred], call_id)
-    results = threads.get_results()
-
-    # get rid of any void result - e.g. when the call_id was already handled, in which case by convention we return {}
-    results = [ result for result in results if result and result['geni_resources']]
-
-    # do not try to combine if there's no result
-    if not results : return {}
-
-    # otherwise let's merge stuff
-    overall = {}
-
-    # mmh, it is expected that all results carry the same urn
-    overall['geni_urn'] = results[0]['geni_urn']
-    overall['pl_login'] = results[0]['pl_login']
-    # append all geni_resources
-    overall['geni_resources'] = \
-        reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
-    overall['status'] = 'unknown'
-    if overall['geni_resources']:
-        overall['status'] = 'ready'
-
-    return overall
-
-caching=True
-#caching=False
-def ListSlices(api, creds, call_id):
-    def _ListSlices(server, creds, call_id):
-        server_version = api.get_cached_server_version(server)
-        args =  [creds]
-        if _call_id_supported(api, server):
-            args.append(call_id)
-        return server.ListSlices(*args)
-
-    if Callids().already_handled(call_id): return []
-
-    # look in cache first
-    if caching and api.cache:
-        slices = api.cache.get('slices')
-        if slices:
-            return slices
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred= api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    # fetch from aggregates
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(_ListSlices, server, [cred], call_id)
-
-    # combine results
-    results = threads.get_results()
-    slices = []
-    for result in results:
-        slices.extend(result)
-
-    # cache the result
-    if caching and api.cache:
-        api.cache.add('slices', slices)
-
-    return slices
-
-
-def get_ticket(api, xrn, creds, rspec, users):
-    slice_hrn, type = urn_to_hrn(xrn)
-    # get the netspecs contained within the clients rspec
-    aggregate_rspecs = {}
-    tree= etree.parse(StringIO(rspec))
-    elements = tree.findall('./network')
-    for element in elements:
-        aggregate_hrn = element.values()[0]
-        aggregate_rspecs[aggregate_hrn] = rspec 
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential() 
-    threads = ThreadManager()
-    for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(server.GetTicket, xrn, [cred], aggregate_rspec, users)
-
-    results = threads.get_results()
-    
-    # gather information from each ticket 
-    rspecs = []
-    initscripts = []
-    slivers = [] 
-    object_gid = None  
-    for result in results:
-        agg_ticket = SfaTicket(string=result)
-        attrs = agg_ticket.get_attributes()
-        if not object_gid:
-            object_gid = agg_ticket.get_gid_object()
-        rspecs.append(agg_ticket.get_rspec())
-        initscripts.extend(attrs.get('initscripts', [])) 
-        slivers.extend(attrs.get('slivers', [])) 
-    
-    # merge info
-    attributes = {'initscripts': initscripts,
-                 'slivers': slivers}
-    merged_rspec = merge_rspecs(rspecs) 
-
-    # create a new ticket
-    ticket = SfaTicket(subject = slice_hrn)
-    ticket.set_gid_caller(api.auth.client_gid)
-    ticket.set_issuer(key=api.key, subject=api.hrn)
-    ticket.set_gid_object(object_gid)
-    ticket.set_pubkey(object_gid.get_pubkey())
-    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-    ticket.set_attributes(attributes)
-    ticket.set_rspec(merged_rspec)
-    ticket.encode()
-    ticket.sign()          
-    return ticket.save_to_string(save_parents=True)
-
-def start_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)    
-        threads.run(server.Start, xrn, cred)
-    threads.get_results()    
-    return 1
-def stop_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Don't send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(server.Stop, xrn, cred)
-    threads.get_results()    
-    return 1
-
-def reset_slice(api, xrn):
-    """
-    Not implemented
-    """
-    return 1
-
-def shutdown(api, xrn, creds):
-    """
-    Not implemented   
-    """
-    return 1
-
-def status(api, xrn, creds):
-    """
-    Not implemented 
-    """
-    return 1
-
-def main():
-    r = RSpec()
-    r.parseFile(sys.argv[1])
-    rspec = r.toDict()
-    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
-
-if __name__ == "__main__":
-    main()
-
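Note: the removed ListResources, CreateSliver, RenewSliver, DeleteSliver and ListSlices bodies above all share one fan-out skeleton: bail out if the call_id was already handled, prefer a delegated credential over the slice manager's own, then dispatch the per-aggregate call through a ThreadManager while skipping the caller's own aggregate to avoid request loops. A minimal sketch of that shared skeleton, assuming the same Callids, ThreadManager and api objects used above (the names fan_out and make_call are illustrative only, not part of the code base):

    # Illustrative sketch, not part of this commit: the fan-out skeleton
    # shared by the deleted slice-manager methods above.
    def fan_out(api, creds, call_id, caller_hrn, make_call):
        if Callids().already_handled(call_id):
            return []
        # attempt to use a delegated credential first
        cred = api.getDelegatedCredential(creds)
        if not cred:
            cred = api.getCredential()
        threads = ThreadManager()
        for aggregate in api.aggregates:
            # prevent infinite loop: don't send the request back to the caller
            # unless the caller is the aggregate's SM
            if caller_hrn == aggregate and aggregate != api.hrn:
                continue
            server = api.get_server(api.aggregates[aggregate], cred)
            # make_call wraps the per-aggregate XML-RPC call, e.g. _ListSlices
            threads.run(make_call, server, [cred], call_id)
        return threads.get_results()
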
index cdcf2b7..e67d2b5 100644 (file)
@@ -9,11 +9,11 @@ from lxml import etree
 
 from sfa.util.sfalogging import logger
 #from sfa.util.sfalogging import sfa_logger
-from sfa.util.rspecHelper import merge_rspecs
+#from sfa.util.rspecHelper import merge_rspecs
 from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn
 from sfa.util.plxrn import hrn_to_pl_slicename
-from sfa.util.rspec import *
-from sfa.util.specdict import *
+#from sfa.util.rspec import *
+#from sfa.util.specdict import *
 from sfa.util.faults import *
 from sfa.util.record import SfaRecord
 #from sfa.rspecs.pg_rspec import PGRSpec
@@ -26,16 +26,20 @@ from sfa.rspecs.rspec_converter import RSpecConverter
 from sfa.client.client_helper import sfa_to_pg_users_arg
 from sfa.rspecs.version_manager import VersionManager
 
-from sfa.rspecs.rspec import RSpec 
+#from sfa.rspecs.rspec import RSpec 
 from sfa.util.policy import Policy
 from sfa.util.prefixTree import prefixTree
-from sfa.util.sfaticket import *
+#from sfa.util.sfaticket import *
 from sfa.trust.credential import Credential
-from sfa.util.threadmanager import ThreadManager
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol     
-import sfa.plc.peers as peers
+#from sfa.util.threadmanager import ThreadManager
+#import sfa.util.xmlrpcprotocol as xmlrpcprotocol     
+#import sfa.plc.peers as peers
 from sfa.util.version import version_core
 from sfa.util.callids import Callids
+#from sfa.senslab.api import *
+
+
+#api=SfaAPI(interface='slicemgr')
 
 def _call_id_supported(api, server):
     """
diff --git a/sfa/managers/vini/request.xml b/sfa/managers/vini/request.xml
deleted file mode 100644 (file)
index 111f7b0..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0"?>
-<RSpec type="SFA">
-  <request name="plc.vini">
-    <sliver nodeid="n18"/>
-    <sliver nodeid="n20"/>
-    <sliver nodeid="n22"/>
-    <sliver nodeid="n26"/>
-    <sliver nodeid="n28"/>
-    <sliver nodeid="n30"/>
-    <sliver nodeid="n32"/>
-    <sliver nodeid="n34"/>
-    <sliver nodeid="n36"/>
-    <vlink endpoints="n18 n22"/>
-    <vlink endpoints="n18 n26"/>
-    <vlink endpoints="n18 n28"/>
-    <vlink endpoints="n20 n22"/>
-    <vlink endpoints="n22 n26"/>
-    <vlink endpoints="n26 n30"/>
-    <vlink endpoints="n28 n30"/>
-    <vlink endpoints="n28 n32"/>
-    <vlink endpoints="n30 n36"/>
-    <vlink endpoints="n34 n36"/>
-    <vlink endpoints="n32 n36"/>
-    <vlink endpoints="n32 n34"/>
-  </request>
-</RSpec>
diff --git a/sfa/managers/vini/topology.py b/sfa/managers/vini/topology.py
deleted file mode 100644 (file)
index b905c2f..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/python
-
-# $Id: topology.py 14181 2009-07-01 19:46:07Z acb $
-# $URL: https://svn.planet-lab.org/svn/NodeManager-topo/trunk/topology.py $
-
-#
-# Links in the physical topology, gleaned from looking at the Internet2
-# topology map.  Link (a, b) connects sites with IDs a and b.
-#
-PhysicalLinks = [(2, 12),  # I2 Princeton - New York 
-         (11, 13), # I2 Chicago - Wash DC
-         (11, 15), # I2 Chicago - Atlanta
-         (11, 16), # I2 Chicago - CESNET
-         (11, 17), # I2 Chicago - Kansas City
-         (12, 13), # I2 New York - Wash DC
-         (13, 15), # I2 Wash DC - Atlanta
-         (14, 15), # Ga Tech - I2 Atlanta
-         (15, 19), # I2 Atlanta - Houston
-         (17, 19), # I2 Kansas City - Houston
-         (17, 22), # I2 Kansas City - Salt Lake City
-         (17, 24), # I2 Kansas City - UMKC
-         (19, 20), # I2 Houston - Los Angeles
-         (20, 21), # I2 Los Angeles - Seattle
-         (20, 22), # I2 Los Angeles - Salt Lake City
-         (21, 22)] # I2 Seattle - Salt Lake City
-
-
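Note: the PhysicalLinks pairs above are consumed by Topology.__init__ in the utils.py removal below, one Link object per (site, site) pair, and lookupSiteLink treats them as undirected. A tiny order-insensitive adjacency check over the same data, for orientation (the helper name is illustrative only):

    # Illustrative only: an undirected adjacency test over the PhysicalLinks
    # pairs above (the helper name is not in the code base).
    adjacent = set(frozenset(pair) for pair in PhysicalLinks)
    def sites_adjacent(s1, s2):
        return frozenset((s1, s2)) in adjacent
    print sites_adjacent(11, 13)   # True: I2 Chicago - Wash DC
    print sites_adjacent(2, 11)    # False: no direct Princeton - Chicago link
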
diff --git a/sfa/managers/vini/utils.py b/sfa/managers/vini/utils.py
deleted file mode 100644 (file)
index b0d7e51..0000000
+++ /dev/null
@@ -1,719 +0,0 @@
-from __future__ import with_statement
-import re
-import socket
-from sfa.util.faults import *
-from sfa.managers.vini.topology import PhysicalLinks
-from xmlbuilder import XMLBuilder
-from lxml import etree
-import sys
-from StringIO import StringIO
-
-VINI_RELAXNG_SCHEMA = "/var/www/html/schemas/vini.rng"
-
-# Taken from bwlimit.py
-#
-# See tc_util.c and http://physics.nist.gov/cuu/Units/binary.html. Be
-# warned that older versions of tc interpret "kbps", "mbps", "mbit",
-# and "kbit" to mean (in this system) "kibps", "mibps", "mibit", and
-# "kibit" and that if an older version is installed, all rates will
-# be off by a small fraction.
-suffixes = {
-    "":         1,
-    "bit":     1,
-    "kibit":   1024,
-    "kbit":    1000,
-    "mibit":   1024*1024,
-    "mbit":    1000000,
-    "gibit":   1024*1024*1024,
-    "gbit":    1000000000,
-    "tibit":   1024*1024*1024*1024,
-    "tbit":    1000000000000,
-    "bps":     8,
-    "kibps":   8*1024,
-    "kbps":    8000,
-    "mibps":   8*1024*1024,
-    "mbps":    8000000,
-    "gibps":   8*1024*1024*1024,
-    "gbps":    8000000000,
-    "tibps":   8*1024*1024*1024*1024,
-    "tbps":    8000000000000
-}
-
-
-def get_tc_rate(s):
-    """
-    Parses an integer or a tc rate string (e.g., 1.5mbit) into bits/second
-    """
-
-    if type(s) == int:
-        return s
-    m = re.match(r"([0-9.]+)(\D*)", s)
-    if m is None:
-        return -1
-    suffix = m.group(2).lower()
-    if suffixes.has_key(suffix):
-        return int(float(m.group(1)) * suffixes[suffix])
-    else:
-        return -1
-
-def format_tc_rate(rate):
-    """
-    Formats a bits/second rate into a tc rate string
-    """
-
-    if rate >= 1000000000 and (rate % 1000000000) == 0:
-        return "%.0fgbit" % (rate / 1000000000.)
-    elif rate >= 1000000 and (rate % 1000000) == 0:
-        return "%.0fmbit" % (rate / 1000000.)
-    elif rate >= 1000:
-        return "%.0fkbit" % (rate / 1000.)
-    else:
-        return "%.0fbit" % rate
-
-
-class Node:
-    def __init__(self, node, bps = 1000 * 1000000):
-        self.id = node['node_id']
-        self.idtag = "n%s" % self.id
-        self.hostname = node['hostname']
-        self.name = self.shortname = self.hostname.replace('.vini-veritas.net', '')
-        self.site_id = node['site_id']
-        self.ipaddr = socket.gethostbyname(self.hostname)
-        self.bps = bps
-        self.links = set()
-        self.sliver = False
-
-    def get_link_id(self, remote):
-        if self.id < remote.id:
-            link = (self.id<<7) + remote.id
-        else:
-            link = (remote.id<<7) + self.id
-        return link
-        
-    def get_iface_id(self, remote):
-        if self.id < remote.id:
-            iface = 1
-        else:
-            iface = 2
-        return iface
-    
-    def get_virt_ip(self, remote):
-        link = self.get_link_id(remote)
-        iface = self.get_iface_id(remote)
-        first = link >> 6
-        second = ((link & 0x3f)<<2) + iface
-        return "192.168.%d.%d" % (first, second)
-
-    def get_virt_net(self, remote):
-        link = self.get_link_id(remote)
-        first = link >> 6
-        second = (link & 0x3f)<<2
-        return "192.168.%d.%d/30" % (first, second)
-        
-    def get_site(self, sites):
-        return sites[self.site_id]
-    
-    def get_topo_rspec(self, link):
-        if link.end1 == self:
-            remote = link.end2
-        elif link.end2 == self:
-            remote = link.end1
-        else:
-            raise Error("Link does not connect to Node")
-            
-        my_ip = self.get_virt_ip(remote)
-        remote_ip = remote.get_virt_ip(self)
-        net = self.get_virt_net(remote)
-        bw = format_tc_rate(link.bps)
-        return (remote.id, remote.ipaddr, bw, my_ip, remote_ip, net)
-        
-    def add_link(self, link):
-        self.links.add(link)
-        
-    def add_tag(self, sites):
-        s = self.get_site(sites)
-        words = self.hostname.split(".")
-        index = words[0].replace("node", "")
-        if index.isdigit():
-            self.tag = s.tag + index
-        else:
-            self.tag = None
-
-    # Assumes there is at most one Link between two sites
-    def get_sitelink(self, node, sites):
-        site1 = sites[self.site_id]
-        site2 = sites[node.site_id]
-        sl = site1.links.intersection(site2.links)
-        if len(sl):
-            return sl.pop()
-        return None
-
-    def add_sliver(self):
-        self.sliver = True
-
-    def toxml(self, xml, hrn):
-        if not self.tag:
-            return
-        with xml.node(id = self.idtag):
-            with xml.hostname:
-                xml << self.hostname
-            with xml.kbps:
-                xml << str(int(self.bps/1000))
-            if self.sliver:
-                with xml.sliver:
-                    pass
-    
-
-class Link:
-    def __init__(self, end1, end2, bps = 1000 * 1000000, parent = None):
-        self.end1 = end1
-        self.end2 = end2
-        self.bps = bps
-        self.parent = parent
-        self.children = []
-
-        end1.add_link(self)
-        end2.add_link(self)
-        
-        if self.parent:
-            self.parent.children.append(self)
-            
-    def toxml(self, xml):
-        end_ids = "%s %s" % (self.end1.idtag, self.end2.idtag)
-
-        if self.parent:
-            element = xml.vlink(endpoints=end_ids)
-        else:
-            element = xml.link(endpoints=end_ids)
-
-        with element:
-            with xml.description:
-                xml << "%s -- %s" % (self.end1.name, self.end2.name)
-            with xml.kbps:
-                xml << str(int(self.bps/1000))
-            for child in self.children:
-                child.toxml(xml)
-        
-
-class Site:
-    def __init__(self, site):
-        self.id = site['site_id']
-        self.idtag = "s%s" % self.id
-        self.node_ids = site['node_ids']
-        self.name = site['abbreviated_name'].replace(" ", "_")
-        self.tag = site['login_base']
-        self.public = site['is_public']
-        self.enabled = site['enabled']
-        self.links = set()
-
-    def get_sitenodes(self, nodes):
-        n = []
-        for i in self.node_ids:
-            n.append(nodes[i])
-        return n
-    
-    def add_link(self, link):
-        self.links.add(link)
-
-    def toxml(self, xml, hrn, nodes):
-        if not (self.public and self.enabled and self.node_ids):
-            return
-        with xml.site(id = self.idtag):
-            with xml.name:
-                xml << self.name
-                
-            for node in self.get_sitenodes(nodes):
-                node.toxml(xml, hrn)
-   
-    
-class Slice:
-    def __init__(self, slice):
-        self.id = slice['slice_id']
-        self.name = slice['name']
-        self.node_ids = set(slice['node_ids'])
-        self.slice_tag_ids = slice['slice_tag_ids']
-    
-    def get_tag(self, tagname, slicetags, node = None):
-        for i in self.slice_tag_ids:
-            tag = slicetags[i]
-            if tag.tagname == tagname:
-                if (not node) or (node.id == tag.node_id):
-                    return tag
-        else:
-            return None
-        
-    def get_nodes(self, nodes):
-        n = []
-        for id in self.node_ids:
-            n.append(nodes[id])
-        return n
-             
-    
-    # Add a new slice tag   
-    def add_tag(self, tagname, value, slicetags, node = None):
-        record = {'slice_tag_id':None, 'slice_id':self.id, 'tagname':tagname, 'value':value}
-        if node:
-            record['node_id'] = node.id
-        else:
-            record['node_id'] = None
-        tag = Slicetag(record)
-        slicetags[tag.id] = tag
-        self.slice_tag_ids.append(tag.id)
-        tag.changed = True       
-        tag.updated = True
-        return tag
-    
-    # Update a slice tag if it exists, else add it             
-    def update_tag(self, tagname, value, slicetags, node = None):
-        tag = self.get_tag(tagname, slicetags, node)
-        if tag and tag.value == value:
-            value = "no change"
-        elif tag:
-            tag.value = value
-            tag.changed = True
-        else:
-            tag = self.add_tag(tagname, value, slicetags, node)
-        tag.updated = True
-            
-    def assign_egre_key(self, slicetags):
-        if not self.get_tag('egre_key', slicetags):
-            try:
-                key = free_egre_key(slicetags)
-                self.update_tag('egre_key', key, slicetags)
-            except:
-                # Should handle this case...
-                pass
-        return
-            
-    def turn_on_netns(self, slicetags):
-        tag = self.get_tag('netns', slicetags)
-        if (not tag) or (tag.value != '1'):
-            self.update_tag('netns', '1', slicetags)
-        return
-   
-    def turn_off_netns(self, slicetags):
-        tag = self.get_tag('netns', slicetags)
-        if tag and (tag.value != '0'):
-            tag.delete()
-        return
-    
-    def add_cap_net_admin(self, slicetags):
-        tag = self.get_tag('capabilities', slicetags)
-        if tag:
-            caps = tag.value.split(',')
-            for cap in caps:
-                if cap == "CAP_NET_ADMIN":
-                    return
-            else:
-                newcaps = "CAP_NET_ADMIN," + tag.value
-                self.update_tag('capabilities', newcaps, slicetags)
-        else:
-            self.add_tag('capabilities', 'CAP_NET_ADMIN', slicetags)
-        return
-    
-    def remove_cap_net_admin(self, slicetags):
-        tag = self.get_tag('capabilities', slicetags)
-        if tag:
-            caps = tag.value.split(',')
-            newcaps = []
-            for cap in caps:
-                if cap != "CAP_NET_ADMIN":
-                    newcaps.append(cap)
-            if newcaps:
-                value = ','.join(newcaps)
-                self.update_tag('capabilities', value, slicetags)
-            else:
-                tag.delete()
-        return
-
-    # Update the vsys/setup-link and vsys/setup-nat slice tags.
-    def add_vsys_tags(self, slicetags):
-        link = nat = False
-        for i in self.slice_tag_ids:
-            tag = slicetags[i]
-            if tag.tagname == 'vsys':
-                if tag.value == 'setup-link':
-                    link = True
-                elif tag.value == 'setup-nat':
-                    nat = True
-        if not link:
-            self.add_tag('vsys', 'setup-link', slicetags)
-        if not nat:
-            self.add_tag('vsys', 'setup-nat', slicetags)
-        return
-
-
-class Slicetag:
-    newid = -1 
-    def __init__(self, tag):
-        self.id = tag['slice_tag_id']
-        if not self.id:
-            # Make one up for the time being...
-            self.id = Slicetag.newid
-            Slicetag.newid -= 1
-        self.slice_id = tag['slice_id']
-        self.tagname = tag['tagname']
-        self.value = tag['value']
-        self.node_id = tag['node_id']
-        self.updated = False
-        self.changed = False
-        self.deleted = False
-    
-    # Mark a tag as deleted
-    def delete(self):
-        self.deleted = True
-        self.updated = True
-    
-    def write(self, api):
-        if self.changed:
-            if int(self.id) > 0:
-                api.plshell.UpdateSliceTag(api.plauth, self.id, self.value)
-            else:
-                api.plshell.AddSliceTag(api.plauth, self.slice_id, 
-                                        self.tagname, self.value, self.node_id)
-        elif self.deleted and int(self.id) > 0:
-            api.plshell.DeleteSliceTag(api.plauth, self.id)
-
-
-"""
-A topology is a compound object consisting of:
-* a dictionary mapping site IDs to Site objects
-* a dictionary mapping node IDs to Node objects
-* the Site objects are connected via SiteLink objects representing
-  the physical topology and available bandwidth
-* the Node objects are connected via Link objects representing
-  the requested or assigned virtual topology of a slice
-"""
-class Topology:
-    def __init__(self, api):
-        self.api = api
-        self.sites = get_sites(api)
-        self.nodes = get_nodes(api)
-        self.tags = get_slice_tags(api)
-        self.sitelinks = []
-        self.nodelinks = []
-    
-        for (s1, s2) in PhysicalLinks:
-            self.sitelinks.append(Link(self.sites[s1], self.sites[s2]))
-        
-        for id in self.nodes:
-            self.nodes[id].add_tag(self.sites)
-        
-        for t in self.tags:
-            tag = self.tags[t]
-            if tag.tagname == 'topo_rspec':
-                node1 = self.nodes[tag.node_id]
-                l = eval(tag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    allocbps = get_tc_rate(bw)
-                    node1.bps -= allocbps
-                    try:
-                        node2 = self.nodes[id]
-                        if node1.id < node2.id:
-                            sl = node1.get_sitelink(node2, self.sites)
-                            sl.bps -= allocbps
-                    except:
-                        pass
-
-    
-    """ Lookup site based on id or idtag value """
-    def lookupSite(self, id):
-        val = None
-        if isinstance(id, basestring):
-            id = int(id.lstrip('s'))
-        try:
-            val = self.sites[id]
-        except:
-            raise KeyError("site ID %s not found" % id)
-        return val
-    
-    def getSites(self):
-        sites = []
-        for s in self.sites:
-            sites.append(self.sites[s])
-        return sites
-        
-    """ Lookup node based on id or idtag value """
-    def lookupNode(self, id):
-        val = None
-        if isinstance(id, basestring):
-            id = int(id.lstrip('n'))
-        try:
-            val = self.nodes[id]
-        except:
-            raise KeyError("node ID %s not found" % id)
-        return val
-    
-    def getNodes(self):
-        nodes = []
-        for n in self.nodes:
-            nodes.append(self.nodes[n])
-        return nodes
-    
-    def nodesInTopo(self):
-        nodes = []
-        for n in self.nodes:
-            node = self.nodes[n]
-            if node.sliver:
-                nodes.append(node)
-        return nodes
-            
-    def lookupSliceTag(self, id):
-        val = None
-        try:
-            val = self.tags[id]
-        except:
-            raise KeyError("slicetag ID %s not found" % id)
-        return val
-    
-    def getSliceTags(self):
-        tags = []
-        for t in self.tags:
-            tags.append(self.tags[t])
-        return tags
-    
-    def lookupSiteLink(self, node1, node2):
-        site1 = self.sites[node1.site_id]
-        site2 = self.sites[node2.site_id]
-        for link in self.sitelinks:
-            if site1 == link.end1 and site2 == link.end2:
-                return link
-            if site2 == link.end1 and site1 == link.end2:
-                return link
-        return None
-    
-
-    def __add_vlink(self, vlink, slicenodes, parent = None):
-        n1 = n2 = None
-        endpoints = vlink.get("endpoints")
-        if endpoints:
-            (end1, end2) = endpoints.split()
-            n1 = self.lookupNode(end1)
-            n2 = self.lookupNode(end2)
-        elif parent:
-            """ Try to infer the endpoints for the virtual link """
-            site_endpoints = parent.get("endpoints")
-            (n1, n2) = self.__infer_endpoints(site_endpoints, slicenodes)
-        else:
-            raise Error("no endpoints given")
-
-        #print "Added virtual link: %s -- %s" % (n1.tag, n2.tag)
-        bps = int(vlink.findtext("kbps")) * 1000
-        sitelink = self.lookupSiteLink(n1, n2)
-        if not sitelink:
-            raise PermissionError("nodes %s and %s not adjacent" % 
-                                  (n1.idtag, n2.idtag))
-        self.nodelinks.append(Link(n1, n2, bps, sitelink))
-        return
-
-    """ 
-    Infer the endpoints of the virtual link.  If the slice exists on 
-    only a single node at each end of the physical link, we'll assume that
-    the user wants the virtual link to terminate at these nodes.
-    """
-    def __infer_endpoints(self, endpoints, slicenodes):
-        n = []
-        ends = endpoints.split()
-        for end in ends:
-            found = 0
-            site = self.lookupSite(end)
-            for id in site.node_ids:
-                if id in slicenodes:
-                    n.append(slicenodes[id])
-                    found += 1
-            if found != 1:
-                raise Error("could not infer endpoint for site %s" % site.id)
-        #print "Inferred endpoints: %s %s" % (n[0].idtag, n[1].idtag)
-        return n
-        
-    def nodeTopoFromRSpec(self, xml):
-        if self.nodelinks:
-            raise Error("virtual topology already present")
-            
-        nodedict = {}
-        for node in self.getNodes():
-            nodedict[node.idtag] = node
-            
-        slicenodes = {}
-
-        tree = etree.parse(StringIO(xml))
-
-        # Validate the incoming request against the RelaxNG schema
-        relaxng_doc = etree.parse(VINI_RELAXNG_SCHEMA)
-        relaxng = etree.RelaxNG(relaxng_doc)
-        
-        if not relaxng(tree):
-            error = relaxng.error_log.last_error
-            message = "%s (line %s)" % (error.message, error.line)
-            raise InvalidRSpec(message)
-
-        rspec = tree.getroot()
-
-        """
-        Handle requests where the user has annotated a description of the
-        physical resources (nodes and links) with virtual ones (slivers
-        and vlinks).
-        """
-        # Find slivers under node elements
-        for sliver in rspec.iterfind("./network/site/node/sliver"):
-            elem = sliver.getparent()
-            node = nodedict[elem.get("id")]
-            slicenodes[node.id] = node
-            node.add_sliver()
-
-        # Find vlinks under link elements
-        for vlink in rspec.iterfind("./network/link/vlink"):
-            link = vlink.getparent()
-            self.__add_vlink(vlink, slicenodes, link)
-
-        """
-        Handle requests where the user has listed the virtual resources only
-        """
-        # Find slivers that specify nodeid
-        for sliver in rspec.iterfind("./request/sliver[@nodeid]"):
-            node = nodedict[sliver.get("nodeid")]
-            slicenodes[node.id] = node
-            node.add_sliver()
-
-        # Find vlinks that specify endpoints
-        for vlink in rspec.iterfind("./request/vlink[@endpoints]"):
-            self.__add_vlink(vlink, slicenodes)
-
-        return
-
-    def nodeTopoFromSliceTags(self, slice):
-        if self.nodelinks:
-            raise Error("virtual topology already present")
-            
-        for node in slice.get_nodes(self.nodes):
-            node.sliver = True
-            linktag = slice.get_tag('topo_rspec', self.tags, node)
-            if linktag:
-                l = eval(linktag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    if node.id < id:
-                        bps = get_tc_rate(bw)
-                        remote = self.lookupNode(id)
-                        sitelink = self.lookupSiteLink(node, remote)
-                        self.nodelinks.append(Link(node,remote,bps,sitelink))
-
-    def updateSliceTags(self, slice):
-        if not self.nodelinks:
-            return
-        slice.update_tag('vini_topo', 'manual', self.tags)
-        slice.assign_egre_key(self.tags)
-        slice.turn_on_netns(self.tags)
-        slice.add_cap_net_admin(self.tags)
-
-        for node in slice.get_nodes(self.nodes):
-            linkdesc = []
-            for link in node.links:
-                linkdesc.append(node.get_topo_rspec(link))
-            if linkdesc:
-                topo_str = "%s" % linkdesc
-                slice.update_tag('topo_rspec', topo_str, self.tags, node)
-
-        # Update slice tags in database
-        for tag in self.getSliceTags():
-            if tag.slice_id == slice.id:
-                if tag.tagname == 'topo_rspec' and not tag.updated:
-                    tag.delete()
-                tag.write(self.api)
-                
-    """
-    Check the requested topology against the available topology and capacity
-    """
-    def verifyNodeTopo(self, hrn, topo):
-        for link in self.nodelinks:
-            if link.bps <= 0:
-                raise GeniInvalidArgument(link.bps, "BW")
-                
-            n1 = link.end1
-            n2 = link.end2
-            sitelink = self.lookupSiteLink(n1, n2)
-            if not sitelink:
-                raise PermissionError("%s: nodes %s and %s not adjacent" % (hrn, n1.tag, n2.tag))
-            if sitelink.bps < link.bps:
-                raise PermissionError("%s: insufficient capacity between %s and %s" % (hrn, n1.tag, n2.tag))
-                
-    """
-    Produce XML directly from the topology specification.
-    """
-    def toxml(self, hrn = None):
-        xml = XMLBuilder(format = True, tab_step = "  ")
-        with xml.RSpec(type="VINI"):
-            if hrn:
-                element = xml.network(name="Public_VINI", slice=hrn)
-            else:
-                element = xml.network(name="Public_VINI")
-                
-            with element:
-                for site in self.getSites():
-                    site.toxml(xml, hrn, self.nodes)
-                for link in self.sitelinks:
-                    link.toxml(xml)
-
-        header = '<?xml version="1.0"?>\n'
-        return header + str(xml)
-
-"""
-Create a dictionary of site objects keyed by site ID
-"""
-def get_sites(api):
-    tmp = []
-    for site in api.plshell.GetSites(api.plauth):
-        t = site['site_id'], Site(site)
-        tmp.append(t)
-    return dict(tmp)
-
-
-"""
-Create a dictionary of node objects keyed by node ID
-"""
-def get_nodes(api):
-    tmp = []
-    for node in api.plshell.GetNodes(api.plauth):
-        t = node['node_id'], Node(node)
-        tmp.append(t)
-    return dict(tmp)
-
-"""
-Create a dictionary of slice objects keyed by slice ID
-"""
-def get_slice(api, slicename):
-    slice = api.plshell.GetSlices(api.plauth, [slicename])
-    if slice:
-        return Slice(slice[0])
-    else:
-        return None
-
-"""
-Create a dictionary of slicetag objects keyed by slice tag ID
-"""
-def get_slice_tags(api):
-    tmp = []
-    for tag in api.plshell.GetSliceTags(api.plauth):
-        t = tag['slice_tag_id'], Slicetag(tag)
-        tmp.append(t)
-    return dict(tmp)
-    
-"""
-Find a free EGRE key
-"""
-def free_egre_key(slicetags):
-    used = set()
-    for i in slicetags:
-        tag = slicetags[i]
-        if tag.tagname == 'egre_key':
-            used.add(int(tag.value))
-                
-    for i in range(1, 256):
-        if i not in used:
-            key = i
-            break
-    else:
-        raise KeyError("No more EGRE keys available")
-        
-    return "%s" % key
-   
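Note: among the helpers removed here, the virtual-address scheme in Node.get_virt_ip / get_virt_net is the least obvious: the two node IDs are packed into a link ID ((smaller_id << 7) + larger_id), whose high bits select the third octet of a 192.168.x.y address and whose low six bits, shifted left by two, select the base of a /30, with interface 1 or 2 added for the two endpoints. A worked example under exactly those definitions (node IDs 1 and 6 are arbitrary sample values):

    # Worked example of Node.get_virt_ip / get_virt_net from the utils.py
    # removed above; node IDs 1 and 6 are arbitrary sample values.
    a, b = 1, 6                     # a < b, so a gets interface 1 and b interface 2
    link = (a << 7) + b             # 134
    first = link >> 6               # 2
    base = (link & 0x3f) << 2       # 24
    print "192.168.%d.%d" % (first, base + 1)    # end a: 192.168.2.25
    print "192.168.%d.%d" % (first, base + 2)    # end b: 192.168.2.26
    print "192.168.%d.%d/30" % (first, base)     # shared net: 192.168.2.24/30
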
diff --git a/sfa/managers/vini/vini.rnc b/sfa/managers/vini/vini.rnc
deleted file mode 100644 (file)
index 09cf902..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-start = RSpec
-RSpec = element RSpec { 
-   attribute expires { xsd:NMTOKEN },
-   attribute generated { xsd:NMTOKEN },
-   attribute type { xsd:NMTOKEN },
-   ( network | request )
-}
-network = element network {
-   attribute name { xsd:NMTOKEN },
-   attribute slice { xsd:NMTOKEN }?,
-   sliver_defaults?,
-   site+,
-   link*
-}
-sliver_defaults = element sliver_defaults {
-   sliver_elements
-}
-site = element site { 
-   attribute id { xsd:ID },
-   element name { text },
-   node*
-}
-node = element node {
-   attribute id { xsd:ID },
-   element hostname { text },
-   element bw_unallocated { attribute units { xsd:NMTOKEN }, text }?,
-   element bw_limit { attribute units { xsd:NMTOKEN }, text }?,
-   sliver*
-}
-link = element link { 
-   attribute endpoints { xsd:IDREFS },
-   element description { text }?,
-   element bw_unallocated { attribute units { xsd:NMTOKEN }, text },
-   vlink*
-}
-request = element request {
-   attribute name { xsd:NMTOKEN },
-   sliver_defaults?,
-   sliver*,
-   vlink*
-}
-sliver = element sliver { 
-   attribute nodeid { xsd:ID }?,
-   sliver_elements
-}
-sliver_elements = ( 
-   element capabilities { text }? 
- & element codemux { text }* 
- & element cpu_pct { text }?
- & element cpu_share { text }?
- & element delegations { text }?
- & element disk_max { text }?
- & element initscript { text }?
- & element ip_addresses {text }*
- & element net_i2_max_kbyte { text }?
- & element net_i2_max_rate { text }?
- & element net_i2_min_rate { text }?
- & element net_i2_share { text }?
- & element net_i2_thresh_kbyte { text }?
- & element net_max_kbyte { text }?
- & element net_max_rate { text }?
- & element net_min_rate { text }?
- & element net_share { text }?
- & element net_thresh_kbyte { text }?
- & element vsys {text}*
- & element vsys_vnet { text }?
-)
-vlink = element vlink { 
-   attribute endpoints { xsd:IDREFS }?,
-   element description { text }?,
-   element kbps { text }? 
-}
diff --git a/sfa/managers/vini/vini.rng b/sfa/managers/vini/vini.rng
deleted file mode 100644 (file)
index 387c831..0000000
+++ /dev/null
@@ -1,261 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
-  <start>
-    <ref name="RSpec"/>
-  </start>
-  <define name="RSpec">
-    <element name="RSpec">
-      <attribute name="expires">
-        <data type="NMTOKEN"/>
-      </attribute>
-      <attribute name="generated">
-        <data type="NMTOKEN"/>
-      </attribute>
-      <attribute name="type">
-        <data type="NMTOKEN"/>
-      </attribute>
-      <choice>
-        <ref name="network"/>
-        <ref name="request"/>
-      </choice>
-    </element>
-  </define>
-  <define name="network">
-    <element name="network">
-      <attribute name="name">
-        <data type="NMTOKEN"/>
-      </attribute>
-      <optional>
-        <attribute name="slice">
-          <data type="NMTOKEN"/>
-        </attribute>
-      </optional>
-      <optional>
-        <ref name="sliver_defaults"/>
-      </optional>
-      <oneOrMore>
-        <ref name="site"/>
-      </oneOrMore>
-      <zeroOrMore>
-        <ref name="link"/>
-      </zeroOrMore>
-    </element>
-  </define>
-  <define name="sliver_defaults">
-    <element name="sliver_defaults">
-      <ref name="sliver_elements"/>
-    </element>
-  </define>
-  <define name="site">
-    <element name="site">
-      <attribute name="id">
-        <data type="ID"/>
-      </attribute>
-      <element name="name">
-        <text/>
-      </element>
-      <zeroOrMore>
-        <ref name="node"/>
-      </zeroOrMore>
-    </element>
-  </define>
-  <define name="node">
-    <element name="node">
-      <attribute name="id">
-        <data type="ID"/>
-      </attribute>
-      <element name="hostname">
-        <text/>
-      </element>
-      <optional>
-        <element name="bw_unallocated">
-          <attribute name="units">
-            <data type="NMTOKEN"/>
-          </attribute>
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="bw_limit">
-          <attribute name="units">
-            <data type="NMTOKEN"/>
-          </attribute>
-          <text/>
-        </element>
-      </optional>
-      <zeroOrMore>
-        <ref name="sliver"/>
-      </zeroOrMore>
-    </element>
-  </define>
-  <define name="link">
-    <element name="link">
-      <attribute name="endpoints">
-        <data type="IDREFS"/>
-      </attribute>
-      <optional>
-        <element name="description">
-          <text/>
-        </element>
-      </optional>
-      <element name="bw_unallocated">
-        <attribute name="units">
-          <data type="NMTOKEN"/>
-        </attribute>
-        <text/>
-      </element>
-      <zeroOrMore>
-        <ref name="vlink"/>
-      </zeroOrMore>
-    </element>
-  </define>
-  <define name="request">
-    <element name="request">
-      <attribute name="name">
-        <data type="NMTOKEN"/>
-      </attribute>
-      <optional>
-        <ref name="sliver_defaults"/>
-      </optional>
-      <zeroOrMore>
-        <ref name="sliver"/>
-      </zeroOrMore>
-      <zeroOrMore>
-        <ref name="vlink"/>
-      </zeroOrMore>
-    </element>
-  </define>
-  <define name="sliver">
-    <element name="sliver">
-      <optional>
-        <attribute name="nodeid">
-          <data type="ID"/>
-        </attribute>
-      </optional>
-      <ref name="sliver_elements"/>
-    </element>
-  </define>
-  <define name="sliver_elements">
-    <interleave>
-      <optional>
-        <element name="capabilities">
-          <text/>
-        </element>
-      </optional>
-      <zeroOrMore>
-        <element name="codemux">
-          <text/>
-        </element>
-      </zeroOrMore>
-      <optional>
-        <element name="cpu_pct">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="cpu_share">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="delegations">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="disk_max">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="initscript">
-          <text/>
-        </element>
-      </optional>
-      <zeroOrMore>
-        <element name="ip_addresses">
-          <text/>
-        </element>
-      </zeroOrMore>
-      <optional>
-        <element name="net_i2_max_kbyte">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_i2_max_rate">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_i2_min_rate">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_i2_share">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_i2_thresh_kbyte">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_max_kbyte">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_max_rate">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_min_rate">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_share">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="net_thresh_kbyte">
-          <text/>
-        </element>
-      </optional>
-      <zeroOrMore>
-        <element name="vsys">
-          <text/>
-        </element>
-      </zeroOrMore>
-      <optional>
-        <element name="vsys_vnet">
-          <text/>
-        </element>
-      </optional>
-    </interleave>
-  </define>
-  <define name="vlink">
-    <element name="vlink">
-      <optional>
-        <attribute name="endpoints">
-          <data type="IDREFS"/>
-        </attribute>
-      </optional>
-      <optional>
-        <element name="description">
-          <text/>
-        </element>
-      </optional>
-      <optional>
-        <element name="kbps">
-          <text/>
-        </element>
-      </optional>
-    </element>
-  </define>
-</grammar>
diff --git a/sfa/managers/vini/vini.xml b/sfa/managers/vini/vini.xml
deleted file mode 100644 (file)
index eb0049b..0000000
+++ /dev/null
@@ -1,373 +0,0 @@
-<?xml version="1.0"?>
-<RSpec type="SFA">
-  <network name="plc.vini">
-    <site id="s2">
-      <name>Princeton</name>
-      <node id="n1">
-        <hostname>node1.princeton.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">999000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s3">
-      <name>PSG</name>
-      <node id="n6">
-        <hostname>node1.psg.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n25">
-        <hostname>node2.psg.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s4">
-      <name>NLR Chicago</name>
-      <node id="n2">
-        <hostname>node1.chic.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n3">
-        <hostname>node2.chic.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s5">
-      <name>NLR Houston</name>
-      <node id="n4">
-        <hostname>node1.hous.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n5">
-        <hostname>node2.hous.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s6">
-      <name>NLR Atlanta</name>
-      <node id="n8">
-        <hostname>node1.atla.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n9">
-        <hostname>node2.atla.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s7">
-      <name>NLR Seattle</name>
-      <node id="n10">
-        <hostname>node1.seat.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n11">
-        <hostname>node2.seat.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s8">
-      <name>NLR Los Angeles</name>
-      <node id="n12">
-        <hostname>node1.losa.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n13">
-        <hostname>node2.losa.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s9">
-      <name>NLR New York</name>
-      <node id="n14">
-        <hostname>node1.newy.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n15">
-        <hostname>node2.newy.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s10">
-      <name>NLR Wash DC</name>
-      <node id="n16">
-        <hostname>node1.wash.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n17">
-        <hostname>node2.wash.nlr.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s11">
-      <name>I2 Chicago</name>
-      <node id="n18">
-        <hostname>node1.chic.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">963000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n19">
-        <hostname>node2.chic.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s12">
-      <name>I2 New York</name>
-      <node id="n20">
-        <hostname>node1.newy.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">988000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n21">
-        <hostname>node2.newy.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s13">
-      <name>I2 Wash DC</name>
-      <node id="n22">
-        <hostname>node1.wash.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">964000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n23">
-        <hostname>node2.wash.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s14">
-      <name>Georgia Tech</name>
-      <node id="n45">
-        <hostname>node1.gatech.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">999000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s15">
-      <name>I2 Atlanta</name>
-      <node id="n26">
-        <hostname>node1.atla.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">964000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n27">
-        <hostname>node2.atla.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s16">
-      <name>CESNET</name>
-      <node id="n42">
-        <hostname>node1.cesnet.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n43">
-        <hostname>node2.cesnet.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-      </node>
-    </site>
-    <site id="s17">
-      <name>I2 Kansas City</name>
-      <node id="n28">
-        <hostname>node1.kans.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">961000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n29">
-        <hostname>node2.kans.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s19">
-      <name>I2 Houston</name>
-      <node id="n30">
-        <hostname>node1.hous.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">964000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n31">
-        <hostname>node2.hous.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s20">
-      <name>I2 Los Angeles</name>
-      <node id="n36">
-        <hostname>node1.losa.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">964000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n37">
-        <hostname>node2.losa.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s21">
-      <name>I2 Seattle</name>
-      <node id="n34">
-        <hostname>node1.seat.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">975000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n35">
-        <hostname>node2.seat.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s22">
-      <name>I2 Salt Lake</name>
-      <node id="n32">
-        <hostname>node1.salt.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">962000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-      <node id="n33">
-        <hostname>node2.salt.internet2.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-        <bw_limit units="kbps">1000000</bw_limit>
-      </node>
-    </site>
-    <site id="s24">
-      <name>UMKC</name>
-      <node id="n48">
-        <hostname>node1.umkc.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">999000</bw_unallocated>
-      </node>
-      <node id="n50">
-        <hostname>node2.umkc.vini-veritas.net</hostname>
-        <bw_unallocated units="kbps">1000000</bw_unallocated>
-      </node>
-    </site>
-    <link endpoints="s2 s12">
-      <description>Princeton -- I2 New York</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s4 s5">
-      <description>NLR Chicago -- NLR Houston</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s4 s6">
-      <description>NLR Chicago -- NLR Atlanta</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s4 s7">
-      <description>NLR Chicago -- NLR Seattle</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s4 s9">
-      <description>NLR Chicago -- NLR New York</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s4 s10">
-      <description>NLR Chicago -- NLR Wash DC</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s5 s6">
-      <description>NLR Houston -- NLR Atlanta</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s5 s8">
-      <description>NLR Houston -- NLR Los Angeles</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s6 s10">
-      <description>NLR Atlanta -- NLR Wash DC</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s6 s14">
-      <description>NLR Atlanta -- Georgia Tech</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s7 s8">
-      <description>NLR Seattle -- NLR Los Angeles</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s9 s10">
-      <description>NLR New York -- NLR Wash DC</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s11 s13">
-      <description>I2 Chicago -- I2 Wash DC</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s11 s15">
-      <description>I2 Chicago -- I2 Atlanta</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s11 s16">
-      <description>I2 Chicago -- CESNET</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s11 s17">
-      <description>I2 Chicago -- I2 Kansas City</description>
-      <bw_unallocated units="kbps">987000</bw_unallocated>
-    </link>
-    <link endpoints="s12 s13">
-      <description>I2 New York -- I2 Wash DC</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s13 s15">
-      <description>I2 Wash DC -- I2 Atlanta</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s14 s15">
-      <description>Georgia Tech -- I2 Atlanta</description>
-      <bw_unallocated units="kbps">1000000</bw_unallocated>
-    </link>
-    <link endpoints="s15 s19">
-      <description>I2 Atlanta -- I2 Houston</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s17 s19">
-      <description>I2 Kansas City -- I2 Houston</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s17 s22">
-      <description>I2 Kansas City -- I2 Salt Lake</description>
-      <bw_unallocated units="kbps">987000</bw_unallocated>
-    </link>
-    <link endpoints="s17 s24">
-      <description>I2 Kansas City -- UMKC</description>
-      <bw_unallocated units="kbps">999000</bw_unallocated>
-    </link>
-    <link endpoints="s19 s20">
-      <description>I2 Houston -- I2 Los Angeles</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s20 s21">
-      <description>I2 Los Angeles -- I2 Seattle</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s20 s22">
-      <description>I2 Los Angeles -- I2 Salt Lake</description>
-      <bw_unallocated units="kbps">988000</bw_unallocated>
-    </link>
-    <link endpoints="s21 s22">
-      <description>I2 Seattle -- I2 Salt Lake</description>
-      <bw_unallocated units="kbps">987000</bw_unallocated>
-    </link>
-  </network>
-</RSpec>
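The RSpec above pairs each node's hard bandwidth cap (bw_limit) with the share still free for virtual links (bw_unallocated), both in kbps; inter-site links carry their own bw_unallocated figure. A quick sanity check on one entry (values copied from the XML; reading the gap as bandwidth already reserved by existing virtual links is an assumption based on the ViniNode accounting in the next file):

    # node1.chic.internet2.vini-veritas.net, values in kbps (copied from the XML above)
    bw_limit, bw_unallocated = 1000000, 963000
    reserved_kbps = bw_limit - bw_unallocated
    assert reserved_kbps == 37000    # i.e. 37 Mbit/s already committed to virtual links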
diff --git a/sfa/managers/vini/vini_network.py b/sfa/managers/vini/vini_network.py
deleted file mode 100644 (file)
index fcade6c..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-from __future__ import with_statement
-from sfa.util.faults import *
-from xmlbuilder import XMLBuilder
-from lxml import etree
-import sys
-from sfa.plc.network import *
-from sfa.managers.vini.topology import PhysicalLinks
-
-# Taken from bwlimit.py
-#
-# See tc_util.c and http://physics.nist.gov/cuu/Units/binary.html. Be
-# warned that older versions of tc interpret "kbps", "mbps", "mbit",
-# and "kbit" to mean (in this system) "kibps", "mibps", "mibit", and
-# "kibit" and that if an older version is installed, all rates will
-# be off by a small fraction.
-suffixes = {
-    "":         1,
-    "bit":     1,
-    "kibit":   1024,
-    "kbit":    1000,
-    "mibit":   1024*1024,
-    "mbit":    1000000,
-    "gibit":   1024*1024*1024,
-    "gbit":    1000000000,
-    "tibit":   1024*1024*1024*1024,
-    "tbit":    1000000000000,
-    "bps":     8,
-    "kibps":   8*1024,
-    "kbps":    8000,
-    "mibps":   8*1024*1024,
-    "mbps":    8000000,
-    "gibps":   8*1024*1024*1024,
-    "gbps":    8000000000,
-    "tibps":   8*1024*1024*1024*1024,
-    "tbps":    8000000000000
-}
-
-
-def get_tc_rate(s):
-    """
-    Parses an integer or a tc rate string (e.g., 1.5mbit) into bits/second
-    """
-
-    if type(s) == int:
-        return s
-    m = re.match(r"([0-9.]+)(\D*)", s)
-    if m is None:
-        return -1
-    suffix = m.group(2).lower()
-    if suffixes.has_key(suffix):
-        return int(float(m.group(1)) * suffixes[suffix])
-    else:
-        return -1
-
-def format_tc_rate(rate):
-    """
-    Formats a bits/second rate into a tc rate string
-    """
-
-    if rate >= 1000000000 and (rate % 1000000000) == 0:
-        return "%.0fgbit" % (rate / 1000000000.)
-    elif rate >= 1000000 and (rate % 1000000) == 0:
-        return "%.0fmbit" % (rate / 1000000.)
-    elif rate >= 1000:
-        return "%.0fkbit" % (rate / 1000.)
-    else:
-        return "%.0fbit" % rate
-
-
-class ViniNode(Node):
-    def __init__(self, network, node, bps = 1000 * 1000000):
-        Node.__init__(self, network, node)
-        self.bps = bps
-        self.links = set()
-        self.name = self.hostname.replace('.vini-veritas.net', '')
-
-    def get_link_id(self, remote):
-        if self.id < remote.id:
-            link = (self.id<<7) + remote.id
-        else:
-            link = (remote.id<<7) + self.id
-        return link
-        
-    def get_iface_id(self, remote):
-        if self.id < remote.id:
-            iface = 1
-        else:
-            iface = 2
-        return iface
-    
-    def get_virt_ip(self, remote):
-        link = self.get_link_id(remote)
-        iface = self.get_iface_id(remote)
-        first = link >> 6
-        second = ((link & 0x3f)<<2) + iface
-        return "192.168.%d.%d" % (first, second)
-
-    def get_virt_net(self, remote):
-        link = self.get_link_id(remote)
-        first = link >> 6
-        second = (link & 0x3f)<<2
-        return "192.168.%d.%d/30" % (first, second)
-        
-    def get_topo_rspec(self, link):
-        if link.end1 == self:
-            remote = link.end2
-        elif link.end2 == self:
-            remote = link.end1
-        else:
-            raise Error("Link does not connect to Node")
-            
-        my_ip = self.get_virt_ip(remote)
-        remote_ip = remote.get_virt_ip(self)
-        net = self.get_virt_net(remote)
-        bw = format_tc_rate(link.bps)
-        ipaddr = remote.get_primary_iface().ipv4
-        return (remote.id, ipaddr, bw, my_ip, remote_ip, net)
-        
-    def add_link(self, link):
-        self.links.add(link)
-        
-    # Assumes there is at most one Link between two sites
-    def get_sitelink(self, node):
-        site1 = self.network.sites[self.site_id]
-        site2 = self.network.sites[node.site_id]
-        sl = site1.links.intersection(site2.links)
-        if len(sl):
-            return sl.pop()
-        return None
-
-    def toxml(self, xml):
-        slice = self.network.slice
-        if self.whitelist and not self.sliver:
-            if not slice or slice.id not in self.whitelist:
-                return
-
-        with xml.node(id = self.idtag):
-            with xml.hostname:
-                xml << self.hostname
-            with xml.bw_unallocated(units="kbps"):
-                xml << str(int(self.bps/1000))
-            self.get_primary_iface().toxml(xml)
-            if self.sliver:
-                self.sliver.toxml(xml)
-
-
-class ViniSite(Site):
-    def __init__(self, network, site):
-        Site.__init__(self, network, site)
-        self.links = set()
-
-    def add_link(self, link):
-        self.links.add(link)
-
-class ViniSlice(Slice):
-    def assign_egre_key(self):
-        tag = self.get_tag('egre_key')
-        if not tag:
-            try:
-                key = self.network.free_egre_key()
-            except:
-                raise InvalidRSpec("ran out of EGRE keys!")
-            tag = self.update_tag('egre_key', key, None, 'admin')
-        return
-            
-    def turn_on_netns(self):
-        tag = self.get_tag('netns')
-        if (not tag) or (tag.value != '1'):
-            tag = self.update_tag('netns', '1', None, 'admin')
-        return
-   
-    def turn_off_netns(self):
-        tag = self.get_tag('netns')
-        if tag and (tag.value != '0'):
-            tag.delete()
-        return
-    
-    def add_cap_net_admin(self):
-        tag = self.get_tag('capabilities')
-        if tag:
-            caps = tag.value.split(',')
-            for cap in caps:
-                if cap == "CAP_NET_ADMIN":
-                    newcaps = tag.value
-                    break
-            else:
-                newcaps = "CAP_NET_ADMIN," + tag.value
-            self.update_tag('capabilities', newcaps, None, 'admin')
-        else:
-            tag = self.add_tag('capabilities', 'CAP_NET_ADMIN', None, 'admin')
-        return
-    
-    def remove_cap_net_admin(self):
-        tag = self.get_tag('capabilities')
-        if tag:
-            caps = tag.value.split(',')
-            newcaps = []
-            for cap in caps:
-                if cap != "CAP_NET_ADMIN":
-                    newcaps.append(cap)
-            if newcaps:
-                value = ','.join(newcaps)
-                self.update_tag('capabilities', value, None, 'admin')
-            else:
-                tag.delete()
-        return
-
-class Link:
-    def __init__(self, end1, end2, bps = 1000 * 1000000, parent = None):
-        self.end1 = end1
-        self.end2 = end2
-        self.bps = bps
-        self.parent = parent
-        self.children = []
-
-        end1.add_link(self)
-        end2.add_link(self)
-        
-        if self.parent:
-            self.parent.children.append(self)
-            
-    def toxml(self, xml):
-        end_ids = "%s %s" % (self.end1.idtag, self.end2.idtag)
-
-        if self.parent:
-            with  xml.vlink(endpoints=end_ids):
-                with xml.description:
-                    xml << "%s -- %s" % (self.end1.name, self.end2.name)
-                with xml.kbps:
-                    xml << str(int(self.bps/1000))
-        else:
-            with xml.link(endpoints=end_ids):
-                with xml.description:
-                    xml << "%s -- %s" % (self.end1.name, self.end2.name)
-                with xml.bw_unallocated(units="kbps"):
-                    xml << str(int(self.bps/1000))
-                for child in self.children:
-                    child.toxml(xml)
-        
-
-
-class ViniNetwork(Network):
-    def __init__(self, api, type = "SFA"):
-        Network.__init__(self, api, type)
-        self.sitelinks = []
-        self.nodelinks = []
-    
-        for (s1, s2) in PhysicalLinks:
-            self.sitelinks.append(Link(self.sites[s1], self.sites[s2]))
-        
-        for t in self.tags:
-            tag = self.tags[t]
-            if tag.tagname == 'topo_rspec':
-                node1 = self.nodes[tag.node_id]
-                l = eval(tag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    allocbps = get_tc_rate(bw)
-                    node1.bps -= allocbps
-                    try:
-                        node2 = self.nodes[id]
-                        if node1.id < node2.id:
-                            sl = node1.get_sitelink(node2)
-                            sl.bps -= allocbps
-                    except:
-                        pass
-
-    def lookupSiteLink(self, node1, node2):
-        site1 = self.sites[node1.site_id]
-        site2 = self.sites[node2.site_id]
-        for link in self.sitelinks:
-            if site1 == link.end1 and site2 == link.end2:
-                return link
-            if site2 == link.end1 and site1 == link.end2:
-                return link
-        return None
-    
-
-    """
-    Check the requested topology against the available topology and capacity
-    """
-    def verifyTopology(self):
-        for link in self.nodelinks:
-            if link.bps <= 0:
-                raise InvalidRSpec("must request positive bandwidth")
-                
-            n1 = link.end1
-            n2 = link.end2
-            sitelink = self.lookupSiteLink(n1, n2)
-            if not sitelink:
-                raise InvalidRSpec("nodes %s and %s are not adjacent" % 
-                                   (n1.idtag, n2.idtag))
-            if sitelink.bps < link.bps:
-                raise InvalidRSpec("not enough capacity between %s and %s" % 
-                                   (n1.idtag, n2.idtag))
-                
-    def __add_vlink(self, vlink, parent = None):
-        n1 = n2 = None
-        endpoints = vlink.get("endpoints")
-        if endpoints:
-            (end1, end2) = endpoints.split()
-            n1 = self.lookupNode(end1)
-            n2 = self.lookupNode(end2)
-        elif parent:
-            """ Try to infer the endpoints for the virtual link """
-            site_endpoints = parent.get("endpoints")
-            (n1, n2) = self.__infer_endpoints(site_endpoints)
-        else:
-            raise InvalidRSpec("no endpoints given")
-
-        #print "Added virtual link: %s -- %s" % (n1.tag, n2.tag)
-        bps = int(vlink.findtext("kbps")) * 1000
-        sitelink = self.lookupSiteLink(n1, n2)
-        if not sitelink:
-            raise InvalidRSpec("nodes %s and %s are not adjacent" % 
-                                  (n1.idtag, n2.idtag))
-        self.nodelinks.append(Link(n1, n2, bps, sitelink))
-        return
-
-    """ 
-    Infer the endpoints of the virtual link.  If the slice exists on 
-    only a single node at each end of the physical link, we'll assume that
-    the user wants the virtual link to terminate at these nodes.
-    """
-    def __infer_endpoints(self, endpoints):
-        n = []
-        ends = endpoints.split()
-        for end in ends:
-            found = 0
-            site = self.lookupSite(end)
-            for id in site.node_ids:
-                if id in self.nodedict:
-                    n.append(self.nodedict[id])
-                    found += 1
-            if found != 1:
-                raise InvalidRSpec("could not infer endpoint for site %s" % 
-                                   site.idtag)
-        #print "Inferred endpoints: %s %s" % (n[0].idtag, n[1].idtag)
-        return n
-        
-    def addRSpec(self, xml, schema = None):
-        Network.addRSpec(self, xml, schema)
-        self.nodedict = {}
-        for node in self.nodesWithSlivers():
-            self.nodedict[node.id] = node
-        
-        # Find vlinks under link elements
-        for vlink in self.rspec.iterfind("./network/link/vlink"):
-            link = vlink.getparent()
-            self.__add_vlink(vlink, link)
-
-        # Find vlinks that specify endpoints
-        for vlink in self.rspec.iterfind("./request/vlink[@endpoints]"):
-            self.__add_vlink(vlink)
-
-
-    def addSlice(self):
-        Network.addSlice(self)
-
-        for node in self.slice.get_nodes():
-            linktag = self.slice.get_tag('topo_rspec', node)
-            if linktag:
-                l = eval(linktag.value)
-                for (id, realip, bw, lvip, rvip, vnet) in l:
-                    if node.id < id:
-                        bps = get_tc_rate(bw)
-                        remote = self.lookupNode(id)
-                        sitelink = self.lookupSiteLink(node, remote)
-                        self.nodelinks.append(Link(node,remote,bps,sitelink))
-
-
-    def updateSliceTags(self):
-        slice = self.slice
-
-        tag = slice.update_tag('vini_topo', 'manual', None, 'admin')
-        slice.assign_egre_key()
-        slice.turn_on_netns()
-        slice.add_cap_net_admin()
-
-        for node in self.nodesWithSlivers():
-            linkdesc = []
-            for link in node.links:
-                linkdesc.append(node.get_topo_rspec(link))
-            if linkdesc:
-                topo_str = "%s" % linkdesc
-                tag = slice.update_tag('topo_rspec', topo_str, node, 'admin')
-
-        # Expire the un-updated topo_rspec tags
-        for tag in self.getSliceTags():
-            if tag.tagname in ['topo_rspec']:
-                if not tag.was_updated():
-                    tag.delete()
-
-        Network.updateSliceTags(self)
-
-    """
-    Find a free EGRE key
-    """
-    def free_egre_key(self):
-        used = set()
-        for tag in self.getSliceTags():
-            if tag.tagname == 'egre_key':
-                used.add(int(tag.value))
-                
-        for i in range(1, 256):
-            if i not in used:
-                key = i
-                break
-        else:
-            raise KeyError("No more EGRE keys available")
-        
-        return str(key)
-
-    """
-    Produce XML directly from the topology specification.
-    """
-    def toxml(self):
-        xml = XMLBuilder(format = True, tab_step = "  ")
-        with xml.RSpec(type=self.type):
-            if self.slice:
-                element = xml.network(name=self.api.hrn, slice=self.slice.hrn)
-            else:
-                element = xml.network(name=self.api.hrn)
-
-            with element:
-                if self.slice:
-                    self.slice.toxml(xml)
-                for site in self.getSites():
-                    site.toxml(xml)
-                for link in self.sitelinks:
-                    link.toxml(xml)
-
-        header = '<?xml version="1.0"?>\n'
-        return header + str(xml)
-
-    """
-    Create a dictionary of ViniSite objects keyed by site ID
-    """
-    def get_sites(self, api):
-        tmp = []
-        for site in api.plshell.GetSites(api.plauth, {'peer_id': None}):
-            t = site['site_id'], ViniSite(self, site)
-            tmp.append(t)
-        return dict(tmp)
-
-
-    """
-    Create a dictionary of ViniNode objects keyed by node ID
-    """
-    def get_nodes(self, api):
-        tmp = []
-        for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
-            t = node['node_id'], ViniNode(self, node)
-            tmp.append(t)
-        return dict(tmp)
-
-    """
-    Return a ViniSlice object for a single slice
-    """
-    def get_slice(self, api, hrn):
-        slicename = hrn_to_pl_slicename(hrn)
-        slice = api.plshell.GetSlices(api.plauth, [slicename])
-        if slice:
-            self.slice = ViniSlice(self, slicename, slice[0])
-            return self.slice
-        else:
-            return None
-
-
-    
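For reference, a minimal self-contained sketch of the arithmetic the module above performs for tc rate strings and virtual-link addressing; it only restates constants visible in the code and is not part of any file in this change:

    # tc rates: get_tc_rate("1.5mbit") applies the "mbit" multiplier (1000000),
    # and format_tc_rate falls through to the kbit branch for 1500000 bits/s.
    rate = int(1.5 * 1000000)                       # -> 1500000
    assert "%.0fkbit" % (rate / 1000.) == "1500kbit"

    # virtual-link addressing in ViniNode, for node ids 5 and 9 (5 < 9):
    link = (5 << 7) + 9                             # link id 649
    first, base = link >> 6, (link & 0x3f) << 2
    assert (first, base) == (10, 36)
    # subnet 192.168.10.36/30; node 5 gets 192.168.10.37 (iface 1), node 9 gets .38 (iface 2)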
index 7c1bf8a..74ce9de 100644 (file)
@@ -1,5 +1,3 @@
-### $Id: register.py 16477 2010-01-05 16:31:37Z thierry $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/register.py $
 
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
@@ -45,6 +43,4 @@ class CreateGid(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, xrn, self.name))
 
-        manager = self.api.get_interface_manager()
-
-        return manager.create_gid(self.api, xrn, cert)
+        return self.api.manager.create_gid(self.api, xrn, cert)
index 7895de3..dd76d9b 100644 (file)
@@ -1,9 +1,10 @@
-from sfa.util.faults import *
+from sfa.util.faults import SfaInvalidArgument
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.util.sfatablesRuntime import run_sfatables
 from sfa.trust.credential import Credential
+from sfa.rspecs.rspec import RSpec
 
 class CreateSliver(Method):
     """
@@ -42,8 +43,6 @@ class CreateSliver(Method):
             msg = "'users' musst be specified and cannot be null. You may need to update your client." 
             raise SfaInvalidArgument(name='users', extra=msg)  
 
-        manager = self.api.get_interface_manager()
-        
         # filter rspec through sfatables
         if self.api.interface in ['aggregate']:
             chain_name = 'INCOMING'
@@ -51,5 +50,9 @@ class CreateSliver(Method):
             chain_name = 'FORWARD-INCOMING'
         self.api.logger.debug("CreateSliver: sfatables on chain %s"%chain_name)
         rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
-
-        return manager.CreateSliver(self.api, slice_xrn, creds, rspec, users, call_id)
+        slivers = RSpec(rspec).version.get_nodes_with_slivers()
+        if slivers:
+            result = self.api.manager.CreateSliver(self.api, slice_xrn, creds, rspec, users, call_id)
+        else:
+            result = rspec     
+        return result
index ae30177..58b8846 100644 (file)
@@ -1,4 +1,3 @@
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -33,7 +32,6 @@ class DeleteSliver(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.DeleteSliver(self.api, xrn, creds, call_id)
+        self.api.manager.DeleteSliver(self.api, xrn, creds, call_id)
  
         return 1 
index 34a4cb9..b645cd4 100644 (file)
@@ -1,6 +1,3 @@
-#
-from sfa.trust.rights import *
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -45,7 +42,5 @@ class GetCredential(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))        
 
-        manager = self.api.get_interface_manager()
-        
-        return manager.get_credential(self.api, xrn, type)
+        return self.api.manager.get_credential(self.api, xrn, type)
 
index 37ad796..601db6f 100644 (file)
@@ -1,9 +1,6 @@
-from sfa.util.faults import *
+from sfa.util.faults import RecordNotFound
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
-from sfa.trust.gid import GID
-from sfa.trust.certificate import Certificate
 from sfa.trust.credential import Credential
 
 class GetGids(Method):
@@ -30,15 +27,14 @@ class GetGids(Method):
     def call(self, xrns, creds):
         # validate the credential
         valid_creds = self.api.auth.checkCredentials(creds, 'getgids')
+        # xxxpylintxxx origin_hrn is unused..
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         
         # resolve the record
-        manager = self.api.get_interface_manager()
-        records = manager.resolve(self.api, xrns, full = False)
+        records = self.api.manager.resolve(self.api, xrns, full = False)
         if not records:
-            raise RecordNotFound(hrns)
+            raise RecordNotFound(xrns)
 
-        gids = []
         allowed_fields =  ['hrn', 'type', 'gid']
         for record in records:
             for key in record.keys():
index 6a8261c..37f2e7b 100644 (file)
@@ -1,12 +1,10 @@
 
-from sfa.util.faults import *
+from sfa.util.faults import RecordNotFound, ConnectionKeyGIDMismatch
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.util.record import SfaRecord
-from sfa.trust.credential import Credential
 from sfa.trust.certificate import Certificate
-from sfa.trust.rights import Right, Rights
 
 class GetSelfCredential(Method):
     """
@@ -54,10 +52,9 @@ class GetSelfCredential(Method):
         origin_hrn = Certificate(string=cert).get_subject()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
         
-        manager = self.api.get_interface_manager()
  
         # authenticate the gid
-        records = manager.resolve(self.api, xrn, type)
+        records = self.api.manager.resolve(self.api, xrn, type)
         if not records:
             raise RecordNotFound(hrn)
         record = SfaRecord(dict=records[0])
@@ -74,4 +71,4 @@ class GetSelfCredential(Method):
                     self.api.logger.debug("ConnectionKeyGIDMismatch, %s filename: %s"%(name,obj.filename))
             raise ConnectionKeyGIDMismatch(gid.get_subject())
         
-        return manager.get_credential(self.api, xrn, type, is_self=True)
+        return self.api.manager.get_credential(self.api, xrn, type, is_self=True)
index e175cfe..c21ce16 100644 (file)
@@ -1,15 +1,10 @@
-### $Id: get_ticket.py 17732 2010-04-19 21:10:45Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/get_ticket.py $
-import time
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
-from sfa.util.config import Config
-from sfa.trust.credential import Credential
 from sfa.util.sfatablesRuntime import run_sfatables
 
+from sfa.trust.credential import Credential
+
 class GetTicket(Method):
     """
     Retrieve a ticket. This operation is currently implemented on PLC
@@ -48,8 +43,6 @@ class GetTicket(Method):
         #log the call
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager()
-
         # filter rspec through sfatables
         if self.api.interface in ['aggregate']:
             chain_name = 'OUTGOING'
@@ -58,7 +51,5 @@ class GetTicket(Method):
         rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
         
         # remove nodes that are not available at this interface from the rspec
-        ticket = manager.get_ticket(self.api, xrn, creds, rspec, users)
-        
-        return ticket
+        return self.api.manager.get_ticket(self.api, xrn, creds, rspec, users)
         
index 2a8f6b2..8c215c4 100644 (file)
@@ -1,4 +1,3 @@
-from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter
 
@@ -14,5 +13,4 @@ class GetVersion(Method):
 
     def call(self):
         self.api.logger.info("interface: %s\tmethod-name: %s" % (self.api.interface, self.name))
-        manager = self.api.get_interface_manager()
-        return manager.GetVersion(self.api)
+        return self.api.manager.GetVersion(self.api)
index 8b4fcbe..ccd66b4 100644 (file)
@@ -1,5 +1,4 @@
 
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -32,5 +31,4 @@ class List(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
        
-        manager = self.api.get_interface_manager()
-        return manager.list(self.api, xrn) 
+        return self.api.manager.list(self.api, xrn) 
index fb83117..a12447f 100644 (file)
@@ -1,7 +1,5 @@
-import sys
 import zlib
 
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -38,9 +36,7 @@ class ListResources(Method):
         origin_hrn = options.get('origin_hrn', None)
         if not origin_hrn:
             origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
-        # get manager for this interface    
-        manager = self.api.get_interface_manager()
-        rspec = manager.ListResources(self.api, creds, options, call_id)
+        rspec = self.api.manager.ListResources(self.api, creds, options, call_id)
 
         # filter rspec through sfatables 
         if self.api.interface in ['aggregate']:
index fa65b07..e8521c1 100644 (file)
@@ -1,7 +1,5 @@
-from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
 from sfa.trust.credential import Credential
  
 class ListSlices(Method):
@@ -29,6 +27,5 @@ class ListSlices(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        return manager.ListSlices(self.api, creds, call_id)
+        return self.api.manager.ListSlices(self.api, creds, call_id)
  
index f66e90c..c6a75e2 100644 (file)
@@ -1,7 +1,3 @@
-### $Id: reset_slice.py 15428 2009-10-23 15:28:03Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfacomponent/methods/reset_slice.py $
-import xmlrpclib
-from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 
@@ -27,8 +23,6 @@ class RedeemTicket(Method):
         valid_creds = self.api.auth.checkCredentials(cred, 'redeemticket')
         self.api.auth.check_ticket(ticket)
 
-        
         # send the call to the right manager
-        manager = self.api.get_interface_manager()
-        manager.redeem_ticket(self.api, ticket) 
+        self.api.manager.redeem_ticket(self.api, ticket) 
         return 1 
index 6f61870..92f230a 100644 (file)
@@ -1,14 +1,5 @@
-### $Id: register.py 16477 2010-01-05 16:31:37Z thierry $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/register.py $
-
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import *
-from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.util.record import SfaRecord
-from sfa.trust.auth import Auth
-from sfa.trust.gid import create_uuid
 from sfa.trust.credential import Credential
 
 class Register(Method):
@@ -45,6 +36,4 @@ class Register(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
         
-        manager = self.api.get_interface_manager()
-
-        return manager.register(self.api, record)
+        return self.api.manager.register(self.api, record)
index 864b1d5..2eec2f5 100644 (file)
@@ -1,17 +1,9 @@
-### $Id: register.py 15001 2009-09-11 20:18:54Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/register.py $
-
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import *
-
-from sfa.util.faults import *
+from sfa.util.faults import SfaInvalidArgument
 from sfa.util.xrn import get_authority
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
-from sfa.trust.auth import Auth
-from sfa.trust.gid import create_uuid
 from sfa.trust.credential import Credential
 
 class RegisterPeerObject(Method):
index 73437a3..98ef38b 100644 (file)
@@ -1,4 +1,3 @@
-from sfa.util.faults import *
 from sfa.util.xrn import Xrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -40,6 +39,4 @@ class Remove(Method):
         self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s"%(
                 self.api.interface, self.name, origin_hrn, xrn.get_urn()))
 
-        manager = self.api.get_interface_manager()
-
-        return manager.remove(self.api, xrn) 
+        return self.api.manager.remove(self.api, xrn) 
index 460aa98..c831924 100644 (file)
@@ -1,11 +1,8 @@
-from sfa.util.faults import *
+from sfa.util.faults import UnknownSfaType, SfaInvalidArgument
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
-from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
 from sfa.trust.credential import Credential
-from types import StringTypes
 
 class RemovePeerObject(Method):
     """
@@ -58,33 +55,33 @@ class RemovePeerObject(Method):
     def remove_plc_record(self, record):
         type = record['type']        
         if type == "user":
-            persons = self.api.plshell.GetPersons(self.api.plauth, {'person_id' : record['pointer']})
+            persons = self.api.driver.GetPersons({'person_id' : record['pointer']})
             if not persons:
                 return 1
             person = persons[0]
             if person['peer_id']:
                 peer = self.get_peer_name(person['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer)
-            self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer)
+            self.api.driver.DeletePerson(person['person_id'])
            
         elif type == "slice":
-            slices=self.api.plshell.GetSlices(self.api.plauth, {'slice_id' : record['pointer']})
+            slices=self.api.driver.GetSlices({'slice_id' : record['pointer']})
             if not slices:
                 return 1
             slice=slices[0]
             if slice['peer_id']:
                 peer = self.get_peer_name(slice['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
-            self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.api.driver.DeleteSlice(slice['slice_id'])
         elif type == "authority":
-            sites=self.api.plshell.GetSites(self.api.plauth, {'site_id' : record['pointer']})
+            sites=self.api.driver.GetSites({'site_id' : record['pointer']})
             if not sites:
                 return 1
             site=sites[0]
             if site['peer_id']:
                 peer = self.get_peer_name(site['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer)
-            self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer)
+            self.api.driver.DeleteSite(site['site_id'])
            
         else:
             raise UnknownSfaType(type)
@@ -92,7 +89,7 @@ class RemovePeerObject(Method):
         return 1
 
     def get_peer_name(self, peer_id):
-        peers = self.api.plshell.GetPeers(self.api.plauth, [peer_id], ['peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers([peer_id], ['peername', 'shortname', 'hrn_root'])
         if not peers:
             raise SfaInvalidArgument, "No such peer"
         peer = peers[0]
index 1669517..b34d424 100644 (file)
@@ -1,10 +1,11 @@
-from sfa.util.faults import *
+import datetime
+
+from sfa.util.faults import InsufficientRights
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter
 from sfa.trust.credential import Credential
 from sfa.util.sfatime import utcparse
-import datetime
 
 class RenewSliver(Method):
     """
@@ -39,6 +40,5 @@ class RenewSliver(Method):
             raise InsufficientRights('Renewsliver: Credential expires before requested expiration time')
         if requested_time > datetime.datetime.utcnow() + datetime.timedelta(days=max_renew_days):
             raise Exception('Cannot renew > %s days from now' % max_renew_days)
-        manager = self.api.get_interface_manager()
-        return manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, call_id)    
+        return self.api.manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, call_id)    
     
index 49104b2..6277e1b 100644 (file)
@@ -1,8 +1,5 @@
-### $Id: resolve.py 17157 2010-02-21 04:19:34Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/resolve.py $
-import traceback
 import types
-from sfa.util.faults import *
+
 from sfa.util.xrn import Xrn, urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -43,8 +40,5 @@ class Resolve(Method):
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrns, self.name))
  
         # send the call to the right manager
-        manager = self.api.get_interface_manager()
-        return manager.resolve(self.api, xrns, type)
-
-
+        return self.api.manager.resolve(self.api, xrns, type)
             
index c6e5272..d978197 100644 (file)
@@ -1,7 +1,5 @@
-from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter
-from sfa.trust.credential import Credential
 
 class ResolveGENI(Method):
     """
index 00142b6..9788608 100644 (file)
@@ -1,5 +1,3 @@
-from sfa.util.faults import *
-from sfa.util.method import Method
 from sfa.util.parameter import Parameter
 from sfa.methods.Stop import Stop
 
index 231bec5..2f4bc0c 100644 (file)
@@ -1,4 +1,3 @@
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
@@ -25,8 +24,7 @@ class SliverStatus(Method):
 
         self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
     
-        manager = self.api.get_interface_manager()
-        status = manager.SliverStatus(self.api, hrn, valid_creds, call_id)
+        status = self.api.manager.SliverStatus(self.api, hrn, valid_creds, call_id)
 
         return status
     
index e1ca60e..7f8aefd 100644 (file)
@@ -1,11 +1,6 @@
-### $Id: stop_slice.py 17732 2010-04-19 21:10:45Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/stop_slice.py $
-
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
 from sfa.trust.credential import Credential
 
 class Start(Method):
@@ -35,7 +30,6 @@ class Start(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.start_slice(self.api, xrn, creds)
+        self.api.manager.start_slice(self.api, xrn, creds)
  
         return 1 
index 579a77d..48974ab 100644 (file)
@@ -1,11 +1,6 @@
-### $Id: stop_slice.py 17732 2010-04-19 21:10:45Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/stop_slice.py $
-
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
 from sfa.trust.credential import Credential
  
 class Stop(Method):
@@ -35,7 +30,6 @@ class Stop(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.stop_slice(self.api, xrn, creds)
+        self.api.manager.stop_slice(self.api, xrn, creds)
  
         return 1 
index aa881ea..a90a44e 100644 (file)
@@ -1,7 +1,5 @@
-import time
-from sfa.util.faults import *
 from sfa.util.method import Method
-from sfa.util.parameter import Parameter, Mixed
+from sfa.util.parameter import Parameter
 from sfa.trust.credential import Credential
 
 class Update(Method):
@@ -37,7 +35,5 @@ class Update(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
        
-        manager = self.api.get_interface_manager()
-        return manager.update(self.api, record_dict)
+        return self.api.manager.update(self.api, record_dict)
 
index 8307572..f9baae4 100644 (file)
@@ -1,5 +1,3 @@
-from sfa.util.faults import *
-from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.methods.CreateSliver import CreateSliver
 
index 59d6001..23c8d60 100644 (file)
@@ -1,9 +1,6 @@
-from types import StringTypes
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
 from sfa.server.aggregate import Aggregates
 
 class get_aggregates(Method):
index 9cec0ec..638332e 100644 (file)
@@ -1,11 +1,10 @@
 import os
 import tempfile
 import commands
-from sfa.util.faults import *
+from sfa.util.faults import NonExistingRecord, RecordNotFound
 from sfa.util.xrn import hrn_to_urn
 from sfa.util.method import Method
-from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
+from sfa.util.parameter import Parameter
 from sfa.util.table import SfaTable
 from sfa.trust.certificate import Keypair
 from sfa.trust.gid import create_uuid
@@ -26,10 +25,10 @@ class get_key(Method):
         # verify that the caller's ip address exists in the db and is an interface
         # for a node in the db
         (ip, port) = self.api.remote_addr
-        interfaces = self.api.plshell.GetInterfaces(self.api.plauth, {'ip': ip}, ['node_id'])
+        interfaces = self.api.driver.GetInterfaces({'ip': ip}, ['node_id'])
         if not interfaces:
             raise NonExistingRecord("no such ip %(ip)s" % locals())
-        nodes = self.api.plshell.GetNodes(self.api.plauth, [interfaces[0]['node_id']], ['node_id', 'hostname'])
+        nodes = self.api.driver.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
         if not nodes:
             raise NonExistingRecord("no such node using ip %(ip)s" % locals())
         node = nodes[0]
index b404bb9..65d9444 100644 (file)
@@ -1,9 +1,6 @@
-from types import StringTypes
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
 from sfa.server.registry import Registries
 
 class get_registries(Method):
@@ -28,7 +25,7 @@ class get_registries(Method):
     def call(self, cred, xrn = None):
         hrn, type = urn_to_hrn(xrn)
         self.api.auth.check(cred, 'list')
-        registries = Registries(self.api).interfaces.values()
+        registries = Registries(self.api).values()
         if hrn:
             registries = [reg for reg in registries if reg['hrn'] == hrn] 
         return registries
index 704fd42..460ab4d 100644 (file)
@@ -1,4 +1,4 @@
-from sfa.util.faults import *
+#from sfa.util.faults import *
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.trust.auth import Auth
index e89f18b..42ef240 100644 (file)
@@ -1,17 +1,10 @@
-### $Id: register.py 15001 2009-09-11 20:18:54Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/register.py $
 
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import *
-
-from sfa.util.faults import *
+from sfa.util.faults import SfaInvalidArgument
 from sfa.util.xrn import get_authority
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
-from sfa.trust.auth import Auth
-from sfa.trust.gid import create_uuid
 from sfa.trust.credential import Credential
 
 class register_peer_object(Method):
index 41d74dc..465ed05 100644 (file)
@@ -1,11 +1,8 @@
-from sfa.util.faults import *
+from sfa.util.faults import UnknownSfaType, SfaInvalidArgument
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
-from sfa.util.record import SfaRecord
 from sfa.util.table import SfaTable
 from sfa.trust.credential import Credential
-from types import StringTypes
 
 class remove_peer_object(Method):
     """
@@ -58,33 +55,33 @@ class remove_peer_object(Method):
     def remove_plc_record(self, record):
         type = record['type']        
         if type == "user":
-            persons = self.api.plshell.GetPersons(self.api.plauth, {'person_id' : record['pointer']})
+            persons = self.api.driver.GetPersons({'person_id' : record['pointer']})
             if not persons:
                 return 1
             person = persons[0]
             if person['peer_id']:
                 peer = self.get_peer_name(person['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer)
-            self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer)
+            self.api.driver.DeletePerson(person['person_id'])
            
         elif type == "slice":
-            slices=self.api.plshell.GetSlices(self.api.plauth, {'slice_id' : record['pointer']})
+            slices=self.api.driver.GetSlices({'slice_id' : record['pointer']})
             if not slices:
                 return 1
             slice=slices[0]
             if slice['peer_id']:
                 peer = self.get_peer_name(slice['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
-            self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.api.driver.DeleteSlice(slice['slice_id'])
         elif type == "authority":
-            sites=self.api.plshell.GetSites(self.api.plauth, {'site_id' : record['pointer']})
+            sites=self.api.driver.GetSites({'site_id' : record['pointer']})
             if not sites:
                 return 1
             site=sites[0]
             if site['peer_id']:
                 peer = self.get_peer_name(site['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer)
-            self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer)
+            self.api.driver.DeleteSite(site['site_id'])
            
         else:
             raise UnknownSfaType(type)
@@ -92,7 +89,7 @@ class remove_peer_object(Method):
         return 1
 
     def get_peer_name(self, peer_id):
-        peers = self.api.plshell.GetPeers(self.api.plauth, [peer_id], ['peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers([peer_id], ['peername', 'shortname', 'hrn_root'])
         if not peers:
             raise SfaInvalidArgument, "No such peer"
         peer = peers[0]
index 9d02364..387981d 100644 (file)
@@ -1,12 +1,6 @@
-### $Id: reset_slices.py 15428 2009-10-23 15:28:03Z tmack $
-### $URL: https://svn.planet-lab.org/svn/sfa/trunk/sfa/methods/reset_slices.py $
-
-from sfa.util.faults import *
 from sfa.util.xrn import urn_to_hrn
 from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
-from sfa.trust.auth import Auth
-from sfa.plc.slices import Slices
 
 class reset_slice(Method):
     """
@@ -31,22 +25,5 @@ class reset_slice(Method):
     def call(self, cred, xrn, origin_hrn=None):
         hrn, type = urn_to_hrn(xrn)
         self.api.auth.check(cred, 'resetslice', hrn)
-        # send the call to the right manager
-        manager_base = 'sfa.managers'
-        if self.api.interface in ['component']:
-            mgr_type = self.api.config.SFA_CM_TYPE
-            manager_module = manager_base + ".component_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn)
-        elif self.api.interface in ['aggregate']:
-            mgr_type = self.api.config.SFA_AGGREGATE_TYPE
-            manager_module = manager_base + ".aggregate_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn)
-        elif self.api.interface in ['slicemgr']:
-            mgr_type = self.api.config.SFA_SM_TYPE
-            manager_module = manager_base + ".slice_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn) 
-
+        self.api.manager.reset_slice (self.api, xrn)
         return 1 
index 83f9324..bda3bbc 100644 (file)
@@ -1,23 +1,23 @@
 #!/usr/bin/python
-from sfa.util.xrn import *
-from sfa.util.plxrn import *
-#from sfa.rspecs.sfa_rspec import SfaRSpec
-#from sfa.rspecs.pg_rspec  import PGRSpec
-#from sfa.rspecs.rspec_version import RSpecVersion
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
+from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+
 from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.util.topology import Topology
 from sfa.rspecs.version_manager import VersionManager
-from sfa.util.bwlimit import get_tc_rate
+from sfa.plc.vlink import get_tc_rate
 
 class Aggregate:
 
     api = None
-    sites = {}
-    nodes = {}
-    interfaces = {}
-    links = {}
-    node_tags = {}
-    pl_initscripts = {} 
-    prepared=False
     #panos new user options variable
     user_options = {}
 
@@ -25,138 +25,214 @@ class Aggregate:
         self.api = api
         self.user_options = user_options
 
-    def prepare_sites(self, force=False):
-        if not self.sites or force:  
-            for site in self.api.plshell.GetSites(self.api.plauth):
-                self.sites[site['site_id']] = site
-    
-    def prepare_nodes(self, force=False):
-        if not self.nodes or force:
-            for node in self.api.plshell.GetNodes(self.api.plauth, {'peer_id': None}):
-                # add site/interface info to nodes.
-                # assumes that sites, interfaces and tags have already been prepared.
-                site = self.sites[node['site_id']]
-                interfaces = [self.interfaces[interface_id] for interface_id in node['interface_ids']]
-                tags = [self.node_tags[tag_id] for tag_id in node['node_tag_ids']]
-                node['network'] = self.api.hrn
-                node['network_urn'] = hrn_to_urn(self.api.hrn, 'authority+am')
-                node['urn'] = hostname_to_urn(self.api.hrn, site['login_base'], node['hostname'])
-                node['site_urn'] = hrn_to_urn(PlXrn.site_hrn(self.api.hrn, site['login_base']), 'authority+sa')
-                node['site'] = site
-                node['interfaces'] = interfaces
-                node['tags'] = tags
-                self.nodes[node['node_id']] = node
-
-    def prepare_interfaces(self, force=False):
-        if not self.interfaces or force:
-            for interface in self.api.plshell.GetInterfaces(self.api.plauth):
-                self.interfaces[interface['interface_id']] = interface
-
-    def prepare_links(self, force=False):
-        if not self.links or force:
-            pass
-
-    def prepare_node_tags(self, force=False):
-        if not self.node_tags or force:
-            for node_tag in self.api.plshell.GetNodeTags(self.api.plauth):
-                self.node_tags[node_tag['node_tag_id']] = node_tag
-
-    def prepare_pl_initscripts(self, force=False):
-        if not self.pl_initscripts or force:
-            for initscript in self.api.plshell.GetInitScripts(self.api.plauth, {'enabled': True}):
-                self.pl_initscripts[initscript['initscript_id']] = initscript
-
-    def prepare(self, force=False):
-        if not self.prepared or force:
-            self.prepare_sites(force)
-            self.prepare_interfaces(force)
-            self.prepare_node_tags(force)
-            self.prepare_nodes(force)
-            self.prepare_links(force)
-            self.prepare_pl_initscripts()
-        self.prepared = True  
-
+    def get_sites(self, filter={}):
+        sites = {}
+        for site in self.api.driver.GetSites(filter):
+            sites[site['site_id']] = site
+        return sites
+
+    def get_interfaces(self, filter={}):
+        interfaces = {}
+        for interface in self.api.driver.GetInterfaces(filter):
+            iface = Interface()
+            iface['interface_id'] = interface['interface_id']
+            iface['node_id'] = interface['node_id']
+            iface['ipv4'] = interface['ip']
+            iface['bwlimit'] = interface['bwlimit']
+            interfaces[iface['interface_id']] = iface
+        return interfaces
+
+    def get_links(self, filter={}):
+        
+        if not self.api.config.SFA_AGGREGATE_TYPE.lower() == 'vini':
+            return []
+
+        topology = Topology()
+        # xxx self.sites, self.nodes and self.interfaces are no longer prepared as
+        # class attributes (they were removed above), so the lookups below need to
+        # be revisited before the vini case can work again
+        links = {}
+        for (site_id1, site_id2) in topology:
+            link = Link()
+            if site_id1 not in self.sites or site_id2 not in self.sites:
+                continue
+            site1 = self.sites[site_id1]
+            site2 = self.sites[site_id2]
+            # get hrns
+            site1_hrn = self.api.hrn + '.' + site1['login_base']
+            site2_hrn = self.api.hrn + '.' + site2['login_base']
+            # get the first node
+            node1 = self.nodes[site1['node_ids'][0]]
+            node2 = self.nodes[site2['node_ids'][0]]
+
+            # set interfaces
+            # just get first interface of the first node
+            if1_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node1['node_id']))
+            if1_ipv4 = self.interfaces[node1['interface_ids'][0]]['ip']
+            if2_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node2['node_id']))
+            if2_ipv4 = self.interfaces[node2['interface_ids'][0]]['ip']
+
+            if1 = Interface({'component_id': if1_xrn.urn, 'ipv4': if1_ipv4} )
+            if2 = Interface({'component_id': if2_xrn.urn, 'ipv4': if2_ipv4} )
+
+            # set link
+            link = Link({'capacity': '1000000', 'latency': '0', 'packet_loss': '0', 'type': 'ipv4'})
+            link['interface1'] = if1
+            link['interface2'] = if2
+            link['component_name'] = "%s:%s" % (site1['login_base'], site2['login_base'])
+            link['component_id'] = PlXrn(auth=self.api.hrn, interface=link['component_name']).get_urn()
+            link['component_manager_id'] =  hrn_to_urn(self.api.hrn, 'authority+am')
+            links[link['component_name']] = link
+
+        return links
+
+    def get_node_tags(self, filter={}):
+        node_tags = {}
+        for node_tag in self.api.driver.GetNodeTags(filter):
+            node_tags[node_tag['node_tag_id']] = node_tag
+        return node_tags
+
+    def get_pl_initscripts(self, filter={}):
+        pl_initscripts = {}
+        filter.update({'enabled': True})
+        for initscript in self.api.driver.GetInitScripts(filter):
+            pl_initscripts[initscript['initscript_id']] = initscript
+        return pl_initscripts
+
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_urn = hrn_to_urn(slice_hrn, 'slice')
+        slice_name = hrn_to_pl_slicename(slice_hrn)
+        slices = self.api.driver.GetSlices(slice_name)
+        if not slices:
+            return (slice, slivers)
+        slice = slices[0]
+
+        # sort slivers by node id    
+        for node_id in slice['node_ids']:
+            sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], node_id),
+                             'name': 'plab-vserver', 
+                             'tags': []})
+            slivers[node_id]= sliver
+
+        # sort sliver attributes by node id    
+        tags = self.api.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        for tag in tags:
+            # most likely a default/global sliver attribute (node_id == None)
+            if tag['node_id'] not in slivers:
+                sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 'name': 'plab-vserver',
+                                 'tags': []})
+                slivers[tag['node_id']] = sliver
+            slivers[tag['node_id']]['tags'].append(tag)
+        
+        return (slice, slivers)
+
+    def get_nodes (self, slice=None,slivers=[]):
+        filter = {}
+        tags_filter = {}
+        if slice and 'node_ids' in slice and slice['node_ids']:
+            filter['node_id'] = slice['node_ids']
+            tags_filter=filter.copy()
+        
+        filter.update({'peer_id': None})
+        nodes = self.api.driver.GetNodes(filter)
+       
+        site_ids = []
+        interface_ids = []
+        tag_ids = []
+        for node in nodes:
+            site_ids.append(node['site_id'])
+            interface_ids.extend(node['interface_ids'])
+            tag_ids.extend(node['node_tag_ids'])
+        # get sites
+        sites_dict  = self.get_sites({'site_id': site_ids}) 
+        # get interfaces
+        interfaces = self.get_interfaces({'interface_id':interface_ids}) 
+        # get slivers
+        # 
+        # thierry: no get_slivers, we have slivers as a result of
+        # get_slice_and_slivers passed as an argument
+        # 
+#        slivers = self.get_slivers(slice)
+
+        # get tags
+        node_tags = self.get_node_tags(tags_filter)
+        # get initscripts
+        pl_initscripts = self.get_pl_initscripts()
+
+        rspec_nodes = []
+        for node in nodes:
+            # honor whitelists: skip nodes whose whitelist does not include this slice
+            if node['slice_ids_whitelist']:
+                if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    continue
+            rspec_node = Node()
+            # xxx how to retrieve site['login_base']
+            site_id=node['site_id']
+            site=sites_dict[site_id]
+            rspec_node['component_id'] = hostname_to_urn(self.api.hrn, site['login_base'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = self.api.hrn
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.api.hrn, site['login_base']), 'authority+sa')
+            rspec_node['boot_state'] = node['boot_state']
+            rspec_node['exclusive'] = 'False'
+            rspec_node['hardware_types'].append(HardwareType({'name': 'plab-vserver'}))
+            # only doing this because protogeni rspec needs
+            # to advertise available initscripts 
+            rspec_node['pl_initscripts'] = pl_initscripts
+            # add site/interface info to nodes
+            # (sites, interfaces and node tags were fetched above)
+            site = sites_dict[node['site_id']]
+            location = Location({'longitude': site['longitude'], 'latitude': site['latitude']})
+            rspec_node['location'] = location
+            rspec_node['interfaces'] = []
+            for if_id in node['interface_ids']:
+                interface = Interface(interfaces[if_id]) 
+                interface['ipv4'] = interface['ipv4']
+                rspec_node['interfaces'].append(interface)
+            tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+            rspec_node['tags'] = tags
+            if node['node_id'] in slivers:
+                # add sliver info
+                sliver = slivers[node['node_id']]
+                rspec_node['sliver_id'] = sliver['sliver_id']
+                rspec_node['client_id'] = node['hostname']
+                rspec_node['slivers'] = [slivers[node['node_id']]]
+                
+                # slivers always provide the ssh service
+                login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22'})
+                service = Services({'login': login})
+                rspec_node['services'].append(service)
+            rspec_nodes.append(rspec_node)
+        return rspec_nodes
+             
+        
     def get_rspec(self, slice_xrn=None, version = None):
-        self.prepare()
+
         version_manager = VersionManager()
         version = version_manager.get_version(version)
         if not slice_xrn:
             rspec_version = version_manager._get_version(version.type, version.version, 'ad')
         else:
             rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
-               
-        rspec = RSpec(version=rspec_version, user_options=self.user_options)
-        # get slice details if specified
-        slice = None
-        if slice_xrn:
-            slice_hrn, _ = urn_to_hrn(slice_xrn)
-            slice_name = hrn_to_pl_slicename(slice_hrn)
-            slices = self.api.plshell.GetSlices(self.api.plauth, slice_name)
-            if slices:
-                slice = slices[0]            
-
-        # filter out nodes with a whitelist:
-        valid_nodes = [] 
-        for node in self.nodes.values():
-            # only doing this because protogeni rspec needs
-            # to advertise available initscripts 
-            node['pl_initscripts'] = self.pl_initscripts
-
-            if slice and node['node_id'] in slice['node_ids']:
-                valid_nodes.append(node)
-            elif slice and slice['slice_id'] in node['slice_ids_whitelist']:
-                valid_nodes.append(node)
-            elif not slice and not node['slice_ids_whitelist']:
-                valid_nodes.append(node)
-    
-        rspec.version.add_nodes(valid_nodes)
-        rspec.version.add_interfaces(self.interfaces.values()) 
-        rspec.version.add_links(self.links.values())
-
-        # add slivers
-        if slice_xrn and slice:
-            slivers = []
-            tags = self.api.plshell.GetSliceTags(self.api.plauth, slice['slice_tag_ids'])
-
-            # add default tags
-            for tag in tags:
-                # if tag isn't bound to a node then it applies to all slivers
-                # and belongs in the <sliver_defaults> tag
-                if not tag['node_id']:
-                    rspec.version.add_default_sliver_attribute(tag['tagname'], tag['value'], self.api.hrn)
-                if tag['tagname'] == 'topo_rspec' and tag['node_id']:
-                    node = self.nodes[tag['node_id']]
-                    value = eval(tag['value'])
-                    for (id, realip, bw, lvip, rvip, vnet) in value:
-                        bps = get_tc_rate(bw)
-                        remote = self.nodes[id]
-                        site1 = self.sites[node['site_id']]
-                        site2 = self.sites[remote['site_id']]
-                        link1_name = '%s:%s' % (site1['login_base'], site2['login_base']) 
-                        link2_name = '%s:%s' % (site2['login_base'], site1['login_base']) 
-                        p_link = None
-                        if link1_name in self.links:
-                            link = self.links[link1_name] 
-                        elif link2_name in self.links:
-                            link = self.links[link2_name]
-                        v_link = Link()
-                        
-                        link.capacity = bps 
-            for node_id in slice['node_ids']:
-                try:
-                    sliver = {}
-                    sliver['hostname'] = self.nodes[node_id]['hostname']
-                    sliver['node_id'] = node_id
-                    sliver['slice_id'] = slice['slice_id']    
-                    sliver['tags'] = []
-                    slivers.append(sliver)
-
-                    # add tags for this node only
-                    for tag in tags:
-                        if tag['node_id'] and (tag['node_id'] == node_id):
-                            sliver['tags'].append(tag)
-                except:
-                    self.api.logger.log_exc('unable to add sliver %s to node %s' % (slice['name'], node_id))
-            rspec.version.add_slivers(slivers, sliver_urn=slice_xrn)
 
+        slice, slivers = self.get_slice_and_slivers(slice_xrn)
+        rspec = RSpec(version=rspec_version, user_options=self.user_options)
+        rspec.version.add_nodes(self.get_nodes(slice, slivers))
+        rspec.version.add_links(self.get_links(slice))
+        
+        # add sliver defaults (default attributes are keyed under node_id None)
+        default_sliver = slivers.get(None, {})
+        for sliver_attrib in default_sliver.get('tags', []):
+            rspec.version.add_default_sliver_attribute(sliver_attrib['tagname'], sliver_attrib['value'])
+        
         return rspec.toxml()
+
+
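
A minimal usage sketch of the refactored Aggregate above, assuming an api object that exposes the .driver, .hrn and .config attributes the getters rely on; the slice xrn and the empty user_options argument are illustrative only:

    aggregate = Aggregate(api, user_options={})
    # advertisement rspec: no slice given, so get_slice_and_slivers() returns (None, {})
    # version=None is assumed to fall back to the VersionManager default
    ad_xml = aggregate.get_rspec()
    # manifest rspec for one slice, identified by its xrn
    manifest_xml = aggregate.get_rspec(slice_xrn='plc.example.demo_slice')
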
index 9276fb0..e97565c 100644 (file)
@@ -6,7 +6,7 @@ from StringIO import StringIO
 from lxml import etree
 from xmlbuilder import XMLBuilder
 
-from sfa.util.faults import *
+from sfa.util.faults import InvalidRSpec
 from sfa.util.xrn import get_authority
 from sfa.util.plxrn import hrn_to_pl_slicename, hostname_to_urn
 
@@ -282,12 +282,11 @@ class Slicetag:
     
     def write(self, api):
         if self.was_added():
-            api.plshell.AddSliceTag(api.plauth, self.slice_id, 
-                                    self.tagname, self.value, self.node_id)
+            api.driver.AddSliceTag(self.slice_id, self.tagname, self.value, self.node_id)
         elif self.was_changed():
-            api.plshell.UpdateSliceTag(api.plauth, self.id, self.value)
+            api.driver.UpdateSliceTag(self.id, self.value)
         elif self.was_deleted():
-            api.plshell.DeleteSliceTag(api.plauth, self.id)
+            api.driver.DeleteSliceTag(self.id)
 
 
 class TagType:
@@ -560,7 +559,7 @@ class Network:
         Create a dictionary of site objects keyed by site ID
         """
         tmp = []
-        for site in api.plshell.GetSites(api.plauth, {'peer_id': None}):
+        for site in api.driver.GetSites({'peer_id': None}):
             t = site['site_id'], Site(self, site)
             tmp.append(t)
         return dict(tmp)
@@ -571,7 +570,7 @@ class Network:
         Create a dictionary of node objects keyed by node ID
         """
         tmp = []
-        for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
+        for node in api.driver.GetNodes({'peer_id': None}):
             try:
                 t = node['node_id'], Node(self, node)
                 tmp.append(t)
@@ -585,7 +584,7 @@ class Network:
         Create a dictionary of node objects keyed by node ID
         """
         tmp = []
-        for iface in api.plshell.GetInterfaces(api.plauth):
+        for iface in api.driver.GetInterfaces():
             t = iface['interface_id'], Iface(self, iface)
             tmp.append(t)
         return dict(tmp)
@@ -595,7 +594,7 @@ class Network:
         Create a dictionary of slicetag objects keyed by slice tag ID
         """
         tmp = []
-        for tag in api.plshell.GetSliceTags(api.plauth, {'~tagname':Slicetag.ignore_tags}, Slicetag.filter_fields): 
+        for tag in api.driver.GetSliceTags({'~tagname':Slicetag.ignore_tags}, Slicetag.filter_fields): 
             t = tag['slice_tag_id'], Slicetag(tag)
             tmp.append(t)
         return dict(tmp)
@@ -605,7 +604,7 @@ class Network:
         Create a list of tagtype obects keyed by tag name
         """
         tmp = []
-        for tag in api.plshell.GetTagTypes(api.plauth, {'~tagname':TagType.ignore_tags}):
+        for tag in api.driver.GetTagTypes({'~tagname':TagType.ignore_tags}):
             t = tag['tagname'], TagType(tag)
             tmp.append(t)
         return dict(tmp)
@@ -615,7 +614,7 @@ class Network:
         Return a Slice object for a single slice
         """
         slicename = hrn_to_pl_slicename(hrn)
-        slice = api.plshell.GetSlices(api.plauth, [slicename])
+        slice = api.driver.GetSlices([slicename])
         if len(slice):
             self.slice = Slice(self, slicename, slice[0])
             return self.slice
index e85800c..b659ea9 100644 (file)
@@ -13,8 +13,7 @@ def get_peer(api, hrn):
     # get this site's authority (sfa root authority or sub authority)
     site_authority = get_authority(slice_authority).lower()
     # check if we are already peered with this site_authority, if so
-    peers = api.plshell.GetPeers(api.plauth, {}, \
-                    ['peer_id', 'peername', 'shortname', 'hrn_root'])
+    peers = api.driver.GetPeers( {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
     for peer_record in peers:
         names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
         if site_authority in names:
diff --git a/sfa/plc/plcomponentdriver.py b/sfa/plc/plcomponentdriver.py
new file mode 100644 (file)
index 0000000..c991bd6
--- /dev/null
@@ -0,0 +1,102 @@
+import os
+import tempfile
+
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+from sfa.plc.nodemanager import NodeManager
+
+from sfa.trust.credential import Credential
+from sfa.trust.certificate import Certificate, Keypair
+from sfa.trust.gid import GID
+
+####################
+class PlComponentDriver:
+    """
+    This class is the type for the toplevel 'api' object 
+    when running the component manager inside a planetlab node.
+    As such it runs an SFA-compliant interface and thus inherits SfaApi.
+    However the fact that we run inside a planetlab node requires
+    some tweaks as compared with a service running in the infrastructure.
+    """
+
+    def __init__ (self, config):
+        self.nodemanager = NodeManager(config)
+
+    def sliver_exists(self):
+        sliver_dict = self.nodemanager.GetXIDs()
+        ### xxx slicename is undefined
+        if slicename in sliver_dict.keys():
+            return True
+        else:
+            return False
+
+    def get_registry(self):
+        addr, port = self.config.SFA_REGISTRY_HOST, self.config.SFA_REGISTRY_PORT
+        url = "http://%(addr)s:%(port)s" % locals()
+        ### xxx this would require access to the api...
+        server = xmlrpcprotocol.server_proxy(url, self.key_file, self.cert_file)
+        return server
+
+    def get_node_key(self):
+        # this call requires no authentication,
+        # so we can generate a random keypair here
+        subject="component"
+        (kfd, keyfile) = tempfile.mkstemp()
+        (cfd, certfile) = tempfile.mkstemp()
+        key = Keypair(create=True)
+        key.save_to_file(keyfile)
+        cert = Certificate(subject=subject)
+        cert.set_issuer(key=key, subject=subject)
+        cert.set_pubkey(key)
+        cert.sign()
+        cert.save_to_file(certfile)
+        registry = self.get_registry()
+        # the registry will scp the key onto the node
+        registry.get_key()        
+
+    # override the method in SfaApi
+    def getCredential(self):
+        """
+        Get our credential from a remote registry
+        """
+        path = self.config.SFA_DATA_DIR
+        config_dir = self.config.config_path
+        cred_filename = path + os.sep + 'node.cred'
+        try:
+            credential = Credential(filename = cred_filename)
+            return credential.save_to_string(save_parents=True)
+        except IOError:
+            node_pkey_file = config_dir + os.sep + "node.key"
+            node_gid_file = config_dir + os.sep + "node.gid"
+            cert_filename = path + os.sep + 'server.cert'
+            if not os.path.exists(node_pkey_file) or \
+               not os.path.exists(node_gid_file):
+                self.get_node_key()
+
+            # get node's hrn
+            gid = GID(filename=node_gid_file)
+            hrn = gid.get_hrn()
+            # get credential from registry
+            cert_str = Certificate(filename=cert_filename).save_to_string(save_parents=True)
+            registry = self.get_registry()
+            cred = registry.GetSelfCredential(cert_str, hrn, 'node')
+            # cache the credential under the filename computed above
+            Credential(string=cred).save_to_file(cred_filename, save_parents=True)
+
+            return cred
+
+    def clean_key_cred(self):
+        """
+        remove the existing keypair and cred  and generate new ones
+        """
+        files = ["server.key", "server.cert", "node.cred"]
+        for f in files:
+            # xxx KEYDIR is undefined, could be meant to be "/var/lib/sfa/" from sfa_component_setup.py
+            filepath = KEYDIR + os.sep + f
+            if os.path.isfile(filepath):
+                os.unlink(filepath)
+
+        # install the new key pair
+        # GetCredential will take care of generating the new keypair
+        # and credential
+        self.get_node_key()
+        self.getCredential()
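
A minimal usage sketch for the new PlComponentDriver, assuming the standard Config object; note that __init__ only stores the NodeManager, so the config, key_file and cert_file attributes referenced by get_registry() and getCredential() have to be supplied by the embedding object (the paths below are illustrative only):

    from sfa.util.config import Config
    from sfa.plc.plcomponentdriver import PlComponentDriver

    driver = PlComponentDriver(Config("/etc/sfa/sfa_config.py"))
    driver.config = Config("/etc/sfa/sfa_config.py")
    driver.key_file = "/var/lib/sfa/server.key"     # illustrative path
    driver.cert_file = "/var/lib/sfa/server.cert"   # illustrative path
    cred_str = driver.getCredential()               # load cached node.cred or fetch it
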
similarity index 50%
rename from sfa/plc/api.py
rename to sfa/plc/pldriver.py
index 9891410..af3b213 100644 (file)
@@ -1,64 +1,13 @@
 #
-# SFA XML-RPC and SOAP interfaces
-#
+from sfa.util.faults import MissingSfaInfo
+from sfa.util.sfalogging import logger
+from sfa.util.table import SfaTable
+from sfa.util.defaultdict import defaultdict
 
-import sys
-import os
-import traceback
-import string
-import datetime
-import xmlrpclib
+from sfa.util.xrn import hrn_to_urn
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
 
-from sfa.util.faults import *
-from sfa.util.api import *
-from sfa.util.config import *
-from sfa.util.sfalogging import logger
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-from sfa.trust.auth import Auth
-from sfa.trust.rights import Right, Rights, determine_rights
-from sfa.trust.credential import Credential,Keypair
-from sfa.trust.certificate import Certificate
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_slicename, slicename_to_hrn
-from sfa.util.nodemanager import NodeManager
-try:
-    from collections import defaultdict
-except:
-    class defaultdict(dict):
-        def __init__(self, default_factory=None, *a, **kw):
-            if (default_factory is not None and
-                not hasattr(default_factory, '__call__')):
-                raise TypeError('first argument must be callable')
-            dict.__init__(self, *a, **kw)
-            self.default_factory = default_factory
-        def __getitem__(self, key):
-            try:
-                return dict.__getitem__(self, key)
-            except KeyError:
-                return self.__missing__(key)
-        def __missing__(self, key):
-            if self.default_factory is None:
-                raise KeyError(key)
-            self[key] = value = self.default_factory()
-            return value
-        def __reduce__(self):
-            if self.default_factory is None:
-                args = tuple()
-            else:
-                args = self.default_factory,
-            return type(self), args, None, None, self.items()
-        def copy(self):
-            return self.__copy__()
-        def __copy__(self):
-            return type(self)(self.default_factory, self)
-        def __deepcopy__(self, memo):
-            import copy
-            return type(self)(self.default_factory,
-                              copy.deepcopy(self.items()))
-        def __repr__(self):
-            return 'defaultdict(%s, %s)' % (self.default_factory,
-                                            dict.__repr__(self))
-## end of http://code.activestate.com/recipes/523034/ }}}
+from sfa.plc.plshell import PlShell
 
 def list_to_dict(recs, key):
     """
@@ -68,186 +17,19 @@ def list_to_dict(recs, key):
     keys = [rec[key] for rec in recs]
     return dict(zip(keys, recs))
 
-class SfaAPI(BaseAPI):
+class PlDriver (PlShell):
 
-    # flat list of method names
-    import sfa.methods
-    methods = sfa.methods.all
-    
-    def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", 
-                 methods='sfa.methods', peer_cert = None, interface = None, 
-                key_file = None, cert_file = None, cache = None):
-        BaseAPI.__init__(self, config=config, encoding=encoding, methods=methods, \
-                         peer_cert=peer_cert, interface=interface, key_file=key_file, \
-                         cert_file=cert_file, cache=cache)
+    def __init__ (self, config):
+        PlShell.__init__ (self, config)
  
-        self.encoding = encoding
-        from sfa.util.table import SfaTable
-        self.SfaTable = SfaTable
-        # Better just be documenting the API
-        if config is None:
-            return
-
-        # Load configuration
-        self.config = Config(config)
-        self.auth = Auth(peer_cert)
-        self.interface = interface
-        self.key_file = key_file
-        self.key = Keypair(filename=self.key_file)
-        self.cert_file = cert_file
-        self.cert = Certificate(filename=self.cert_file)
-        self.credential = None
+        self.hrn = config.SFA_INTERFACE_HRN
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        # will need to extend generic to support multiple storage systems
+        #self.SfaTable = SfaTable
         # Initialize the PLC shell only if SFA wraps a myPLC
-        rspec_type = self.config.get_aggregate_type()
-        if (rspec_type == 'pl' or rspec_type == 'vini' or \
-            rspec_type == 'eucalyptus' or rspec_type == 'max'):
-            self.plshell = self.getPLCShell()
-            self.plshell_version = "4.3"
-
-        self.hrn = self.config.SFA_INTERFACE_HRN
-        self.time_format = "%Y-%m-%d %H:%M:%S"
-
-    
-    def getPLCShell(self):
-        self.plauth = {'Username': self.config.SFA_PLC_USER,
-                       'AuthMethod': 'password',
-                       'AuthString': self.config.SFA_PLC_PASSWORD}
-
-        # The native shell (PLC.Shell.Shell) is more efficient than xmlrpc,
-        # but it leaves idle db connections open. use xmlrpc until we can figure
-        # out why PLC.Shell.Shell doesn't close db connection properly     
-        #try:
-        #    sys.path.append(os.path.dirname(os.path.realpath("/usr/bin/plcsh")))
-        #    self.plshell_type = 'direct'
-        #    import PLC.Shell
-        #    shell = PLC.Shell.Shell(globals = globals())
-        #except:
-        
-        self.plshell_type = 'xmlrpc' 
-        url = self.config.SFA_PLC_URL
-        shell = xmlrpclib.Server(url, verbose = 0, allow_none = True)
-        return shell
-
-    def get_server(self, interface, cred, timeout=30):
-        """
-        Returns a connection to the specified interface. Use the specified
-        credential to determine the caller and look for the caller's key/cert 
-        in the registry hierarchy cache. 
-        """       
-        from sfa.trust.hierarchy import Hierarchy
-        if not isinstance(cred, Credential):
-            cred_obj = Credential(string=cred)
-        else:
-            cred_obj = cred
-        caller_gid = cred_obj.get_gid_caller()
-        hierarchy = Hierarchy()
-        auth_info = hierarchy.get_auth_info(caller_gid.get_hrn())
-        key_file = auth_info.get_privkey_filename()
-        cert_file = auth_info.get_gid_filename()
-        server = interface.get_server(key_file, cert_file, timeout)
-        return server
-               
-        
-    def getCredential(self):
-        """
-        Return a valid credential for this interface. 
-        """
-        type = 'authority'
-        path = self.config.SFA_DATA_DIR
-        filename = ".".join([self.interface, self.hrn, type, "cred"])
-        cred_filename = path + os.sep + filename
-        cred = None
-        if os.path.isfile(cred_filename):
-            cred = Credential(filename = cred_filename)
-            # make sure cred isnt expired
-            if not cred.get_expiration or \
-               datetime.datetime.utcnow() < cred.get_expiration():    
-                return cred.save_to_string(save_parents=True)
-
-        # get a new credential
-        if self.interface in ['registry']:
-            cred =  self.__getCredentialRaw()
-        else:
-            cred =  self.__getCredential()
-        cred.save_to_file(cred_filename, save_parents=True)
-
-        return cred.save_to_string(save_parents=True)
-
-
-    def getDelegatedCredential(self, creds):
-        """
-        Attempt to find a credential delegated to us in
-        the specified list of creds.
-        """
-        if creds and not isinstance(creds, list): 
-            creds = [creds]
-        delegated_creds = filter_creds_by_caller(creds, [self.hrn, self.hrn + '.slicemanager'])
-        if not delegated_creds:
-            return None
-        return delegated_creds[0]
-    def __getCredential(self):
-        """ 
-        Get our credential from a remote registry 
-        """
-        from sfa.server.registry import Registries
-        registries = Registries()
-        registry = registries.get_server(self.hrn, self.key_file, self.cert_file)
-        cert_string=self.cert.save_to_string(save_parents=True)
-        # get self credential
-        self_cred = registry.GetSelfCredential(cert_string, self.hrn, 'authority')
-        # get credential
-        cred = registry.GetCredential(self_cred, self.hrn, 'authority')
-        return Credential(string=cred)
-
-    def __getCredentialRaw(self):
-        """
-        Get our current credential directly from the local registry.
-        """
-
-        hrn = self.hrn
-        auth_hrn = self.auth.get_authority(hrn)
-    
-        # is this a root or sub authority
-        if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
-            auth_hrn = hrn
-        auth_info = self.auth.get_auth_info(auth_hrn)
-        table = self.SfaTable()
-        records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
-        if not records:
-            raise RecordNotFound
-        record = records[0]
-        type = record['type']
-        object_gid = record.get_gid_object()
-        new_cred = Credential(subject = object_gid.get_subject())
-        new_cred.set_gid_caller(object_gid)
-        new_cred.set_gid_object(object_gid)
-        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
-        
-        r1 = determine_rights(type, hrn)
-        new_cred.set_privileges(r1)
-        new_cred.encode()
-        new_cred.sign()
-
-        return new_cred
-   
-
-    def loadCredential (self):
-        """
-        Attempt to load credential from file if it exists. If it doesnt get
-        credential from registry.
-        """
-
-        # see if this file exists
-        # XX This is really the aggregate's credential. Using this is easier than getting
-        # the registry's credential from iteslf (ssl errors).   
-        ma_cred_filename = self.config.SFA_DATA_DIR + os.sep + self.interface + self.hrn + ".ma.cred"
-        try:
-            self.credential = Credential(filename = ma_cred_filename)
-        except IOError:
-            self.credential = self.getCredentialFromRegistry()
-
-
+        rspec_type = config.get_aggregate_type()
+        assert (rspec_type == 'pl' or rspec_type == 'vini' or \
+                    rspec_type == 'eucalyptus' or rspec_type == 'max')
 
     ##
     # Convert SFA fields to PLC fields for use when registering up updating
@@ -329,16 +111,16 @@ class SfaAPI(BaseAPI):
         # get pl records
         nodes, sites, slices, persons, keys = {}, {}, {}, {}, {}
         if node_ids:
-            node_list = self.plshell.GetNodes(self.plauth, node_ids)
+            node_list = self.GetNodes(node_ids)
             nodes = list_to_dict(node_list, 'node_id')
         if site_ids:
-            site_list = self.plshell.GetSites(self.plauth, site_ids)
+            site_list = self.GetSites(site_ids)
             sites = list_to_dict(site_list, 'site_id')
         if slice_ids:
-            slice_list = self.plshell.GetSlices(self.plauth, slice_ids)
+            slice_list = self.GetSlices(slice_ids)
             slices = list_to_dict(slice_list, 'slice_id')
         if person_ids:
-            person_list = self.plshell.GetPersons(self.plauth, person_ids)
+            person_list = self.GetPersons(person_ids)
             persons = list_to_dict(person_list, 'person_id')
             for person in persons:
                 key_ids.extend(persons[person]['key_ids'])
@@ -347,7 +129,7 @@ class SfaAPI(BaseAPI):
                       'slice': slices, 'user': persons}
 
         if key_ids:
-            key_list = self.plshell.GetKeys(self.plauth, key_ids)
+            key_list = self.GetKeys(key_ids)
             keys = list_to_dict(key_list, 'key_id')
 
         # fill record info
@@ -386,7 +168,7 @@ class SfaAPI(BaseAPI):
         for record in records:
             if 'site_id' in record:
                 site_ids.append(record['site_id'])
-            if 'site_ids' in records:
+            if 'site_ids' in record:
                 site_ids.extend(record['site_ids'])
             if 'person_ids' in record:
                 person_ids.extend(record['person_ids'])
@@ -398,16 +180,16 @@ class SfaAPI(BaseAPI):
         # get pl records
         slices, persons, sites, nodes = {}, {}, {}, {}
         if site_ids:
-            site_list = self.plshell.GetSites(self.plauth, site_ids, ['site_id', 'login_base'])
+            site_list = self.GetSites(site_ids, ['site_id', 'login_base'])
             sites = list_to_dict(site_list, 'site_id')
         if person_ids:
-            person_list = self.plshell.GetPersons(self.plauth, person_ids, ['person_id', 'email'])
+            person_list = self.GetPersons(person_ids, ['person_id', 'email'])
             persons = list_to_dict(person_list, 'person_id')
         if slice_ids:
-            slice_list = self.plshell.GetSlices(self.plauth, slice_ids, ['slice_id', 'name'])
+            slice_list = self.GetSlices(slice_ids, ['slice_id', 'name'])
             slices = list_to_dict(slice_list, 'slice_id')       
         if node_ids:
-            node_list = self.plshell.GetNodes(self.plauth, node_ids, ['node_id', 'hostname'])
+            node_list = self.GetNodes(node_ids, ['node_id', 'hostname'])
             nodes = list_to_dict(node_list, 'node_id')
        
         # convert ids to hrns
@@ -448,7 +230,8 @@ class SfaAPI(BaseAPI):
             
         return records   
 
-    def fill_record_sfa_info(self, records):
+    # aggregates is basically api.aggregates
+    def fill_record_sfa_info(self, records, aggregates):
 
         def startswith(prefix, values):
             return [value for value in values if value.startswith(prefix)]
@@ -467,7 +250,7 @@ class SfaAPI(BaseAPI):
         site_pis = {}
         if site_ids:
             pi_filter = {'|roles': ['pi'], '|site_ids': site_ids} 
-            pi_list = self.plshell.GetPersons(self.plauth, pi_filter, ['person_id', 'site_ids'])
+            pi_list = self.GetPersons(pi_filter, ['person_id', 'site_ids'])
             for pi in pi_list:
                 # we will need the pi's hrns also
                 person_ids.append(pi['person_id'])
@@ -485,7 +268,9 @@ class SfaAPI(BaseAPI):
         # we obtain
         
         # get the sfa records
-        table = self.SfaTable()
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        # table = self.SfaTable()
+        table = SfaTable()
         person_list, persons = [], {}
         person_list = table.find({'type': 'user', 'pointer': person_ids})
         # create a hrns keyed on the sfa record's pointer.
@@ -497,7 +282,7 @@ class SfaAPI(BaseAPI):
 
         # get the pl records
         pl_person_list, pl_persons = [], {}
-        pl_person_list = self.plshell.GetPersons(self.plauth, person_ids, ['person_id', 'roles'])
+        pl_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
         pl_persons = list_to_dict(pl_person_list, 'person_id')
 
         # fill sfa info
@@ -527,9 +312,9 @@ class SfaAPI(BaseAPI):
                 
             elif (type.startswith("authority")):
                 record['url'] = None
-                if record['hrn'] in self.aggregates:
+                if record['hrn'] in aggregates:
                     
-                    record['url'] = self.aggregates[record['hrn']].get_url()
+                    record['url'] = aggregates[record['hrn']].get_url()
 
                 if record['pointer'] != -1:
                     record['PI'] = []
@@ -559,7 +344,7 @@ class SfaAPI(BaseAPI):
                 # xxx TODO: PostalAddress, Phone
             record.update(sfa_info)
 
-    def fill_record_info(self, records):
+    def fill_record_info(self, records, aggregates):
         """
         Given a SFA record, fill in the PLC specific and SFA specific
         fields in the record. 
@@ -568,10 +353,10 @@ class SfaAPI(BaseAPI):
             records = [records]
 
         self.fill_record_pl_info(records)
-        self.fill_record_sfa_info(records)
+        self.fill_record_sfa_info(records, aggregates)
 
     def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
-        # get a list of the HRNs tht are members of the old and new records
+        # get a list of the HRNs that are members of the old and new records
         if oldRecord:
             oldList = oldRecord.get(listName, [])
         else:
@@ -585,7 +370,9 @@ class SfaAPI(BaseAPI):
         # build a list of the new person ids, by looking up each person to get
         # their pointer
         newIdList = []
-        table = self.SfaTable()
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        #table = self.SfaTable()
+        table = SfaTable()
         records = table.find({'type': 'user', 'hrn': newList})
         for rec in records:
             newIdList.append(rec['pointer'])
@@ -603,112 +390,18 @@ class SfaAPI(BaseAPI):
     # add people who are in the new list, but not the oldList
         for personId in newIdList:
             if not (personId in oldIdList):
-                addFunc(self.plauth, personId, containerId)
+                addFunc(personId, containerId)
 
         # remove people who are in the old list, but not the new list
         for personId in oldIdList:
             if not (personId in newIdList):
-                delFunc(self.plauth, personId, containerId)
+                delFunc(personId, containerId)
 
     def update_membership(self, oldRecord, record):
         if record.type == "slice":
             self.update_membership_list(oldRecord, record, 'researcher',
-                                        self.plshell.AddPersonToSlice,
-                                        self.plshell.DeletePersonFromSlice)
+                                        self.AddPersonToSlice,
+                                        self.DeletePersonFromSlice)
         elif record.type == "authority":
             # xxx TODO
             pass
-
-
-
-class ComponentAPI(BaseAPI):
-
-    def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", methods='sfa.methods',
-                 peer_cert = None, interface = None, key_file = None, cert_file = None):
-
-        BaseAPI.__init__(self, config=config, encoding=encoding, methods=methods, peer_cert=peer_cert,
-                         interface=interface, key_file=key_file, cert_file=cert_file)
-        self.encoding = encoding
-
-        # Better just be documenting the API
-        if config is None:
-            return
-
-        self.nodemanager = NodeManager(self.config)
-
-    def sliver_exists(self):
-        sliver_dict = self.nodemanager.GetXIDs()
-        if slicename in sliver_dict.keys():
-            return True
-        else:
-            return False
-
-    def get_registry(self):
-        addr, port = self.config.SFA_REGISTRY_HOST, self.config.SFA_REGISTRY_PORT
-        url = "http://%(addr)s:%(port)s" % locals()
-        server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
-        return server
-
-    def get_node_key(self):
-        # this call requires no authentication,
-        # so we can generate a random keypair here
-        subject="component"
-        (kfd, keyfile) = tempfile.mkstemp()
-        (cfd, certfile) = tempfile.mkstemp()
-        key = Keypair(create=True)
-        key.save_to_file(keyfile)
-        cert = Certificate(subject=subject)
-        cert.set_issuer(key=key, subject=subject)
-        cert.set_pubkey(key)
-        cert.sign()
-        cert.save_to_file(certfile)
-        registry = self.get_registry()
-        # the registry will scp the key onto the node
-        registry.get_key()        
-
-    def getCredential(self):
-        """
-        Get our credential from a remote registry
-        """
-        path = self.config.SFA_DATA_DIR
-        config_dir = self.config.config_path
-        cred_filename = path + os.sep + 'node.cred'
-        try:
-            credential = Credential(filename = cred_filename)
-            return credential.save_to_string(save_parents=True)
-        except IOError:
-            node_pkey_file = config_dir + os.sep + "node.key"
-            node_gid_file = config_dir + os.sep + "node.gid"
-            cert_filename = path + os.sep + 'server.cert'
-            if not os.path.exists(node_pkey_file) or \
-               not os.path.exists(node_gid_file):
-                self.get_node_key()
-
-            # get node's hrn
-            gid = GID(filename=node_gid_file)
-            hrn = gid.get_hrn()
-            # get credential from registry
-            cert_str = Certificate(filename=cert_filename).save_to_string(save_parents=True)
-            registry = self.get_registry()
-            cred = registry.GetSelfCredential(cert_str, hrn, 'node')
-            Credential(string=cred).save_to_file(credfile, save_parents=True)            
-
-            return cred
-
-    def clean_key_cred(self):
-        """
-        remove the existing keypair and cred  and generate new ones
-        """
-        files = ["server.key", "server.cert", "node.cred"]
-        for f in files:
-            filepath = KEYDIR + os.sep + f
-            if os.path.isfile(filepath):
-                os.unlink(f)
-
-        # install the new key pair
-        # GetCredential will take care of generating the new keypair
-        # and credential
-        self.get_node_key()
-        self.getCredential()
-
-    
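
A minimal sketch of driving the renamed PlDriver directly; it assumes the same Config object and relies on PlDriver inheriting the PlShell dispatch, so whitelisted PLCAPI calls become plain methods (self.GetNodes, self.GetSites, ...) as used in the hunks above:

    from sfa.util.config import Config
    from sfa.plc.pldriver import PlDriver

    driver = PlDriver(Config("/etc/sfa/sfa_config.py"))
    hostnames = [n['hostname'] for n in driver.GetNodes({'peer_id': None}, ['hostname'])]
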
diff --git a/sfa/plc/plshell.py b/sfa/plc/plshell.py
new file mode 100644 (file)
index 0000000..863472f
--- /dev/null
@@ -0,0 +1,46 @@
+import xmlrpclib
+
+class PlShell:
+    """
+    A simple xmlrpc shell to a myplc instance.
+    It forwards PLCAPI calls to the underlying testbed;
+    for safety this is limited to a hard-coded set of calls.
+    """
+    
+    direct_calls = ['AddNode', 'AddPerson', 'AddPersonKey', 'AddPersonToSite',
+                    'AddPersonToSlice', 'AddRoleToPerson', 'AddSite', 'AddSiteTag', 'AddSlice',
+                    'AddSliceTag', 'AddSliceToNodes', 'BindObjectToPeer', 'DeleteKey',
+                    'DeleteNode', 'DeletePerson', 'DeletePersonFromSlice', 'DeleteSite',
+                    'DeleteSlice', 'DeleteSliceFromNodes', 'DeleteSliceTag', 'GetInitScripts',
+                    'GetInterfaces', 'GetKeys', 'GetNodeTags', 'GetPeers',
+                    'GetPersons', 'GetSlices', 'GetSliceTags', 'GetTagTypes',
+                    'UnBindObjectFromPeer', 'UpdateNode', 'UpdatePerson', 'UpdateSite',
+                    'UpdateSlice', 'UpdateSliceTag',
+                    # also used as-is in importer
+                    'GetSites','GetNodes',
+                    ]
+    # support for other names - this is experimental
+    alias_calls = { 'get_authorities':'GetSites',
+                    'get_nodes':'GetNodes',
+                    }
+
+    def __init__ ( self, config ) :
+        self.plauth = {'Username': config.SFA_PLC_USER,
+                       'AuthMethod': 'password',
+                       'AuthString': config.SFA_PLC_PASSWORD}
+        
+        self.url = config.SFA_PLC_URL
+        # xxx the hard-coded credentials below override the config-based plauth set
+        # just above - presumably a debugging leftover to be removed
+        self.plauth = {'Username': 'root@test.onelab.eu',
+                       'AuthMethod': 'password',
+                       'AuthString': 'test++'}
+        self.proxy_server = xmlrpclib.Server(self.url, verbose = 0, allow_none = True)
+
+    def __getattr__(self, name):
+        def func(*args, **kwds):
+            actual_name=None
+            if name in PlShell.direct_calls: actual_name=name
+            if name in PlShell.alias_calls: actual_name=PlShell.alias_calls[name]
+            if not actual_name:
+                raise Exception, "Illegal method call %s for PL driver"%(name)
+            return getattr(self.proxy_server, actual_name)(self.plauth, *args, **kwds)
+        return func
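
A minimal usage sketch of the PlShell dispatch above: any name in direct_calls (or aliased via alias_calls) becomes a method that prepends self.plauth before forwarding over xmlrpc, and anything else raises; the configuration path and filters are illustrative:

    from sfa.util.config import Config
    from sfa.plc.plshell import PlShell

    shell = PlShell(Config("/etc/sfa/sfa_config.py"))
    nodes = shell.GetNodes({'peer_id': None}, ['node_id', 'hostname'])  # direct call
    authorities = shell.get_authorities()                               # alias -> GetSites
    # shell.GetConfFiles() would raise: the call is not in the whitelist
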
index 78ba4b9..9e188d7 100644 (file)
@@ -1,19 +1,13 @@
-import datetime
-import time
-import traceback
-import sys
-
 from types import StringTypes
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn, urn_to_hrn
-from sfa.util.plxrn import hrn_to_pl_slicename, hrn_to_pl_login_base
-from sfa.util.rspec import *
-from sfa.util.specdict import *
-from sfa.util.faults import *
-from sfa.util.record import SfaRecord
-from sfa.util.policy import Policy
-from sfa.util.prefixTree import prefixTree
 from collections import defaultdict
 
+from sfa.util.xrn import get_leaf, get_authority, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_slicename
+from sfa.util.policy import Policy
+from sfa.rspecs.rspec import RSpec
+from sfa.plc.vlink import VLink
+from sfa.util.xrn import Xrn
+
 MAXINT =  2L**31-1
 
 class Slices:
@@ -27,6 +21,8 @@ class Slices:
         self.origin_hrn = origin_hrn
         self.registry = api.registries[api.hrn]
         self.credential = api.getCredential()
+        self.nodes = []
+        self.persons = []
 
     def get_slivers(self, xrn, node=None):
         hrn, type = urn_to_hrn(xrn)
@@ -34,11 +30,11 @@ class Slices:
         slice_name = hrn_to_pl_slicename(hrn)
         # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
         # of doing all of this?
-        #return self.api.GetSliceTicket(self.auth, slice_name) 
+        #return self.api.driver.GetSliceTicket(self.auth, slice_name) 
         
         # from PLCAPI.GetSlivers.get_slivers()
         slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
-        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        slices = self.api.driver.GetSlices(slice_name, slice_fields)
         # Build up list of users and slice attributes
         person_ids = set()
         all_slice_tag_ids = set()
@@ -48,7 +44,7 @@ class Slices:
         person_ids = list(person_ids)
         all_slice_tag_ids = list(all_slice_tag_ids)
         # Get user information
-        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons_list = self.api.driver.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
         all_persons = {}
         for person in all_persons_list:
             all_persons[person['person_id']] = person        
@@ -59,12 +55,12 @@ class Slices:
             key_ids.update(person['key_ids'])
         key_ids = list(key_ids)
         # Get user account keys
-        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys_list = self.api.driver.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
         all_keys = {}
         for key in all_keys_list:
             all_keys[key['key_id']] = key
         # Get slice attributes
-        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags_list = self.api.driver.GetSliceTags(all_slice_tag_ids)
         all_slice_tags = {}
         for slice_tag in all_slice_tags_list:
             all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
@@ -147,7 +143,7 @@ class Slices:
         site_authority = get_authority(slice_authority).lower()
 
         # check if we are already peered with this site_authority, if so
-        peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
         for peer_record in peers:
             names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
             if site_authority in names:
@@ -170,7 +166,7 @@ class Slices:
 
     def verify_slice_nodes(self, slice, requested_slivers, peer):
         
-        nodes = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        nodes = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
         current_slivers = [node['hostname'] for node in nodes]
 
         # remove nodes not in rspec
@@ -181,48 +177,95 @@ class Slices:
 
         try:
             if peer:
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer['shortname'])
-            self.api.plshell.AddSliceToNodes(self.api.plauth, slice['name'], added_nodes)
-            self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slice['name'], deleted_nodes)
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            self.api.driver.AddSliceToNodes(slice['name'], added_nodes)
+            self.api.driver.DeleteSliceFromNodes(slice['name'], deleted_nodes)
 
         except: 
             self.api.logger.log_exc('Failed to add/remove slice from nodes')
 
+    def free_egre_key(self):
+        used = set()
+        for tag in self.api.driver.GetSliceTags({'tagname': 'egre_key'}):
+                used.add(int(tag['value']))
+
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+    def verify_slice_links(self, slice, links, aggregate):
+        # nodes is undefined here
+        if not links:
+            return
+
+        slice_tags = []
+        
+        # set egre key
+        slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})
+    
+        # set netns
+        slice_tags.append({'name': 'netns', 'value': '1'})
+
+        # set cap_net_admin 
+        # need to update the attribute string?
+        slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'}) 
+        
+        for link in links:
+            # get the ip address of the first node in the link
+            ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
+            (node, device) = ifname1.split(':')
+            node_id = int(node.replace('node', ''))
+            node = aggregate.nodes[node_id]
+            if1 = aggregate.interfaces[node['interface_ids'][0]]
+            ipaddr = if1['ip']
+            topo_rspec = VLink.get_topo_rspec(link, ipaddr)
+            # set topo_rspec tag
+            slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
+            # set vini_topo tag
+            slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
+            #self.api.driver.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id) 
+
+        self.verify_slice_attributes(slice, slice_tags, append=True, admin=True)
+                        
+        
+
     def handle_peer(self, site, slice, persons, peer):
         if peer:
             # bind site
             try:
                 if site:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', \
-                       site['site_id'], peer['shortname'], slice['site_id'])
+                    self.api.driver.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
             except Exception,e:
-                self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.DeleteSite(site['site_id'])
                 raise e
             
             # bind slice
             try:
                 if slice:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', \
-                       slice['slice_id'], peer['shortname'], slice['slice_id'])
+                    self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
             except Exception,e:
-                self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.DeleteSlice(slice['slice_id'])
                 raise e 
 
             # bind persons
             for person in persons:
                 try:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', \
-                        person['person_id'], peer['shortname'], person['peer_person_id'])
+                    self.api.driver.BindObjectToPeer('person', 
+                                                     person['person_id'], peer['shortname'], person['peer_person_id'])
 
                     for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
                         try:
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'key',\
-                                key['key_id'], peer['shortname'], remote_key_id)
+                            self.api.driver.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
                         except:
-                            self.api.plshell.DeleteKey(self.api.plauth, key['key_id'])
+                            self.api.driver.DeleteKey(key['key_id'])
                             self.api.logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
                 except Exception,e:
-                    self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                    self.api.driver.DeletePerson(person['person_id'])
                     raise e       
 
         return slice
@@ -234,7 +277,7 @@ class Slices:
         slicename = hrn_to_pl_slicename(slice_hrn)
         authority_name = slicename.split('_')[0]
         login_base = authority_name[:20]
-        sites = self.api.plshell.GetSites(self.api.plauth, login_base)
+        sites = self.api.driver.GetSites(login_base)
         if not sites:
             # create new site record
             site = {'name': 'geni.%s' % authority_name,
@@ -246,9 +289,9 @@ class Slices:
                     'peer_site_id': None}
             if peer:
                 site['peer_site_id'] = slice_record.get('site_id', None)
-            site['site_id'] = self.api.plshell.AddSite(self.api.plauth, site)
+            site['site_id'] = self.api.driver.AddSite(site)
             # exempt federated sites from monitor policies
-            self.api.plshell.AddSiteTag(self.api.plauth, site['site_id'], 'exempt_site_until', "20200101")
+            self.api.driver.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
             
             # is this still necessary?
             # add record to the local registry 
@@ -260,7 +303,7 @@ class Slices:
             site =  sites[0]
             if peer:
                 # unbind from peer so we can modify if necessary. Will bind back later
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer['shortname']) 
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer['shortname']) 
         
         return site        
 
@@ -268,13 +311,13 @@ class Slices:
         slicename = hrn_to_pl_slicename(slice_hrn)
         parts = slicename.split("_")
         login_base = parts[0]
-        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename]) 
+        slices = self.api.driver.GetSlices([slicename]) 
         if not slices:
             slice = {'name': slicename,
                      'url': slice_record.get('url', slice_hrn), 
                      'description': slice_record.get('description', slice_hrn)}
             # add the slice                          
-            slice['slice_id'] = self.api.plshell.AddSlice(self.api.plauth, slice)
+            slice['slice_id'] = self.api.driver.AddSlice(slice)
             slice['node_ids'] = []
             slice['person_ids'] = []
             if peer:
@@ -289,12 +332,10 @@ class Slices:
             if peer:
                 slice['peer_slice_id'] = slice_record.get('slice_id', None)
                 # unbind from peer so we can modify if necessary. Will bind back later
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice',\
-                             slice['slice_id'], peer['shortname'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
                # Update existing record (e.g. expires field) with the latest info.
             if slice_record and slice['expires'] != slice_record['expires']:
-                self.api.plshell.UpdateSlice(self.api.plauth, slice['slice_id'],\
-                             {'expires' : slice_record['expires']})
+                self.api.driver.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
        
         return slice
 
@@ -320,13 +361,13 @@ class Slices:
         existing_user_ids = []
         if users_by_email:
             # get existing users by email 
-            existing_users = self.api.plshell.GetPersons(self.api.plauth, \
-                {'email': users_by_email.keys()}, ['person_id', 'key_ids', 'email'])
+            existing_users = self.api.driver.GetPersons({'email': users_by_email.keys()}, 
+                                                        ['person_id', 'key_ids', 'email'])
             existing_user_ids.extend([user['email'] for user in existing_users])
 
         if users_by_site:
             # get a list of user sites (based on requested user urns)
-            site_list = self.api.plshell.GetSites(self.api.plauth, users_by_site.keys(), \
+            site_list = self.api.driver.GetSites(users_by_site.keys(), \
                 ['site_id', 'login_base', 'person_ids'])
             sites = {}
             site_user_ids = []
@@ -336,8 +377,8 @@ class Slices:
                 sites[site['site_id']] = site
                 site_user_ids.extend(site['person_ids'])
 
-            existing_site_persons_list = self.api.plshell.GetPersons(self.api.plauth, \
-              site_user_ids,  ['person_id', 'key_ids', 'email', 'site_ids'])
+            existing_site_persons_list = self.api.driver.GetPersons(site_user_ids,  
+                                                                    ['person_id', 'key_ids', 'email', 'site_ids'])
 
             # all requested users are either existing users or new (added) users      
             for login_base in users_by_site:
@@ -365,8 +406,8 @@ class Slices:
         requested_user_ids = users_dict.keys()
         # existing slice users
         existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
-        existing_slice_users = self.api.plshell.GetPersons(self.api.plauth, \
-             existing_slice_users_filter, ['person_id', 'key_ids', 'email'])
+        existing_slice_users = self.api.driver.GetPersons(existing_slice_users_filter,
+                                                          ['person_id', 'key_ids', 'email'])
         existing_slice_user_ids = [user['email'] for user in existing_slice_users]
         
         # users to be added, removed or updated
@@ -378,7 +419,7 @@ class Slices:
         # Remove stale users (only if we are not appending).
         if append == False:
             for removed_user_id in removed_user_ids:
-                self.api.plshell.DeletePersonFromSlice(self.api.plauth, removed_user_id, slice_record['name'])
+                self.api.driver.DeletePersonFromSlice(removed_user_id, slice_record['name'])
         # update_existing users
         updated_users_list = [user for user in existing_slice_users if user['email'] in \
           updated_user_ids]
@@ -397,20 +438,20 @@ class Slices:
                 'keys': [],
                 'key_ids': added_user.get('key_ids', []),
             }
-            person['person_id'] = self.api.plshell.AddPerson(self.api.plauth, person)
+            person['person_id'] = self.api.driver.AddPerson(person)
             if peer:
                 person['peer_person_id'] = added_user['person_id']
             added_persons.append(person)
            
             # enable the account 
-            self.api.plshell.UpdatePerson(self.api.plauth, person['person_id'], {'enabled': True})
+            self.api.driver.UpdatePerson(person['person_id'], {'enabled': True})
             
             # add person to site
-            self.api.plshell.AddPersonToSite(self.api.plauth, added_user_id, login_base)
+            self.api.driver.AddPersonToSite(added_user_id, login_base)
 
             for key_string in added_user.get('keys', []):
                 key = {'key':key_string, 'key_type':'ssh'}
-                key['key_id'] = self.api.plshell.AddPersonKey(self.api.plauth, person['person_id'], key)
+                key['key_id'] = self.api.driver.AddPersonKey(person['person_id'], key)
                 person['keys'].append(key)
 
             # add the registry record
@@ -421,7 +462,7 @@ class Slices:
     
         for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
             # add person to the slice 
-            self.api.plshell.AddPersonToSlice(self.api.plauth, added_slice_user_id, slice_record['name'])
+            self.api.driver.AddPersonToSlice(added_slice_user_id, slice_record['name'])
             # if this is a peer record then it should already be bound to a peer.
             # no need to worry about it getting bound later 
 
@@ -433,7 +474,7 @@ class Slices:
         key_ids = []
         for person in persons:
             key_ids.extend(person['key_ids'])
-        keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key'])
+        keylist = self.api.driver.GetKeys(key_ids, ['key_id', 'key'])
         keydict = {}
         for key in keylist:
             keydict[key['key']] = key['key_id']     
@@ -455,16 +496,16 @@ class Slices:
                     try:
                         if peer:
                             person = persondict[user['email']]
-                            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer['shortname'])
-                        key['key_id'] = self.api.plshell.AddPersonKey(self.api.plauth, user['email'], key)
+                            self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.api.driver.AddPersonKey(user['email'], key)
                         if peer:
                             key_index = user_keys.index(key['key'])
                             remote_key_id = user['key_ids'][key_index]
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key['key_id'], peer['shortname'], remote_key_id)
+                            self.api.driver.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                             
                     finally:
                         if peer:
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person['person_id'], peer['shortname'], user['person_id'])
+                            self.api.driver.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
         
         # remove old keys (only if we are not appending)
         if append == False: 
@@ -473,21 +514,24 @@ class Slices:
                 if keydict[existing_key_id] in removed_keys:
                     try:
                         if peer:
-                            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'key', existing_key_id, peer['shortname'])
-                        self.api.plshell.DeleteKey(self.api.plauth, existing_key_id)
+                            self.api.driver.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.api.driver.DeleteKey(existing_key_id)
                     except:
                         pass   
 
-    def verify_slice_attributes(self, slice, requested_slice_attributes):
+    def verify_slice_attributes(self, slice, requested_slice_attributes, append=False, admin=False):
         # get list of attributes users are able to manage
-        slice_attributes = self.api.plshell.GetTagTypes(self.api.plauth, {'category': '*slice*', '|roles': ['user']})
+        filter = {'category': '*slice*'}
+        if not admin:
+            filter['|roles'] = ['user']
+        slice_attributes = self.api.driver.GetTagTypes(filter)
         valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
 
         # get sliver attributes
         added_slice_attributes = []
         removed_slice_attributes = []
         ignored_slice_attribute_names = []
-        existing_slice_attributes = self.api.plshell.GetSliceTags(self.api.plauth, {'slice_id': slice['slice_id']})
+        existing_slice_attributes = self.api.driver.GetSliceTags({'slice_id': slice['slice_id']})
 
         # get attributes that should be removed
         for slice_tag in existing_slice_attributes:
@@ -505,7 +549,7 @@ class Slices:
                         attribute_found=True
                         break
 
-            if not attribute_found:
+            if not attribute_found and not append:
                 removed_slice_attributes.append(slice_tag)
         
         # get attributes that should be added:
@@ -525,7 +569,7 @@ class Slices:
         # remove stale attributes
         for attribute in removed_slice_attributes:
             try:
-                self.api.plshell.DeleteSliceTag(self.api.plauth, attribute['slice_tag_id'])
+                self.api.driver.DeleteSliceTag(attribute['slice_tag_id'])
             except Exception, e:
                 self.api.logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                                 % (name, value,  node_id, str(e)))
@@ -533,8 +577,7 @@ class Slices:
         # add requested_attributes
         for attribute in added_slice_attributes:
             try:
-                name, value, node_id = attribute['name'], attribute['value'], attribute.get('node_id', None)
-                self.api.plshell.AddSliceTag(self.api.plauth, slice['name'], name, value, node_id)
+                name, value, node_id = attribute['name'], attribute['value'], attribute.get('node_id', None)
+                self.api.driver.AddSliceTag(slice['name'], name, value, node_id)
             except Exception, e:
                 self.api.logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                                 % (name, value,  node_id, str(e)))
@@ -557,7 +600,7 @@ class Slices:
         slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
 
         # find out where this slice is currently running
-        nodelist = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        nodelist = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
         hostnames = [node['hostname'] for node in nodelist]
 
         # get netspec details
@@ -597,9 +640,9 @@ class Slices:
 
         try:
             if peer:
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
 
-            self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes) 
+            self.api.driver.AddSliceToNodes(slicename, added_nodes) 
 
             # Add recognized slice tags
             for node_name in node_names:
@@ -609,12 +652,12 @@ class Slices:
                     if (isinstance(value, list)):
                         value = value[0]
 
-                    self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name)
+                    self.api.driver.AddSliceTag(slicename, slice_tag, value, node_name)
 
-            self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
+            self.api.driver.DeleteSliceFromNodes(slicename, deleted_nodes)
         finally:
             if peer:
-                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+                self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
 
         return 1
 
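
The net effect of the hunks above is mechanical: every self.api.plshell.Method(self.api.plauth, ...) call
becomes self.api.driver.Method(...), with the driver binding the PLCAPI shell and its auth structure once.
A rough sketch of that pattern, with purely illustrative names (this is not the actual driver class added
elsewhere in this changeset):

    # illustrative only: a thin wrapper that injects the auth argument into every PLCAPI call
    class ExamplePlDriver:
        def __init__(self, shell, plauth):
            self.shell = shell      # xmlrpc proxy (or direct API) to PLCAPI
            self.plauth = plauth    # auth struct, bound once here

        def __getattr__(self, name):
            method = getattr(self.shell, name)
            def call(*args):
                return method(self.plauth, *args)
            return call

    # usage, assuming a configured shell:
    #   driver = ExamplePlDriver(plc_shell, plauth)
    #   sites = driver.GetSites(login_base)
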
diff --git a/sfa/plc/vlink.py b/sfa/plc/vlink.py
new file mode 100644 (file)
index 0000000..625963d
--- /dev/null
@@ -0,0 +1,115 @@
+import re
+from sfa.util.xrn import Xrn
+# Taken from bwlimit.py
+#
+# See tc_util.c and http://physics.nist.gov/cuu/Units/binary.html. Be
+# warned that older versions of tc interpret "kbps", "mbps", "mbit",
+# and "kbit" to mean (in this system) "kibps", "mibps", "mibit", and
+# "kibit" and that if an older version is installed, all rates will
+# be off by a small fraction.
+suffixes = {
+    "":         1,
+    "bit":  1,
+    "kibit":    1024,
+    "kbit": 1000,
+    "mibit":    1024*1024,
+    "mbit": 1000000,
+    "gibit":    1024*1024*1024,
+    "gbit": 1000000000,
+    "tibit":    1024*1024*1024*1024,
+    "tbit": 1000000000000,
+    "bps":  8,
+    "kibps":    8*1024,
+    "kbps": 8000,
+    "mibps":    8*1024*1024,
+    "mbps": 8000000,
+    "gibps":    8*1024*1024*1024,
+    "gbps": 8000000000,
+    "tibps":    8*1024*1024*1024*1024,
+    "tbps": 8000000000000
+}
+
+
+def get_tc_rate(s):
+    """
+    Parses an integer or a tc rate string (e.g., 1.5mbit) into bits/second
+    """
+
+    if type(s) == int:
+        return s
+    m = re.match(r"([0-9.]+)(\D*)", s)
+    if m is None:
+        return -1
+    suffix = m.group(2).lower()
+    if suffixes.has_key(suffix):
+        return int(float(m.group(1)) * suffixes[suffix])
+    else:
+        return -1
+
+def format_tc_rate(rate):
+    """
+    Formats a bits/second rate into a tc rate string
+    """
+
+    if rate >= 1000000000 and (rate % 1000000000) == 0:
+        return "%.0fgbit" % (rate / 1000000000.)
+    elif rate >= 1000000 and (rate % 1000000) == 0:
+        return "%.0fmbit" % (rate / 1000000.)
+    elif rate >= 1000:
+        return "%.0fkbit" % (rate / 1000.)
+    else:
+        return "%.0fbit" % rate
+
+class VLink:
+    @staticmethod
+    def get_link_id(if1, if2):
+        if if1['id'] < if2['id']:
+            link = (if1['id']<<7) + if2['id']
+        else:
+            link = (if2['id']<<7) + if1['id']
+        return link
+
+    @staticmethod
+    def get_iface_id(if1, if2):
+        if if1['id'] < if2['id']:
+            iface = 1
+        else:
+            iface = 2
+        return iface
+
+    @staticmethod
+    def get_virt_ip(if1, if2):
+        link_id = VLink.get_link_id(if1, if2)
+        iface_id = VLink.get_iface_id(if1, if2)
+        first = link_id >> 6
+        second = ((link_id & 0x3f)<<2) + iface_id
+        return "192.168.%d.%s" % (first, second)
+
+    @staticmethod
+    def get_virt_net(link):
+        link_id = VLink.get_link_id(link['interface1'], link['interface2'])
+        first = link_id >> 6
+        second = (link_id & 0x3f)<<2
+        return "192.168.%d.%d/30" % (first, second)
+
+    @staticmethod
+    def get_interface_id(interface):
+        if_name = Xrn(interface['component_id']).get_leaf()
+        node, dev = if_name.split(":")
+        node_id = int(node.replace("node", ""))
+        return node_id
+
+        
+    @staticmethod
+    def get_topo_rspec(link, ipaddr):
+        link['interface1']['id'] = VLink.get_interface_id(link['interface1'])
+        link['interface2']['id'] = VLink.get_interface_id(link['interface2'])
+        my_ip = VLink.get_virt_ip(link['interface1'], link['interface2'])
+        remote_ip = VLink.get_virt_ip(link['interface2'], link['interface1'])
+        net = VLink.get_virt_net(link)
+        bw = format_tc_rate(long(link['capacity']))
+        return (link['interface2']['id'], ipaddr, bw, my_ip, remote_ip, net)
+
+    @staticmethod 
+    def topo_rspec_to_link(topo_rspec):
+        pass          
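
To make the bit arithmetic in VLink concrete, a small standalone check (interface ids 3 and 5 are
arbitrary example values, not taken from any real topology):

    if1, if2 = {'id': 3}, {'id': 5}
    link_id = (3 << 7) + 5                    # 389, as in VLink.get_link_id(if1, if2)
    first   = link_id >> 6                    # 6
    second  = ((link_id & 0x3f) << 2) + 1     # (5 << 2) + 1 = 21
    # so: VLink.get_virt_ip(if1, if2) -> "192.168.6.21"
    #     VLink.get_virt_ip(if2, if1) -> "192.168.6.22"
    #     VLink.get_virt_net(link)    -> "192.168.6.20/30" (a /30 holding both endpoints)

    # and for the tc rate helpers:
    #     get_tc_rate("1.5mbit")  -> 1500000   (bits/second)
    #     format_tc_rate(1500000) -> "1500kbit"
    #     format_tc_rate(2000000) -> "2mbit"
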
diff --git a/sfa/rspecs/elements/bwlimit.py b/sfa/rspecs/elements/bwlimit.py
new file mode 100644 (file)
index 0000000..027bb5b
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+
+class BWlimit(Element):
+    fields = { 
+        'units': None,
+        'value': None,
+    }
+        
diff --git a/sfa/rspecs/elements/component_manager.py b/sfa/rspecs/elements/component_manager.py
new file mode 100644 (file)
index 0000000..ec9d85c
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class ComponentManager(Element):
+    fields = {
+        'name': None,
+    }
+    
diff --git a/sfa/rspecs/elements/disk_image.py b/sfa/rspecs/elements/disk_image.py
new file mode 100644 (file)
index 0000000..3a810a5
--- /dev/null
@@ -0,0 +1,4 @@
+from sfa.rspecs.elements.element import Element
+
+class DiskImage(Element):
+    fields = {}        
index 8217c11..6757f8a 100644 (file)
@@ -1,85 +1,9 @@
-from lxml import etree
+class Element(dict):
 
-class Element:
-    def __init__(self, root_node, namespaces = None):
-        self.root_node = root_node
-        self.namespaces = namespaces
+    fields = {}
 
-    def xpath(self, xpath):
-        return this.root_node.xpath(xpath, namespaces=self.namespaces) 
+    def __init__(self, fields={}, element=None):
+        self.element = element
+        dict.__init__(self, self.fields) 
+        self.update(fields)
 
-    def add_element(self, name, attrs={}, parent=None, text=""):
-        """
-        Generic wrapper around etree.SubElement(). Adds an element to
-        specified parent node. Adds element to root node is parent is
-        not specified.
-        """
-        if parent == None:
-            parent = self.root_node
-        element = etree.SubElement(parent, name)
-        if text:
-            element.text = text
-        if isinstance(attrs, dict):
-            for attr in attrs:
-                element.set(attr, attrs[attr])
-        return element
-
-    def remove_element(self, element_name, root_node = None):
-        """
-        Removes all occurences of an element from the tree. Start at
-        specified root_node if specified, otherwise start at tree's root.
-        """
-        if not root_node:
-            root_node = self.root_node
-
-        if not element_name.startswith('//'):
-            element_name = '//' + element_name
-
-        elements = root_node.xpath('%s ' % element_name, namespaces=self.namespaces)
-        for element in elements:
-            parent = element.getparent()
-            parent.remove(element)
-
-    
-    def add_attribute(self, elem, name, value):
-        """
-        Add attribute to specified etree element
-        """
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def remove_attribute(self, elem, name, value):
-        """
-        Removes an attribute from an element
-        """
-        if not elem == None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def get_attributes(self, elem=None, depth=None):
-        if elem == None:
-            elem = self.root_node
-        attrs = dict(elem.attrib)
-        attrs['text'] = str(elem.text).strip()
-        if depth is None or isinstance(depth, int) and depth > 0: 
-            for child_elem in list(elem):
-                key = str(child_elem.tag)
-                if key not in attrs:
-                    attrs[key] = [self.get_attributes(child_elem, recursive)]
-                else:
-                    attrs[key].append(self.get_attributes(child_elem, recursive))
-        return attrs
-    
-    def attributes_list(self, elem):
-        # convert a list of attribute tags into list of tuples
-        # (tagnme, text_value)
-        opts = []
-        if not elem == None:
-            for e in elem:
-                opts.append((e.tag, e.text))
-        return opts
-
-    
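
The reworked Element base class is now just a dict pre-populated with the subclass's field defaults,
plus an optional back-pointer to the lxml element it was parsed from. A quick illustration (the URN
below is made up):

    from sfa.rspecs.elements.interface import Interface

    if_ref = Interface({'component_id': 'urn:publicid:IDN+example+interface+node1:eth0'})
    print if_ref['component_id']   # value passed to the constructor
    print if_ref['role']           # None, the field default
    print if_ref.element           # None here; set to the lxml element when parsed from an rspec
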
diff --git a/sfa/rspecs/elements/execute.py b/sfa/rspecs/elements/execute.py
new file mode 100644 (file)
index 0000000..43e6e62
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class Execute(Element):
+    fields = {
+        'shell': None,
+        'command': None,
+    }
diff --git a/sfa/rspecs/elements/hardware_type.py b/sfa/rspecs/elements/hardware_type.py
new file mode 100644 (file)
index 0000000..8dd959c
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class HardwareType(Element):
+    
+    fields = {
+        'name': None,
+    }        
diff --git a/sfa/rspecs/elements/install.py b/sfa/rspecs/elements/install.py
new file mode 100644 (file)
index 0000000..1df60b6
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+class Install(Element):
+    fields = {
+        'file_type': None,
+        'url': None,
+        'install_path': None,
+    }
diff --git a/sfa/rspecs/elements/interface.py b/sfa/rspecs/elements/interface.py
new file mode 100644 (file)
index 0000000..7617ade
--- /dev/null
@@ -0,0 +1,12 @@
+from sfa.rspecs.elements.element import Element
+
+class Interface(Element):
+    fields = {'component_id': None,
+              'role': None,
+              'client_id': None,
+              'ipv4': None,
+              'bwlimit': None,
+              'node_id': None,
+              'interface_id': None
+    
+    }    
index 682232e..02a8d10 100644 (file)
@@ -1,14 +1,16 @@
-from sfa.rspec.elements.interface import Interface
+from sfa.rspecs.elements.element import Element    
 
-class Link:
-    def __init__(self):
-        self.component_id = None
-        self.component_name = None
-        self.component_manager_id = None
-        self.type = None
-        self.endpoint1 = Interface()
-        self.endpoint2 = Interface()
-        self.capacity = None
-        self.latency = None
-        self.packet_loss = None
-        self.description = None
+class Link(Element):
+    fields = {
+        'client_id': None, 
+        'component_id': None,
+        'component_name': None,
+        'component_manager': None,
+        'type': None,
+        'interface1': None,
+        'interface2': None,
+        'capacity': None,
+        'latency': None,
+        'packet_loss': None,
+        'description': None,
+    }
diff --git a/sfa/rspecs/elements/link_type.py b/sfa/rspecs/elements/link_type.py
new file mode 100644 (file)
index 0000000..882903d
--- /dev/null
@@ -0,0 +1,6 @@
+from sfa.rspecs.elements.element import Element
+
+class LinkType(Element):
+    fields = {
+        'name': None,
+    } 
diff --git a/sfa/rspecs/elements/location.py b/sfa/rspecs/elements/location.py
new file mode 100644 (file)
index 0000000..a5a9260
--- /dev/null
@@ -0,0 +1,9 @@
+from sfa.rspecs.elements.element import Element
+
+class Location(Element):
+    
+    fields = {
+        'country': None,
+        'longitude': None,
+        'latitude': None,
+    }
diff --git a/sfa/rspecs/elements/login.py b/sfa/rspecs/elements/login.py
new file mode 100644 (file)
index 0000000..a64c759
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+
+class Login(Element):
+    fields = {
+        'authentication': None,
+        'hostname': None,
+        'port': None
+    }
index 6a358a4..362b9ff 100644 (file)
@@ -1,11 +1,9 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.sfalogging import logger
-
 class Network(Element):
-
-    def get_networks(*args, **kwds):
-        logger.info("sfa.rspecs.networks: get_networks not implemented")
-
-    def add_networks(*args, **kwds):
-        logger.info("sfa.rspecs.networks: add_network not implemented")
-        
+    
+    fields = {
+        'name': None,
+    }
+                
+      
index db6e119..f90fff1 100644 (file)
@@ -1,13 +1,26 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.faults import SfaNotImplemented 
-from sfa.util.sfalogging import logger
  
 class Node(Element):
-
-    def get_nodes(*args):
-        logger.info("sfa.rspecs.nodes: get_nodes not implemented") 
     
-    def add_nodes(*args):
-        logger.info("sfa.rspecs.nodes: add_nodes not implemented") 
+    fields = {
+        'component_id': None,
+        'component_name': None,
+        'component_manager_id': None,
+        'client_id': None,
+        'sliver_id': None,
+        'authority_id': None,    
+        'exclusive': None,
+        'location': None,
+        'bw_unallocated': None,
+        'bw_limit': None,
+        'boot_state': None,    
+        'slivers': [],
+        'hardware_types': [],
+        'disk_images': [],
+        'interfaces': [],
+        'services': [],
+        'tags': [],
+        'pl_initscripts': [],
+    }
                 
       
diff --git a/sfa/rspecs/elements/pltag.py b/sfa/rspecs/elements/pltag.py
new file mode 100644 (file)
index 0000000..51b1e76
--- /dev/null
@@ -0,0 +1,9 @@
+from sfa.rspecs.elements.element import Element
+
+class PLTag(Element):
+
+    fields = {
+        'name': None,
+        'value': None,
+    }
+        
diff --git a/sfa/rspecs/elements/property.py b/sfa/rspecs/elements/property.py
new file mode 100644 (file)
index 0000000..97a1ffc
--- /dev/null
@@ -0,0 +1,12 @@
+from sfa.rspecs.elements.element import Element
+
+class Property(Element):
+    
+    fields = {
+        'source_id': None,
+        'dest_id': None,
+        'capacity': None,
+        'latency': None,
+        'packet_loss': None,
+    }
+       
diff --git a/sfa/rspecs/elements/services.py b/sfa/rspecs/elements/services.py
new file mode 100644 (file)
index 0000000..a48be27
--- /dev/null
@@ -0,0 +1,10 @@
+from sfa.rspecs.elements.element import Element
+
+class Services(Element):
+
+    fields = {
+        'install': [],
+        'execute': [],
+        'login': [],
+    }
+
index 67105dc..bf2cc1f 100644 (file)
@@ -1,29 +1,9 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.sfalogging import logger
 
-class Slivers(Element):
-
-    def get_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: get_slivers not implemented")
-
-    def add_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_slivers not implemented")
-
-    def remove_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: remove_slivers not implemented")
-
-    def get_sliver_defaults(*args, **kwds):    
-        logger.debug("sfa.rspecs.slivers: get_sliver_defaults not implemented")
-    
-    def add_default_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_default_sliver_attributes not implemented")
-
-    def add_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_sliver_attribute not implemented")
-
-    def remove_default_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: remove_default_sliver_attributes not implemented")
-
-    def remove_sliver_attribute(*args, **kwds):
-        logger.debuv("sfa.rspecs.slivers: remove_sliver_attribute not implemented")
-        
+class Sliver(Element):
+    fields = {
+        'sliver_id': None,
+        'client_id': None,
+        'name': None,
+        'tags': [],
+    }
diff --git a/sfa/rspecs/elements/tag.py b/sfa/rspecs/elements/tag.py
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/sfa/rspecs/elements/versions/__init__.py b/sfa/rspecs/elements/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/rspecs/elements/versions/element_version.py b/sfa/rspecs/elements/versions/element_version.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/rspecs/elements/versions/pgv2Link.py b/sfa/rspecs/elements/versions/pgv2Link.py
new file mode 100644 (file)
index 0000000..db28f6c
--- /dev/null
@@ -0,0 +1,120 @@
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.link_type import LinkType
+from sfa.rspecs.elements.component_manager import ComponentManager
+from sfa.rspecs.elements.property import Property    
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+
+class PGv2Link:
+    elements = {
+        'link': RSpecElement(RSpecElements.LINK, '//default:link | //link'),
+        'component_manager': RSpecElement(RSpecElements.COMPONENT_MANAGER, './default:component_manager | ./component_manager'),
+        'link_type': RSpecElement(RSpecElements.LINK_TYPE, './default:link_type | ./link_type'),
+        'property': RSpecElement(RSpecElements.PROPERTY, './default:property | ./property'),
+        'interface_ref': RSpecElement(RSpecElements.INTERFACE_REF, './default:interface_ref | ./interface_ref'), 
+    }
+    
+    @staticmethod
+    def add_links(xml, links):
+        for link in links:
+            link_elem = etree.SubElement(xml, 'link')
+            for attrib in ['component_name', 'component_id', 'client_id']:
+                if attrib in link and link[attrib] is not None:
+                    link_elem.set(attrib, link[attrib])
+            if 'component_manager' in link and link['component_manager']:
+                cm_element = etree.SubElement(link_elem, 'component_manager', name=link['component_manager'])
+            for if_ref in [link['interface1'], link['interface2']]:
+                if_ref_elem = etree.SubElement(link_elem, 'interface_ref')
+                for attrib in Interface.fields:
+                    if attrib in if_ref and if_ref[attrib]:
+                        if_ref_elem.attrib[attrib] = if_ref[attrib]  
+            prop1 = etree.SubElement(link_elem, 'property', source_id = link['interface1']['component_id'],
+                dest_id = link['interface2']['component_id'], capacity=link['capacity'], 
+                latency=link['latency'], packet_loss=link['packet_loss'])
+            prop2 = etree.SubElement(link_elem, 'property', source_id = link['interface2']['component_id'],
+                dest_id = link['interface1']['component_id'], capacity=link['capacity'], 
+                latency=link['latency'], packet_loss=link['packet_loss'])
+            if 'type' in link and link['type']:
+                type_elem = etree.SubElement(link_elem, 'link_type', name=link['type'])             
+    @staticmethod 
+    def get_links(xml):
+        links = []
+        link_elems = xml.xpath(PGv2Link.elements['link'].path, namespaces=xml.namespaces)
+        for link_elem in link_elems:
+            # set client_id, component_id, component_name
+            link = Link(link_elem.attrib, link_elem)
+            # set component manager
+            cm = link_elem.xpath('./default:component_manager', namespaces=xml.namespaces)
+            if len(cm) >  0:
+                cm = cm[0]
+                if  'name' in cm.attrib:
+                    link['component_manager'] = cm.attrib['name'] 
+            # set link type
+            link_types = link_elem.xpath(PGv2Link.elements['link_type'].path, namespaces=xml.namespaces)
+            if len(link_types) > 0:
+                link_type = link_types[0]
+                if 'name' in link_type.attrib:
+                    link['type'] = link_type.attrib['name']
+          
+            # get capacity, latency and packet_loss from first property  
+            props = link_elem.xpath(PGv2Link.elements['property'].path, namespaces=xml.namespaces)
+            if len(props) > 0:
+                prop = props[0]
+                for attrib in ['capacity', 'latency', 'packet_loss']:
+                    if attrib in prop.attrib:
+                        link[attrib] = prop.attrib[attrib]
+                             
+            # get interfaces 
+            if_elems = link_elem.xpath(PGv2Link.elements['interface_ref'].path, namespaces=xml.namespaces)
+            ifs = []
+            for if_elem in if_elems:
+                if_ref = Interface(if_elem.attrib, if_elem)
+                ifs.append(if_ref)
+            if len(ifs) > 1:
+                link['interface1'] = ifs[0]
+                link['interface2'] = ifs[1] 
+            links.append(link)
+        return links 
+
+    @staticmethod
+    def add_link_requests(xml, link_tuples, append=False):
+        if not isinstance(link_tuples, set):
+            link_tuples = set(link_tuples)
+
+        available_links = PGv2Link.get_links(xml)
+        recently_added = []
+        for link in available_links:
+            if_name1 =  Xrn(link['interface1']['component_id']).get_leaf()
+            if_name2 =  Xrn(link['interface2']['component_id']).get_leaf()
+             
+            requested_link = None
+            l_tup_1 = (if_name1, if_name2)
+            l_tup_2 = (if_name2, if_name1)
+            if link_tuples.issuperset([(if_name1, if_name2)]):
+                requested_link = (if_name1, if_name2)        
+            elif link_tuples.issuperset([(if_name2, if_name1)]):
+                requested_link = (if_name2, if_name1)
+            if requested_link:
+                # add client id to link and interface elements 
+                link.element.set('client_id', link['component_name'])
+                link['interface1'].element.set('client_id', Xrn(link['interface1']['component_id']).get_leaf()) 
+                link['interface2'].element.set('client_id', Xrn(link['interface2']['component_id']).get_leaf()) 
+                recently_added.append(link['component_name'])
+
+        if not append:
+            # remove all links that don't have a client id 
+            for link in PGv2Link.get_links(xml):
+                if not link['client_id'] or link['component_name'] not in recently_added:
+                    parent = link.element.getparent()
+                    parent.remove(link.element)                  
+             
+    @staticmethod
+    def get_link_requests(xml):
+        link_requests = []
+        for link in PGv2Link.get_links(xml):
+            if link['client_id'] != None:
+                link_requests.append(link)
+        return link_requests           
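
For reference, a minimal way to exercise PGv2Link.add_links (all URNs and values below are
placeholders, and this assumes the sfa package from this tree is importable):

    from lxml import etree
    from sfa.rspecs.elements.versions.pgv2Link import PGv2Link

    rspec_root = etree.Element('rspec')
    link = {'component_id': 'urn:publicid:IDN+example+link+link0',
            'component_name': 'link0',
            'component_manager': 'urn:publicid:IDN+example+authority+cm',
            'type': 'ipv4',
            'capacity': '1000000', 'latency': '0', 'packet_loss': '0',
            'interface1': {'component_id': 'urn:publicid:IDN+example+interface+node1:eth0'},
            'interface2': {'component_id': 'urn:publicid:IDN+example+interface+node2:eth0'}}
    PGv2Link.add_links(rspec_root, [link])
    print etree.tostring(rspec_root, pretty_print=True)
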
diff --git a/sfa/rspecs/elements/versions/pgv2Node.py b/sfa/rspecs/elements/versions/pgv2Node.py
new file mode 100644 (file)
index 0000000..b293b04
--- /dev/null
@@ -0,0 +1,147 @@
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.network import Network
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+class PGv2Node:
+    elements = {
+        'node': RSpecElement(RSpecElements.NODE, '//default:node | //node'),
+        'sliver': RSpecElement(RSpecElements.SLIVER, './default:sliver_type | ./sliver_type'),
+        'interface': RSpecElement(RSpecElements.INTERFACE, './default:interface | ./interface'),
+        'location': RSpecElement(RSpecElements.LOCATION, './default:location | ./location'),
+        'hardware_type': RSpecElement(RSpecElements.HARDWARE_TYPE, './default:hardware_type | ./hardware_type'),
+        'available': RSpecElement(RSpecElements.AVAILABLE, './default:available | ./available'),
+    } 
+    
+    @staticmethod
+    def add_nodes(xml, nodes):
+        node_elems = []
+        for node in nodes:
+            node_elem = etree.SubElement(xml, 'node')
+            node_elems.append(node_elem)
+            if node.get('component_manager_id'):
+                node_elem.set('component_manager_id', node['component_manager_id'])
+            if node.get('component_id'):
+                node_elem.set('component_id', node['component_id'])
+                component_name = Xrn(node['component_id']).get_leaf()
+                node_elem.set('component_name', component_name)
+            if node.get('client_id'):
+                node_elem.set('client_id', node['client_id'])
+            if node.get('sliver_id'):
+                node_elem.set('sliver_id', node['sliver_id'])
+            if node.get('exclusive'):
+                node_elem.set('exclusive', node['exclusive'])
+            hardware_types = node.get('hardware_type', [])
+            for hardware_type in hardware_types:
+                hw_type_elem = etree.SubElement(node_elem, 'hardware_type')
+                if hardware_type.get('name'):
+                    hw_type_elem.set('name', hardware_type['name'])
+            if node.get('boot_state', '').lower() == 'boot':
+                available_elem = etree.SubElement(node_elem, 'available', now='True')
+            else:
+                available_elem = etree.SubElement(node_elem, 'available', now='False')
+            
+            if node.get('services'):
+                PGv2Services.add_services(node_elem, node.get('services'))
+    
+            slivers = node.get('slivers', [])
+            pl_initscripts = node.get('pl_initscripts', {})
+            for sliver in slivers:
+                sliver_elem = etree.SubElement(node_elem, 'sliver_type')
+                if sliver.get('name'):
+                    sliver_elem.set('name', sliver['name'])
+                if sliver.get('client_id'):
+                    sliver_elem.set('client_id', sliver['client_id'])      
+                for pl_initscript in pl_initscripts.values():
+                    etree.SubElement(sliver_elem, '{%s}initscript' % xml.namespaces['planetlab'], \
+                      name=pl_initscript['name'])
+            location = node.get('location')
+            # only add location if longitude and latitude are not null
+            if location and location.get('longitude') and location.get('latitude'):
+                location_elem = etree.SubElement(node_elem, 'location', country=location['country'],
+                  latitude=location['latitude'], longitude=location['longitude'])
+        return node_elems
+
+    @staticmethod
+    def get_nodes(xml):
+        nodes = []
+        node_elems = xml.xpath(PGv2Node.elements['node'].path)
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            nodes.append(node) 
+            if 'component_id' in node_elem.attrib:
+                node['authority_id'] = Xrn(node_elem.attrib['component_id']).get_authority_urn()
+
+            # set hardware type
+            node['hardware_types'] = []
+            hardware_type_elems = node_elem.xpath(PGv2Node.elements['hardware_type'].path, namespaces=xml.namespaces)
+            for hardware_type_elem in hardware_type_elems:
+                node['hardware_types'].append(HardwareType(hardware_type_elem.attrib, hardware_type_elem))
+            
+            # set location
+            location_elems = node_elem.xpath(PGv2Node.elements['location'].path, namespaces=xml.namespaces)
+            if len(location_elems) > 0:
+                node['location'] = Location(location_elems[0].attrib, location_elems[0])
+            
+            # set services
+            services_elems = node_elem.xpath(PGv2Services.elements['services'].path, namespaces=xml.namespaces)
+            node['services'] = []
+            for services_elem in services_elems:
+                # services element has no useful info, but the child elements do  
+                for child in services_elem.iterchildren():
+                    pass
+                    
+            # set interfaces
+            interface_elems = node_elem.xpath(PGv2Node.elements['interface'].path, namespaces=xml.namespaces)
+            node['interfaces'] = []
+            for interface_elem in interface_elems:
+                node['interfaces'].append(Interface(interface_elem.attrib, interface_elem))
+
+            # set available
+            available = node_elem.xpath(PGv2Node.elements['available'].path, namespaces=xml.namespaces)
+            if len(available) > 0:
+                if available[0].attrib.get('now', '').lower() == 'true': 
+                    node['boot_state'] = 'boot'
+                else: 
+                    node['boot_state'] = 'disabled' 
+
+            # set the slivers
+            sliver_elems = node_elem.xpath(PGv2Node.elements['sliver'].path, namespaces=xml.namespaces)
+            node['slivers'] = []
+            for sliver_elem in sliver_elems:
+                node['slivers'].append(Sliver(sliver_elem.attrib, sliver_elem))            
+
+        return nodes
+
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        pass
+   
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        nodes = PGv2Node.get_nodes(xml)
+        nodes_with_slivers = [node for node in nodes if node['slivers']]
+        return nodes_with_slivers 
+
+if __name__ == '__main__':
+    from sfa.rspecs.rspec import RSpec
+    import pdb
+    r = RSpec('/tmp/emulab.rspec')
+    r2 = RSpec(version = 'ProtoGENI')
+    nodes = PGv2Node.get_nodes(r.xml)
+    PGv2Node.add_nodes(r2.xml.root, nodes)
+    #pdb.set_trace()
+        
+                                    
diff --git a/sfa/rspecs/elements/versions/pgv2Services.py b/sfa/rspecs/elements/versions/pgv2Services.py
new file mode 100644 (file)
index 0000000..fe946b3
--- /dev/null
@@ -0,0 +1,65 @@
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.execute import Execute  
+from sfa.rspecs.elements.install import Install  
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+
+class PGv2Services:
+    elements = {
+        'services': RSpecElement(RSpecElements.SERVICES, '//default:services | //services'),
+        'install': RSpecElement(RSpecElements.INSTALL, './default:install | ./install'),
+        'execute': RSpecElement(RSpecElements.EXECUTE, './default:execute | ./execute'),
+        'login': RSpecElement(RSpecElements.LOGIN, './default:login | ./login'),
+    }  
+    
+    @staticmethod
+    def add_services(xml, services):
+        for service in services:
+            service_elem = etree.SubElement(xml, 'services')
+            for install in service.get('install', []):
+                install_elem = etree.SubElement(service_elem, 'install')
+                for field in Install.fields:
+                    if field in install:
+                        install_elem.set(field, install[field])
+            for execute in service.get('execute', []):
+                execute_elem = etree.SubElement(service_elem, 'execute')
+                for field in Execute.fields:
+                    if field in execute:
+                        execute_elem.set(field, execute[field])
+            for login in service.get('login', []):
+                login_elem = etree.SubElement(service_elem, 'login')
+                for field in Login.fields:
+                    if field in login:
+                        login_elem.set(field, login[field]) 
+
+              
+    @staticmethod
+    def get_services(xml):
+        services = []
+        for services_elem in xml.xpath(PGv2Services.elements['services'].path):
+            service = Services(services_elem.attrib, services_elem)
+            
+            # get install elements
+            service['install'] = []
+            for install_elem in services_elem.xpath(PGv2Services.elements['install'].path, namespaces=xml.namespaces):
+                install = Install(install_elem.attrib, install_elem)
+                service['install'].append(install)
+            
+            # get execute elements
+            service['execute'] = []
+            for execute_elem in services_elem.xpath(PGv2Services.elements['execute'].path, namespaces=xml.namespaces):
+                execute = Execute(execute_elem.attrib, execute_elem)
+                service['execute'].append(execute)
+
+            # get login elements
+            service['login'] = []
+            for login_elem in services_elem.xpath(PGv2Services.elements['login'].path, namespaces=xml.namespaces):
+                login = Login(login_elem.attrib, login_elem)
+                service['login'].append(login)             
+
+            services.append(service)  
+        return services
+
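
A similarly small example of serializing a services entry with PGv2Services.add_services (hostname
and values below are placeholders):

    from lxml import etree
    from sfa.rspecs.elements.versions.pgv2Services import PGv2Services

    node_elem = etree.Element('node')
    services = [{'login': [{'authentication': 'ssh-keys',
                            'hostname': 'node1.example.org',
                            'port': '22'}]}]
    PGv2Services.add_services(node_elem, services)
    print etree.tostring(node_elem, pretty_print=True)
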
diff --git a/sfa/rspecs/elements/versions/pgv2SliverType.py b/sfa/rspecs/elements/versions/pgv2SliverType.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/rspecs/elements/versions/sfav1Network.py b/sfa/rspecs/elements/versions/sfav1Network.py
new file mode 100644 (file)
index 0000000..b529ad5
--- /dev/null
@@ -0,0 +1,32 @@
+
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.network import Network
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+
+class SFAv1Network:
+    elements = {
+        'network': RSpecElement(RSpecElements.NETWORK, '//network'),
+    }
+
+    @staticmethod
+    def add_network(xml, network):
+        found = False
+        network_objs = SFAv1Network.get_networks(xml)
+        for network_obj in network_objs:
+            if network_obj['name'] == network['name']:
+                found = True
+                network_elem = network_obj.element
+        if not found:
+            network_elem = etree.SubElement(xml, 'network', name = network['name'])
+        return network_elem  
+    
+    @staticmethod
+    def get_networks(xml):
+        networks = []
+        network_elems = xml.xpath(SFAv1Network.elements['network'].path)
+        for network_elem in network_elems:
+            network = Network({'name': network_elem.attrib.get('name', None)}, network_elem)
+            networks.append(network)
+        return networks
diff --git a/sfa/rspecs/elements/versions/sfav1Node.py b/sfa/rspecs/elements/versions/sfav1Node.py
new file mode 100644 (file)
index 0000000..0b67c91
--- /dev/null
@@ -0,0 +1,142 @@
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.network import Network 
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+from sfa.rspecs.elements.versions.sfav1Network import SFAv1Network
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+class SFAv1Node:
+
+    elements = {
+        'node': RSpecElement(RSpecElements.NODE, '//default:node | //node'),
+        'sliver': RSpecElement(RSpecElements.SLIVER, './default:sliver | ./sliver'),
+        'interface': RSpecElement(RSpecElements.INTERFACE, './default:interface | ./interface'),
+        'location': RSpecElement(RSpecElements.LOCATION, './default:location | ./location'),
+        'bw_limit': RSpecElement(RSpecElements.BWLIMIT, './default:bw_limit | ./bw_limit'),
+    }
+    
+    @staticmethod
+    def add_nodes(xml, nodes):
+        network_elems = SFAv1Network.get_networks(xml)
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_elem = SFAv1Network.add_network(xml.root, {'name': nodes[0]['component_manager_id']})
+            
+
+        node_elems = []       
+        for node in nodes:
+            node_elem = etree.SubElement(network_elem, 'node')
+            node_elems.append(node_elem)
+            network = None 
+            if 'component_manager_id' in node and node['component_manager_id']:
+                node_elem.set('component_manager_id', node['component_manager_id'])
+                network = Xrn(node['component_manager_id']).get_hrn()
+            if 'component_id' in node and node['component_id']:
+                node_elem.set('component_id', node['component_id'])
+                xrn = Xrn(node['component_id'])
+                node_elem.set('component_name', xrn.get_leaf())
+                hostname_tag = etree.SubElement(node_elem, 'hostname').text = xrn.get_leaf()
+            if 'authority_id' in node and node['authority_id']:
+                node_elem.set('site_id', node['authority_id'])
+            if 'boot_state' in node and node['boot_state']:
+                node_elem.set('boot_state', node['boot_state'])
+            if 'location' in node and node['location']:
+                location_elem = etree.SubElement(node_elem, 'location')
+                for field in Location.fields:
+                    if field in node['location'] and node['location'][field]:
+                        location_elem.set(field, node['location'][field])
+            if 'interfaces' in node and node['interfaces']:
+                i = 0
+                for interface in node['interfaces']:
+                    if 'bwlimit' in interface and interface['bwlimit']:
+                        bwlimit = etree.SubElement(node_elem, 'bw_limit', units='kbps').text = str(interface['bwlimit']/1000)
+                    comp_id = PlXrn(auth=network, interface='node%s:eth%s' % (interface['node_id'], i)).get_urn()
+                    ipaddr = interface['ipv4']
+                    interface_elem = etree.SubElement(node_elem, 'interface', component_id=comp_id, ipv4=ipaddr)
+                    i+=1
+            if 'bw_unallocated' in node and node['bw_unallocated']:
+                bw_unallocated = etree.SubElement(node_elem, 'bw_unallocated', units='kbps').text = str(int(node['bw_unallocated'])/1000)
+
+            if node.get('services'):
+                PGv2Services.add_services(node_elem, node.get('services'))
+
+            if 'tags' in node:
+                for tag in node['tags']:
+                    # expose this hard-wired list of tags, plus the ones that are marked 'sfa' in their category
+                    if tag['name'] in ['fcdistro', 'arch']:
+                        tag_element = etree.SubElement(node_elem, tag['name']).text = tag['value']
+
+            if node.get('slivers'):
+                for sliver in node['slivers']:
+                    sliver_elem = etree.SubElement(node_elem, 'sliver')
+                    if sliver.get('sliver_id'): 
+                        sliver_id_leaf = Xrn(sliver.get('sliver_id')).get_leaf()
+                        sliver_id_parts = sliver_id_leaf.split(':')
+                        name = sliver_id_parts[0] 
+                        sliver_elem.set('name', name) 
+
+    @staticmethod 
+    def add_slivers(xml, slivers):
+        pass
+
+    @staticmethod
+    def get_nodes(xml):
+        nodes = []
+        node_elems = xml.xpath(SFAv1Node.elements['node'].path)
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            if 'site_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['site_id']
+            if 'authority_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['authority_id']
+            # set the location
+            location_elems = node_elem.xpath(SFAv1Node.elements['location'].path, namespaces=xml.namespaces)
+            if len(location_elems) > 0:
+                node['location'] = Location(location_elems[0].attrib, location_elems[0])
+            
+            # set the bwlimit
+            bwlimit_elems = node_elem.xpath(SFAv1Node.elements['bw_limit'].path, namespaces=xml.namespaces)
+            if len(bwlimit_elems) > 0:
+                bwlimit = BWlimit(bwlimit_elems[0].attrib, bwlimit_elems[0])
+                node['bwlimit'] = bwlimit
+
+            # set the interfaces
+            interface_elems = node_elem.xpath(SFAv1Node.elements['interface'].path, namespaces=xml.namespaces)
+            node['interfaces'] = []
+            for interface_elem in interface_elems:
+                node['interfaces'].append(Interface(interface_elem.attrib, interface_elem))
+            
+            # set the slivers
+            sliver_elems = node_elem.xpath(SFAv1Node.elements['sliver'].path, namespaces=xml.namespaces)
+            node['slivers'] = []
+            for sliver_elem in sliver_elems:
+                node['slivers'].append(Sliver(sliver_elem.attrib, sliver_elem))
+            
+            # set tags
+            node['tags'] = [] 
+            for child in node_elem.iterchildren():
+                if child.tag not in SFAv1Node.elements:
+                    tag = PLTag({'name': child.tag, 'value': child.text}, child)  
+                    node['tags'].append(tag) 
+            nodes.append(node)
+        return nodes
+        
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        nodes = SFAv1Node.get_nodes(xml)
+        nodes_with_slivers = [node for node in nodes if node['slivers']]
+        return nodes_with_slivers
+    
+             
diff --git a/sfa/rspecs/elements/versions/sfav1Sliver.py b/sfa/rspecs/elements/versions/sfav1Sliver.py
new file mode 100644 (file)
index 0000000..f12c977
--- /dev/null
@@ -0,0 +1,18 @@
+
+from lxml import etree
+
+from sfa.rspecs.elements.sliver import Sliver
+
+from sfa.util.xrn import Xrn
+from sfa.util.plxrn import PlXrn
+class SFAv1Sliver:
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        for sliver in slivers:
+            sliver_elem = etree.SubElement(xml, 'sliver')
+            if sliver.get('component_id'):
+                name_full = Xrn(sliver.get('component_id')).get_leaf()
+                name = name_full.split(':')[0]
+                sliver_elem.set('name', name)
+                     
index 42e7ccd..1c57d7d 100755 (executable)
@@ -1,7 +1,7 @@
 #!/usr/bin/python 
 from lxml import etree
 from StringIO import StringIO
-from sfa.util.xrn import *
+from sfa.util.xrn import Xrn, urn_to_hrn
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.version_manager import VersionManager
 
index 73185fe..a04ff28 100755 (executable)
@@ -1,11 +1,11 @@
 #!/usr/bin/python 
 from datetime import datetime, timedelta
-from sfa.rspecs.xml import XML, XpathFilter
-from sfa.rspecs.version_manager import VersionManager
-from sfa.util.xrn import *
-from sfa.util.plxrn import hostname_to_urn
+
+from sfa.util.xml import XML, XpathFilter
+from sfa.util.faults import InvalidRSpecElement
+
 from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements 
-from sfa.util.faults import SfaNotImplemented, InvalidRSpec, InvalidRSpecElement
+from sfa.rspecs.version_manager import VersionManager
 
 class RSpec:
  
@@ -70,7 +70,7 @@ class RSpec:
 
     def get(self, element_type, filter={}, depth=0):
         elements = self.get_elements(element_type, filter)
-        elements = [self.get_element_attributes(element, depth=depth) for element in elements]
+        elements = [self.xml.get_element_attributes(elem, depth=depth) for elem in elements]
         return elements
 
     def get_elements(self, element_type, filter={}):
@@ -83,7 +83,7 @@ class RSpec:
             raise InvalidRSpecElement(element_type, extra=msg)
         rspec_element = self.get_rspec_element(element_type)
         xpath = rspec_element.path + XpathFilter.xpath(filter)
-        return self.xpath(xpath)
+        return self.xml.xpath(xpath)
 
     def merge(self, in_rspec):
         self.version.merge(in_rspec)
index 2eb0d8c..ce3cac7 100644 (file)
@@ -1,12 +1,30 @@
 from sfa.util.enumeration import Enum
+from sfa.util.faults import InvalidRSpecElement
 
 # recognized top level rspec elements
-RSpecElements = Enum('NETWORK', 'NODE', 'SLIVER', 'INTERFACE', 'LINK', 'VLINK')
+RSpecElements = Enum(
+    AVAILABLE='AVAILABLE',
+    BWLIMIT='BWLIMIT',
+    EXECUTE='EXECUTE',
+    NETWORK='NETWORK', 
+    COMPONENT_MANAGER='COMPONENT_MANAGER',
+    HARDWARE_TYPE='HARDWARE_TYPE', 
+    INSTALL='INSTALL', 
+    INTERFACE='INTERFACE', 
+    INTERFACE_REF='INTERFACE_REF',
+    LOCATION='LOCATION', 
+    LOGIN='LOGIN', 
+    LINK='LINK', 
+    LINK_TYPE='LINK_TYPE', 
+    NODE='NODE', 
+    PROPERTY='PROPERTY',
+    SERVICES='SERVICES',
+    SLIVER='SLIVER', 
+    SLIVER_TYPE='SLIVER_TYPE', 
+)
 
 class RSpecElement:
-    def __init__(self, element_type, name, path):
+    def __init__(self, element_type, path):
         if not element_type in RSpecElements:
             raise InvalidRSpecElement(element_type)
         self.type = element_type
-        self.name = name
         self.path = path
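
With the name argument gone, a registration is just an element type paired with its xpath; a sketch under the new signature (the path shown is illustrative):

    from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements

    # previously: RSpecElement(RSpecElements.NODE, 'node', '//node')
    node_element = RSpecElement(RSpecElements.NODE, '//default:node | //node')
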
index 6ba56c1..7bcc787 100755 (executable)
@@ -1,8 +1,6 @@
 #!/usr/bin/python
 
-from lxml import etree
-from StringIO import StringIO
-from sfa.util.xrn import *
+from sfa.util.xrn import hrn_to_urn
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.version_manager import VersionManager
 
index 177b32c..b57cd9b 100644 (file)
@@ -1,11 +1,12 @@
 from lxml import etree
 from copy import deepcopy
 from StringIO import StringIO
-from sfa.util.xrn import *
+from sfa.util.xrn import urn_to_sliver_id
 from sfa.util.plxrn import hostname_to_urn, xrn_to_hostname 
 from sfa.rspecs.rspec_version import BaseVersion
 from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
-
+from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
 class PGv2(BaseVersion):
     type = 'ProtoGENI'
     content_type = 'ad'
@@ -17,11 +18,7 @@ class PGv2(BaseVersion):
         'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
     }
     namespaces = dict(extensions.items() + [('default', namespace)])
-    elements = [
-        RSpecElement(RSpecElements.NETWORK, 'network', '//default:node[@component_manager_id][1]'),
-        RSpecElement(RSpecElements.NODE, 'node', '//default:node | //node'),
-        RSpecElement(RSpecElements.SLIVER, 'sliver', '//default:node/default:sliver_type | //node/sliver_type'),
-    ]
+    elements = []
 
     def get_network(self):
         network = None
@@ -95,6 +92,18 @@ class PGv2(BaseVersion):
 
         return slice_attributes
 
+    def get_links(self, network=None):
+        return PGv2Link.get_links(self.xml)
+
+    def get_link_requests(self):
+        return PGv2Link.get_link_requests(self.xml)  
+
+    def add_links(self, links):
+        PGv2Link.add_links(self.xml.root, links)
+
+    def add_link_requests(self, link_tuples, append=False):
+        PGv2Link.add_link_requests(self.xml.root, link_tuples, append)
+
     def attributes_list(self, elem):
         opts = []
         if elem is not None:
@@ -129,7 +138,7 @@ class PGv2(BaseVersion):
             node_type_tag = etree.SubElement(node_tag, 'hardware_type', name='plab-pc')
             node_type_tag = etree.SubElement(node_tag, 'hardware_type', name='pc')
             available_tag = etree.SubElement(node_tag, 'available', now='true')
-            sliver_type_tag = etree.SubElement(node_tag, 'sliver_type', name='plab-vnode')
+            sliver_type_tag = etree.SubElement(node_tag, 'sliver_type', name='plab-vserver')
 
             pl_initscripts = node.get('pl_initscripts', {})
             for pl_initscript in pl_initscripts.values():
@@ -149,7 +158,7 @@ class PGv2(BaseVersion):
         # this is untested
         self.xml.root.append(deepcopy(source_node_tag))
 
-    def add_slivers(self, slivers, sliver_urn=None, no_dupes=False):
+    def add_slivers(self, slivers, sliver_urn=None, no_dupes=False, append=False):
 
          # all nodes should already be present in the rspec. Remove all
          # nodes that don't have slivers
@@ -164,19 +173,18 @@ class PGv2(BaseVersion):
         for node in nodes:
             urn = node.get('component_id')
             hostname = xrn_to_hostname(urn)
-            if hostname not in slivers_dict:
+            if hostname not in slivers_dict and not append:
                 parent = node.getparent()
                 parent.remove(node)
             else:
                 sliver_info = slivers_dict[hostname]
-                sliver_type_elements = node.xpath('./sliver_type', namespaces=self.namespaces)
+                sliver_type_elements = node.xpath('./default:sliver_type', namespaces=self.namespaces)
                 available_sliver_types = [element.attrib['name'] for element in sliver_type_elements]
-                valid_sliver_types = ['emulab-openvz', 'raw-pc']
+                valid_sliver_types = ['emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
                 requested_sliver_type = None
                 for valid_sliver_type in valid_sliver_types:
-                    if valid_sliver_type in available_sliver_type:
+                    if valid_sliver_type in available_sliver_types:
                         requested_sliver_type = valid_sliver_type
-
                 if requested_sliver_type:
                     # remove existing sliver_type tags, they need to be recreated
                     sliver_elem = node.xpath('./default:sliver_type | ./sliver_type', namespaces=self.namespaces)
@@ -204,6 +212,7 @@ class PGv2(BaseVersion):
                     parent = node.getparent()
                     parent.remove(node)
 
+    
 
     def remove_slivers(self, slivers, network=None, no_dupes=False):
         for sliver in slivers:
@@ -218,28 +227,6 @@ class PGv2(BaseVersion):
     def add_interfaces(self, interfaces, no_dupes=False):
         pass
 
-    def add_links(self, links, no_dupes=False):
-        for link in links:
-            link_elem = etree.SubElement(self.xml.root, 'link' )
-            link_elem.set('component_name', link.component_name) 
-            link_elem.set('component_id', link.component_id)
-            cm_elem = etree.SubElement(link_elem, 'component_manager')
-            cm_elem.set('name', link.component_manager_name)
-            for endpoint in [link.endpoint1, link.enpoint2]:
-                interface_ref = etree.SubElement(link_elem, 'interface_ref', component_id=endpoint.id)
-                
-            property_attrs = {'capicity': link.capacity, 
-                              'latency': link.latency, 
-                              'packet_loss': link.packet_loss}    
-            property1 = etree.SubElement(link_elem, 'property', source_id=link.endpoint1.id, \
-              dest_id = link.endpoint2.id, capacity = link.capacity, latency=link.latency, \
-              packet_loss = link.packet_loss)
-            
-            property2 = etree.SubElement(link_elem, 'property', source_id=link.endpoint2.id, \
-              dest_id = link.endpoint1.id, capacity = link.capacity, latency=link.latency, \
-              packet_loss = link.packet_loss)
-            link_type = etree.SubElement(link_elem, 'link_type', name=link.type)
-
     def merge(self, in_rspec):
         """
         Merge contents for specified rspec with current rspec
index 1b147c9..3917b39 100644 (file)
@@ -1,7 +1,10 @@
+from copy import deepcopy
 from lxml import etree
 from sfa.util.xrn import hrn_to_urn, urn_to_hrn
+from sfa.util.plxrn import PlXrn
 from sfa.rspecs.rspec_version import BaseVersion
 from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
 
 class SFAv1(BaseVersion):
     enabled = True
@@ -12,11 +15,7 @@ class SFAv1(BaseVersion):
     namespace = None
     extensions = {}
     namespaces = None
-    elements = [
-        RSpecElement(RSpecElements.NETWORK, 'network', '//network'),
-        RSpecElement(RSpecElements.NODE, 'node', '//node'),
-        RSpecElement(RSpecElements.SLIVER, 'sliver', '//node/sliver'),
-    ] 
+    elements = [] 
     template = '<RSpec type="%s"></RSpec>' % type
 
     def get_network_elements(self):
@@ -116,18 +115,10 @@ class SFAv1(BaseVersion):
         return nodes
 
     def get_links(self, network=None):
-        if network:
-            links = self.xml.xpath('//network[@name="%s"]/link' % network)
-        else:
-            links = self.xml.xpath('//link')
-        linklist = []
-        for link in links:
-            (end1, end2) = link.get("endpoints").split()
-            name = link.find("description")
-            linklist.append((name.text,
-                             self.get_site_nodes(end1, network),
-                             self.get_site_nodes(end2, network)))
-        return linklist
+        return PGv2Link.get_links(self.xml)
+
+    def get_link_requests(self):
+        return PGv2Link.get_link_requests(self.xml) 
 
     def get_link(self, fromnode, tonode, network=None):
         fromsite = fromnode.getparent()
@@ -222,9 +213,14 @@ class SFAv1(BaseVersion):
                 node_tag.set('component_name', node['hostname']) 
                 hostname_tag = etree.SubElement(node_tag, 'hostname').text = node['hostname']
             if 'interfaces' in node:
+                i = 0
                 for interface in node['interfaces']:
                     if 'bwlimit' in interface and interface['bwlimit']:
                         bwlimit = etree.SubElement(node_tag, 'bw_limit', units='kbps').text = str(interface['bwlimit']/1000)
+                    comp_id = PlXrn(auth=network, interface='node%s:eth%s' % (node['node_id'], i)).get_urn() 
+                    ipaddr = interface['ip'] 
+                    interface_tag = etree.SubElement(node_tag, 'interface', component_id=comp_id, ipv4=ipaddr)
+                    i+=1
             if 'bw_unallocated' in node:
                 bw_unallocated = etree.SubElement(node_tag, 'bw_unallocated', units='kbps').text = str(node['bw_unallocated']/1000) 
             if 'tags' in node:
@@ -251,18 +247,17 @@ class SFAv1(BaseVersion):
         pass
 
     def add_links(self, links):
-        for link in links:
-            network_tag = self.xml.root
-            if link.component_manager_id != None:
-                network_hrn, type = urn_to_hrn(link.component_manager_id)
-                network_tag = self.add_network(network) 
+        networks = self.get_network_elements()
+        if len(networks) > 0:
+            xml = networks[0]
+        else:
+            xml = self.xml    
+        PGv2Link.add_links(xml, links)
 
-            link_elem = etree.SubElement(network_tag, 'link')
-            link_elem.set('endpoints', '%s %s' % (link.endpoint1.name, link.endpoint2.name))
-            description = etree.SubElement(link_elem, 'description').text = link.description
-            bw_unallocated = etree.SubElement(link_elem, 'bw_unallocated', units='kbps').text = link.capacity  
+    def add_link_requests(self, links):
+        PGv2Link.add_link_requests(self.xml, links)
 
-    def add_slivers(self, slivers, network=None, sliver_urn=None, no_dupes=False):
+    def add_slivers(self, slivers, network=None, sliver_urn=None, no_dupes=False, append=False):
         # add slice name to network tag
         network_tags = self.xml.xpath('//network')
         if network_tags:
@@ -283,10 +278,11 @@ class SFAv1(BaseVersion):
                     etree.SubElement(sliver_elem, tag['tagname']).text = value=tag['value']
             
         # remove all nodes without slivers
-        for node in nodes_without_slivers:
-            node_elem = self.get_node_element(node, network)
-            parent = node_elem.getparent()
-            parent.remove(node_elem)
+        if not append:
+            for node in nodes_without_slivers:
+                node_elem = self.get_node_element(node)
+                parent = node_elem.getparent()
+                parent.remove(node_elem)
 
     def remove_slivers(self, slivers, network=None, no_dupes=False):
         for sliver in slivers:
diff --git a/sfa/rspecs/xml.py b/sfa/rspecs/xml.py
deleted file mode 100755 (executable)
index ac6526c..0000000
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/python 
-from lxml import etree
-from StringIO import StringIO
-from datetime import datetime, timedelta
-from sfa.util.xrn import *
-from sfa.util.plxrn import hostname_to_urn
-from sfa.util.faults import SfaNotImplemented, InvalidRSpec, InvalidRSpecElement
-
-class XpathFilter:
-    @staticmethod
-    def xpath(filter={}):
-        xpath = ""
-        if filter:
-            filter_list = []
-            for (key, value) in filter.items():
-                if key == 'text':
-                    key = 'text()'
-                else:
-                    key = '@'+key
-                if isinstance(value, str):
-                    filter_list.append('%s="%s"' % (key, value))
-                elif isinstance(value, list):
-                    filter_list.append('contains("%s", %s)' % (' '.join(map(str, value)), key))
-            if filter_list:
-                xpath = ' and '.join(filter_list)
-                xpath = '[' + xpath + ']'
-        return xpath
-
-class XML:
-    def __init__(self, xml=None):
-        self.root = None
-        self.namespaces = None
-        self.default_namespace = None
-        self.schema = None
-        if isinstance(xml, basestring):
-            self.parse_xml(xml)
-        elif isinstance(xml, etree._ElementTree):
-            self.root = xml.getroot()
-        elif isinstance(xml, etree._Element):
-            self.root = xml 
-
-    def parse_xml(self, xml):
-        """
-        parse rspec into etree
-        """
-        parser = etree.XMLParser(remove_blank_text=True)
-        try:
-            tree = etree.parse(xml, parser)
-        except IOError:
-            # 'rspec' file doesnt exist. 'rspec' is proably an xml string
-            try:
-                tree = etree.parse(StringIO(xml), parser)
-            except Exception, e:
-                raise InvalidRSpec(str(e))
-        self.root = tree.getroot()
-        # set namespaces map
-        self.namespaces = dict(self.root.nsmap)
-        # If the 'None' exist, then it's pointing to the default namespace. This makes 
-        # it hard for us to write xpath queries for the default naemspace because lxml 
-        # wont understand a None prefix. We will just associate the default namespeace 
-        # with a key named 'default'.     
-        if None in self.namespaces:
-            default_namespace = self.namespaces.pop(None)
-            self.namespaces['default'] = default_namespace
-
-        # set schema 
-        for key in self.root.attrib.keys():
-            if key.endswith('schemaLocation'):
-                # schema location should be at the end of the list
-                schema_parts  = self.root.attrib[key].split(' ')
-                self.schema = schema_parts[1]    
-                namespace, schema  = schema_parts[0], schema_parts[1]
-                break
-
-    def validate(self, schema):
-        """
-        Validate against rng schema
-        """
-        relaxng_doc = etree.parse(schema)
-        relaxng = etree.RelaxNG(relaxng_doc)
-        if not relaxng(self.root):
-            error = relaxng.error_log.last_error
-            message = "%s (line %s)" % (error.message, error.line)
-            raise InvalidRSpec(message)
-        return True
-
-    def xpath(self, xpath, namespaces=None):
-        if not namespaces:
-            namespaces = self.namespaces
-        return self.root.xpath(xpath, namespaces=namespaces)
-
-    def set(self, key, value):
-        return self.root.set(key, value)
-
-    def add_attribute(self, elem, name, value):
-        """
-        Add attribute to specified etree element    
-        """
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def add_element(self, name, attrs={}, parent=None, text=""):
-        """
-        Generic wrapper around etree.SubElement(). Adds an element to 
-        specified parent node. Adds element to root node is parent is 
-        not specified. 
-        """
-        if parent == None:
-            parent = self.root
-        element = etree.SubElement(parent, name)
-        if text:
-            element.text = text
-        if isinstance(attrs, dict):
-            for attr in attrs:
-                element.set(attr, attrs[attr])  
-        return element
-
-    def remove_attribute(self, elem, name, value):
-        """
-        Removes an attribute from an element
-        """
-        if elem is not None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def remove_element(self, element_name, root_node = None):
-        """
-        Removes all occurences of an element from the tree. Start at 
-        specified root_node if specified, otherwise start at tree's root.   
-        """
-        if not root_node:
-            root_node = self.root
-
-        if not element_name.startswith('//'):
-            element_name = '//' + element_name
-
-        elements = root_node.xpath('%s ' % element_name, namespaces=self.namespaces)
-        for element in elements:
-            parent = element.getparent()
-            parent.remove(element)
-
-    def attributes_list(self, elem):
-        # convert a list of attribute tags into list of tuples
-        # (tagnme, text_value)
-        opts = []
-        if elem is not None:
-            for e in elem:
-                opts.append((e.tag, str(e.text).strip()))
-        return opts
-
-    def get_element_attributes(self, elem=None, depth=0):
-        if elem == None:
-            elem = self.root_node
-        if not hasattr(elem, 'attrib'):
-            # this is probably not an element node with attribute. could be just and an
-            # attribute, return it
-            return elem
-        attrs = dict(elem.attrib)
-        attrs['text'] = str(elem.text).strip()
-        attrs['parent'] = elem.getparent()
-        if isinstance(depth, int) and depth > 0:
-            for child_elem in list(elem):
-                key = str(child_elem.tag)
-                if key not in attrs:
-                    attrs[key] = [self.get_element_attributes(child_elem, depth-1)]
-                else:
-                    attrs[key].append(self.get_element_attributes(child_elem, depth-1))
-        else:
-            attrs['child_nodes'] = list(elem)
-        return attrs
-
-    def merge(self, in_xml):
-        pass
-
-    def __str__(self):
-        return self.toxml()
-
-    def toxml(self):
-        return etree.tostring(self.root, pretty_print=True)  
-        
-    def save(self, filename):
-        f = open(filename, 'w')
-        f.write(self.toxml())
-        f.close()
-if __name__ == '__main__':
-    rspec = RSpec('/tmp/resources.rspec')
-    print rspec
-
diff --git a/sfa/rspecs/xml_interface.py b/sfa/rspecs/xml_interface.py
deleted file mode 100755 (executable)
index 98fe416..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/usr/bin/python 
-from lxml import etree
-from StringIO import StringIO
-from datetime import datetime, timedelta
-from sfa.util.xrn import *
-from sfa.util.plxrn import hostname_to_urn
-from sfa.util.faults import SfaNotImplemented, InvalidRSpec, InvalidRSpecElement
-
-class XpathFilter:
-    @staticmethod
-    def xpath(filter={}):
-        xpath = ""
-        if filter:
-            filter_list = []
-            for (key, value) in filter.items():
-                if key == 'text':
-                    key = 'text()'
-                else:
-                    key = '@'+key
-                if isinstance(value, str):
-                    filter_list.append('%s="%s"' % (key, value))
-                elif isinstance(value, list):
-                    filter_list.append('contains("%s", %s)' % (' '.join(map(str, value)), key))
-            if filter_list:
-                xpath = ' and '.join(filter_list)
-                xpath = '[' + xpath + ']'
-        return xpath
-
-class XMLInterface:
-    def __init__(self, xml=""):
-        self.header = None 
-        self.template = None 
-        self.xml = None
-        self.namespaces = None
-        if xml:
-            self.parse_xml(xml)
-        else:
-            self.create()
-
-    def create(self):
-        """
-        Create root element
-        """
-        self.parse_rspec(self.template)
-    
-    def parse_xml(self, xml):
-        """
-        parse rspec into etree
-        """
-        parser = etree.XMLParser(remove_blank_text=True)
-        try:
-            tree = etree.parse(xml, parser)
-        except IOError:
-            # 'rspec' file doesnt exist. 'rspec' is proably an xml string
-            try:
-                tree = etree.parse(StringIO(xml), parser)
-            except Exception, e:
-                raise InvalidRSpec(str(e))
-        self.xml = tree.getroot()  
-
-    def validate(self, schema):
-        """
-        Validate against rng schema
-        """
-        relaxng_doc = etree.parse(schema)
-        relaxng = etree.RelaxNG(relaxng_doc)
-        if not relaxng(self.xml):
-            error = relaxng.error_log.last_error
-            message = "%s (line %s)" % (error.message, error.line)
-            raise InvalidRSpec(message)
-        return True
-
-    def xpath(self, xpath):
-        return self.xml.xpath(xpath, namespaces=self.namespaces)
-
-    def add_attribute(self, elem, name, value):
-        """
-        Add attribute to specified etree element    
-        """
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def add_element(self, name, attrs={}, parent=None, text=""):
-        """
-        Generic wrapper around etree.SubElement(). Adds an element to 
-        specified parent node. Adds element to root node is parent is 
-        not specified. 
-        """
-        if parent == None:
-            parent = self.xml
-        element = etree.SubElement(parent, name)
-        if text:
-            element.text = text
-        if isinstance(attrs, dict):
-            for attr in attrs:
-                element.set(attr, attrs[attr])  
-        return element
-
-    def remove_attribute(self, elem, name, value):
-        """
-        Removes an attribute from an element
-        """
-        if elem is not None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def remove_element(self, element_name, root_node = None):
-        """
-        Removes all occurences of an element from the tree. Start at 
-        specified root_node if specified, otherwise start at tree's root.   
-        """
-        if not root_node:
-            root_node = self.xml
-
-        if not element_name.startswith('//'):
-            element_name = '//' + element_name
-
-        elements = root_node.xpath('%s ' % element_name, namespaces=self.namespaces)
-        for element in elements:
-            parent = element.getparent()
-            parent.remove(element)
-
-    def attributes_list(self, elem):
-        # convert a list of attribute tags into list of tuples
-        # (tagnme, text_value)
-        opts = []
-        if elem is not None:
-            for e in elem:
-                opts.append((e.tag, str(e.text).strip()))
-        return opts
-
-    def get_element_attributes(self, elem=None, depth=0):
-        if elem == None:
-            elem = self.root_node
-        if not hasattr(elem, 'attrib'):
-            # this is probably not an element node with attribute. could be just and an
-            # attribute, return it
-            return elem
-        attrs = dict(elem.attrib)
-        attrs['text'] = str(elem.text).strip()
-        attrs['parent'] = elem.getparent()
-        if isinstance(depth, int) and depth > 0:
-            for child_elem in list(elem):
-                key = str(child_elem.tag)
-                if key not in attrs:
-                    attrs[key] = [self.get_element_attributes(child_elem, depth-1)]
-                else:
-                    attrs[key].append(self.get_element_attributes(child_elem, depth-1))
-        else:
-            attrs['child_nodes'] = list(elem)
-        return attrs
-
-    def merge(self, in_xml):
-        pass
-
-    def cleanup(self):
-        """
-        Optional method which inheriting classes can choose to implent. 
-        """
-        pass 
-
-    def __str__(self):
-        return self.toxml()
-
-    def toxml(self, cleanup=False):
-        if cleanup:
-            self.cleanup()
-        return self.header + etree.tostring(self.xml, pretty_print=True)  
-        
-    def save(self, filename):
-        f = open(filename, 'w')
-        f.write(self.toxml())
-        f.close()
-if __name__ == '__main__':
-    rspec = RSpec('/tmp/resources.rspec')
-    print rspec
-
index 84528e7..fbc14ec 100644 (file)
@@ -47,8 +47,8 @@ class LDAPapi :
                                ldapfilter="(|"
                                for hrn in hrns:
                                        splited_hrn=hrn.split(".")
-                                       if splited_hrn[0] != "SFA_REGISTRY_ROOT_AUTH" :
-                                               print >>sys.stderr,"i know nothing about",hrn
+                                       if splited_hrn[0] != self.authname :
+                                               print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", splited_hrn[0]
                                        else :
                                                login=splited_hrn[1]
                                                ldapfilter+="(uid="
@@ -87,4 +87,4 @@ class LDAPapi :
                                'date_created' : 'none',
                                'last_updated': 'none'
                                } )
-               return results
\ No newline at end of file
+               return results
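
For reference, the OR filter assembled by the loop above ends up as a standard LDAP disjunction; a standalone sketch (the closing parentheses are assumed from LDAP filter syntax, since the tail of the loop falls outside this hunk):

    authname = "senslab"
    hrns = ["senslab.alice", "senslab.bob", "otherroot.carol"]

    ldapfilter = "(|"
    for hrn in hrns:
        parts = hrn.split(".")
        if parts[0] != authname:
            continue                         # "i know nothing about" this hrn
        ldapfilter += "(uid=" + parts[1] + ")"
    ldapfilter += ")"
    # ldapfilter == "(|(uid=alice)(uid=bob))"
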
similarity index 60%
rename from sfa/senslab/api.py
rename to sfa/senslab/slabdriver.py
index 3ebc7b6..40e56be 100644 (file)
@@ -1,69 +1,19 @@
-#
-# SFA XML-RPC and SOAP interfaces
-#
-
-import sys
-import os
-import traceback
-import string
-import datetime
-import xmlrpclib
-
-from sfa.util.faults import *
-from sfa.util.api import *
-from sfa.util.config import *
+from sfa.util.faults import MissingSfaInfo
 from sfa.util.sfalogging import logger
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-from sfa.trust.auth import Auth
-from sfa.trust.rights import Right, Rights, determine_rights
-from sfa.trust.credential import Credential,Keypair
-from sfa.trust.certificate import Certificate
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_slicename, slicename_to_hrn
-from sfa.util.nodemanager import NodeManager
+from sfa.util.table import SfaTable
+from sfa.util.defaultdict import defaultdict
 
+from sfa.util.xrn import hrn_to_urn
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of elsewhere;
+# SlabDriver should really only be about talking to the senslab testbed
+
+## thierry : please avoid wildcard imports :)
 from sfa.senslab.OARrestapi import *
 from sfa.senslab.SenslabImportUsers import *
 
-try:
-    from collections import defaultdict
-except:
-    class defaultdict(dict):
-        def __init__(self, default_factory=None, *a, **kw):
-            if (default_factory is not None and
-                not hasattr(default_factory, '__call__')):
-                raise TypeError('first argument must be callable')
-            dict.__init__(self, *a, **kw)
-            self.default_factory = default_factory
-        def __getitem__(self, key):
-            try:
-                return dict.__getitem__(self, key)
-            except KeyError:
-                return self.__missing__(key)
-        def __missing__(self, key):
-            if self.default_factory is None:
-                raise KeyError(key)
-            self[key] = value = self.default_factory()
-            return value
-        def __reduce__(self):
-            if self.default_factory is None:
-                args = tuple()
-            else:
-                args = self.default_factory,
-            return type(self), args, None, None, self.items()
-        def copy(self):
-            return self.__copy__()
-        def __copy__(self):
-            return type(self)(self.default_factory, self)
-        def __deepcopy__(self, memo):
-            import copy
-            return type(self)(self.default_factory,
-                              copy.deepcopy(self.items()))
-        def __repr__(self):
-            return 'defaultdict(%s, %s)' % (self.default_factory,
-                                            dict.__repr__(self))
-## end of http://code.activestate.com/recipes/523034/ }}}
-
 def list_to_dict(recs, key):
     """
     convert a list of dictionaries into a dictionary keyed on the 
@@ -74,173 +24,26 @@ def list_to_dict(recs, key):
     #print>>sys.stderr, " \r\n \t\t list_to_dict : rec %s  \r\n \t\t list_to_dict keys %s" %(recs,keys)   
     return dict(zip(keys, recs))
 
-class SfaAPI(BaseAPI):
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+class SlabDriver (OARapi, SenslabImportUsers):
 
-    # flat list of method names
-    import sfa.methods
-    methods = sfa.methods.all
-    
-    def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", 
-                 methods='sfa.methods', peer_cert = None, interface = None, 
-                key_file = None, cert_file = None, cache = None):
-        BaseAPI.__init__(self, config=config, encoding=encoding, methods=methods, \
-                         peer_cert=peer_cert, interface=interface, key_file=key_file, \
-                         cert_file=cert_file, cache=cache)
+    def __init__(self, config):
+        self.config=config
+        self.hrn = config.SFA_INTERFACE_HRN
  
-        self.encoding = encoding
-        from sfa.util.table import SfaTable
-        self.SfaTable = SfaTable
-        # Better just be documenting the API
-        if config is None:
-            return
-       print >>sys.stderr, "\r\n_____________ SFA SENSLAB API \r\n" 
-        # Load configuration
-        self.config = Config(config)
-        self.auth = Auth(peer_cert)
-        self.interface = interface
-        self.key_file = key_file
-        self.key = Keypair(filename=self.key_file)
-        self.cert_file = cert_file
-        self.cert = Certificate(filename=self.cert_file)
-        self.credential = None
-        # Initialize the PLC shell only if SFA wraps a myPLC
-        rspec_type = self.config.get_aggregate_type()
-       self.oar = OARapi()
-       self.users = SenslabImportUsers()
-        self.hrn = self.config.SFA_INTERFACE_HRN
+       print >>sys.stderr, "\r\n_____________ SFA SENSLAB DRIVER \r\n" 
+        # thierry - just to not break the rest of this code
+       #self.oar = OARapi()
+       #self.users = SenslabImportUsers()
+       self.oar = self
+       self.users = self
         self.time_format = "%Y-%m-%d %H:%M:%S"
         #self.logger=sfa_logger()
-       print >>sys.stderr, "\r\n \t\t___________PSFA SENSLAN /API.PY  __init__ STOP ",self.interface #dir(self)
-       
-       
-
-    def getCredential(self):
-        """
-        Return a valid credential for this interface. 
-        """
-        type = 'authority'
-        path = self.config.SFA_DATA_DIR
-        filename = ".".join([self.interface, self.hrn, type, "cred"])
-        cred_filename = path + os.sep + filename
-       print>>sys.stderr, "|\r\n \r\n API.pPY getCredentialAUTHORITY  cred_filename %s" %(cred_filename)
-        cred = None
-        if os.path.isfile(cred_filename):
-            cred = Credential(filename = cred_filename)
-            # make sure cred isnt expired
-            if not cred.get_expiration or \
-               datetime.datetime.today() < cred.get_expiration():    
-                return cred.save_to_string(save_parents=True)
-
-        # get a new credential
-        if self.interface in ['registry']:
-            cred =  self.__getCredentialRaw()
-        else:
-            cred =  self.__getCredential()
-        cred.save_to_file(cred_filename, save_parents=True)
-
-        return cred.save_to_string(save_parents=True)
-
-
-    def get_server(self, interface, cred, timeout=30):
-        """
-        Returns a connection to the specified interface. Use the specified
-        credential to determine the caller and look for the caller's key/cert 
-        in the registry hierarchy cache. 
-        """       
-        from sfa.trust.hierarchy import Hierarchy
-        if not isinstance(cred, Credential):
-            cred_obj = Credential(string=cred)
-        else:
-            cred_obj = cred
-        caller_gid = cred_obj.get_gid_caller()
-        hierarchy = Hierarchy()
-        auth_info = hierarchy.get_auth_info(caller_gid.get_hrn())
-        key_file = auth_info.get_privkey_filename()
-        cert_file = auth_info.get_gid_filename()
-        server = interface.get_server(key_file, cert_file, timeout)
-        return server
-               
-
-    def getDelegatedCredential(self, creds):
-        """
-        Attempt to find a credential delegated to us in
-        the specified list of creds.
-        """
-       if creds and not isinstance(creds, list):
-               creds = [creds]
-        delegated_creds = filter_creds_by_caller(creds, [self.hrn, self.hrn + '.slicemanager'])
-        if not delegated_creds:
-               return None
-        return delegated_creds[0]
-    def __getCredential(self):
-        """ 
-        Get our credential from a remote registry 
-        """
-        from sfa.server.registry import Registries
-       registries = Registries()
-        registry = registries.get_server(self.hrn, self.key_file, self.cert_file)
-       #Sandrine 24 Oct 2 commented 2 following lines
-        #registries = Registries(self)
-        #registry = registries[self.hrn]
-       print>>sys.stderr, " SenslabAPI.PY __getCredential registries %s self.hrn %s \t registry %s " %(registries,self.hrn,registry)
-        cert_string=self.cert.save_to_string(save_parents=True)
-        # get self credential
-        self_cred = registry.GetSelfCredential(cert_string, self.hrn, 'authority')
-        # get credential
-        cred = registry.GetCredential(self_cred, self.hrn, 'authority')
-        return Credential(string=cred)
-
-    def __getCredentialRaw(self):
-        """
-        Get our current credential directly from the local registry.
-        """
-
-        hrn = self.hrn
-        auth_hrn = self.auth.get_authority(hrn)
-    
-        # is this a root or sub authority
-        if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
-            auth_hrn = hrn
-        auth_info = self.auth.get_auth_info(auth_hrn)
-        table = self.SfaTable()
-       records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
-       #Sandrine 24 Oct 2 commented  following line
-        #records = table.findObjects(hrn)
+        #print >>sys.stderr, "\r\n \t\t___________PSFA SENSLAN /API.PY  __init__ STOP ",self.interface #dir(self)
        
-        if not records:
-            raise RecordNotFound
-        record = records[0]
-        type = record['type']
-        object_gid = record.get_gid_object()
-        new_cred = Credential(subject = object_gid.get_subject())
-        new_cred.set_gid_caller(object_gid)
-        new_cred.set_gid_object(object_gid)
-        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
-        
-        r1 = determine_rights(type, hrn)
-        new_cred.set_privileges(r1)
-        new_cred.encode()
-        new_cred.sign()
-
-        return new_cred
-   
-
-    def loadCredential (self):
-        """
-        Attempt to load credential from file if it exists. If it doesnt get
-        credential from registry.
-        """
-
-        # see if this file exists
-        # XX This is really the aggregate's credential. Using this is easier than getting
-        # the registry's credential from iteslf (ssl errors).   
-        ma_cred_filename = self.config.SFA_DATA_DIR + os.sep + self.interface + self.hrn + ".ma.cred"
-        try:
-            self.credential = Credential(filename = ma_cred_filename)
-        except IOError:
-            self.credential = self.getCredentialFromRegistry()
-
     ##
     # Convert SFA fields to PLC fields for use when registering or updating
     # a registry record in the PLC database
@@ -498,7 +301,7 @@ class SfaAPI(BaseAPI):
         # we obtain
         
         # get the sfa records
-        table = self.SfaTable()
+        table = SfaTable()
         person_list, persons = [], {}
         person_list = table.find({'type': 'user', 'pointer': person_ids})
         # create a hrns keyed on the sfa record's pointer.
@@ -601,7 +404,7 @@ class SfaAPI(BaseAPI):
         # build a list of the new person ids, by looking up each person to get
         # their pointer
         newIdList = []
-        table = self.SfaTable()
+        table = SfaTable()
         records = table.find({'type': 'user', 'hrn': newList})
         for rec in records:
             newIdList.append(rec['pointer'])
@@ -635,97 +438,6 @@ class SfaAPI(BaseAPI):
             # xxx TODO
             pass
 
-
-
-class ComponentAPI(BaseAPI):
-
-    def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", methods='sfa.methods',
-                 peer_cert = None, interface = None, key_file = None, cert_file = None):
-
-        BaseAPI.__init__(self, config=config, encoding=encoding, methods=methods, peer_cert=peer_cert,
-                         interface=interface, key_file=key_file, cert_file=cert_file)
-        self.encoding = encoding
-
-        # Better just be documenting the API
-        if config is None:
-            return
-
-        self.nodemanager = NodeManager(self.config)
-
-    def sliver_exists(self):
-        sliver_dict = self.nodemanager.GetXIDs()
-        if slicename in sliver_dict.keys():
-            return True
-        else:
-            return False
-
-    def get_registry(self):
-        addr, port = self.config.SFA_REGISTRY_HOST, self.config.SFA_REGISTRY_PORT
-        url = "http://%(addr)s:%(port)s" % locals()
-        server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
-        return server
-
-    def get_node_key(self):
-        # this call requires no authentication,
-        # so we can generate a random keypair here
-        subject="component"
-        (kfd, keyfile) = tempfile.mkstemp()
-        (cfd, certfile) = tempfile.mkstemp()
-        key = Keypair(create=True)
-        key.save_to_file(keyfile)
-        cert = Certificate(subject=subject)
-        cert.set_issuer(key=key, subject=subject)
-        cert.set_pubkey(key)
-        cert.sign()
-        cert.save_to_file(certfile)
-        registry = self.get_registry()
-        # the registry will scp the key onto the node
-        registry.get_key()        
-
-    def getCredential(self):
-        """
-        Get our credential from a remote registry
-        """
-        path = self.config.SFA_DATA_DIR
-        config_dir = self.config.config_path
-        cred_filename = path + os.sep + 'node.cred'
-       print>>sys.stderr, "\r\n \r\n API.pPY COMPONENT getCredential  cred_filename %s" %(cred_filename)
-        try:
-            credential = Credential(filename = cred_filename)
-            return credential.save_to_string(save_parents=True)
-        except IOError:
-            node_pkey_file = config_dir + os.sep + "node.key"
-            node_gid_file = config_dir + os.sep + "node.gid"
-            cert_filename = path + os.sep + 'server.cert'
-            if not os.path.exists(node_pkey_file) or \
-               not os.path.exists(node_gid_file):
-                self.get_node_key()
-
-            # get node's hrn
-            gid = GID(filename=node_gid_file)
-            hrn = gid.get_hrn()
-            # get credential from registry
-            cert_str = Certificate(filename=cert_filename).save_to_string(save_parents=True)
-            registry = self.get_registry()
-            cred = registry.GetSelfCredential(cert_str, hrn, 'node')
-            Credential(string=cred).save_to_file(credfile, save_parents=True)            
-
-            return cred
-
-    def clean_key_cred(self):
-        """
-        remove the existing keypair and cred  and generate new ones
-        """
-        files = ["server.key", "server.cert", "node.cred"]
-        for f in files:
-            filepath = KEYDIR + os.sep + f
-            if os.path.isfile(filepath):
-                os.unlink(f)
-
-        # install the new key pair
-        # GetCredential will take care of generating the new keypair
-        # and credential
-        self.get_node_key()
-        self.getCredential()
-
-    
+### thierry
+# I don't think you plan on running a component manager at this point
+# let me clean up the mess of ComponentAPI that is deprecated anyways
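
Given the new constructor, a driver instance only needs a parsed configuration; a minimal sketch, assuming the stock /etc/sfa/sfa_config.py path:

    from sfa.util.config import Config
    from sfa.senslab.slabdriver import SlabDriver

    driver = SlabDriver(Config("/etc/sfa/sfa_config.py"))
    # the driver doubles as its own OAR and users helper (self.oar = self)
    print driver.hrn                         # SFA_INTERFACE_HRN from the config
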
index 62c3af7..9ace414 100644 (file)
@@ -92,7 +92,7 @@ class SfaTable(list):
                        for hrn in hrns:
                                head,sep,tail=hrn.partition(".")
                                if head != self.authname :
-                                       print >>sys.stderr,"i know nothing about",hrn
+                                       print >>sys.stderr,"i know nothing about",hrn, " my authname is ", self.authname, " not ", head
                                else :
                                        node_ids.append(tail)
 
index 59a3e6b..90fcaf4 100644 (file)
@@ -1,9 +1,9 @@
-from sfa.util.faults import *
-from sfa.util.server import SfaServer
+from sfa.server.sfaserver import SfaServer
 from sfa.util.xrn import hrn_to_urn
 from sfa.server.interface import Interfaces, Interface
 from sfa.util.config import Config     
 
+# this truly is a server-side object
 class Aggregate(SfaServer):
 
     ##
@@ -16,9 +16,10 @@ class Aggregate(SfaServer):
     def __init__(self, ip, port, key_file, cert_file):
         SfaServer.__init__(self, ip, port, key_file, cert_file,'aggregate')
 
-##
+#
 # Aggregates is a dictionary of aggregate connections keyed on the aggregate hrn
-
+# as such it's more of a client-side thing for aggregate servers to reach their peers
+#
 class Aggregates(Interfaces):
 
     default_dict = {'aggregates': {'aggregate': [Interfaces.default_fields]}}
index c83ac4a..9baa6c2 100644 (file)
@@ -1,16 +1,12 @@
 #
 # Component is a SfaServer that implements the Component interface
 #
-### $Id: 
-### $URL: 
-#
-
 import tempfile
 import os
 import time
 import sys
 
-from sfa.util.componentserver import ComponentServer
+from sfa.server.sfaserver import SfaServer
  
 # GeniLight client support is optional
 try:
@@ -21,7 +17,7 @@ except ImportError:
 ##
 # Component is a SfaServer that serves component operations.
 
-class Component(ComponentServer):
+class Component(SfaServer):
     ##
     # Create a new registry object.
     #
@@ -31,5 +27,4 @@ class Component(ComponentServer):
     # @param cert_file certificate filename containing public key (could be a GID file)
 
     def __init__(self, ip, port, key_file, cert_file):
-        ComponentServer.__init__(self, ip, port, key_file, cert_file)
-        self.server.interface = 'component'
+        SfaServer.__init__(self, ip, port, key_file, cert_file, interface='component')
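
With ComponentServer gone, bringing up the component service is a plain SfaServer instantiation bound to the 'component' interface; a sketch, where 12346 matches the component_port default used by sfa-start.py and the key/cert paths are illustrative:

    from sfa.server.component import Component

    c = Component("0.0.0.0", 12346,
                  "/var/lib/sfa/server.key",
                  "/var/lib/sfa/server.cert")
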
index baeb2e7..3866a45 100644 (file)
@@ -1,13 +1,6 @@
-import traceback
-import os.path
-
-from sfa.util.faults import *
-from sfa.util.storage import XmlStorage
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.record import SfaRecord
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-import sfa.util.soapprotocol as soapprotocol
-from sfa.trust.gid import GID
+#from sfa.util.faults import *
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+from sfa.util.xml import XML
 
 # GeniLight client support is optional
 try:
@@ -15,10 +8,11 @@ try:
 except ImportError:
     GeniClientLight = None            
 
-
-
 class Interface:
-    
+    """
+    Interface to another SFA service, typically a peer or the local aggregate;
+    can retrieve an xmlrpclib.ServerProxy object for issuing calls there
+    """
     def __init__(self, hrn, addr, port, client_type='sfa'):
         self.hrn = hrn
         self.addr = addr
@@ -31,12 +25,13 @@ class Interface:
         url =  "http://%s" %  "/".join(address_parts)
         return url
 
-    def get_server(self, key_file, cert_file, timeout=30):
+    def server_proxy(self, key_file, cert_file, timeout=30):
         server = None 
         if  self.client_type ==  'geniclientlight' and GeniClientLight:
+            # xxx url and self.api are undefined
             server = GeniClientLight(url, self.api.key_file, self.api.cert_file)
         else:
-            server = xmlrpcprotocol.get_server(self.get_url(), key_file, cert_file, timeout) 
+            server = xmlrpcprotocol.server_proxy(self.get_url(), key_file, cert_file, timeout) 
  
         return server       
 ##
@@ -62,21 +57,20 @@ class Interfaces(dict):
     def __init__(self, conf_file):
         dict.__init__(self, {})
         # load config file
-        self.interface_info = XmlStorage(conf_file, self.default_dict)
-        self.interface_info.load()
-        records = self.interface_info.values()[0].values()[0]
-        if not isinstance(records, list):
-            records = [records]
-        
-        required_fields = self.default_fields.keys()
-        for record in records:
-            if not set(required_fields).issubset(record.keys()):
-                continue
-            # port is appended onto the domain, before the path. Should look like:
-            # http://domain:port/path
-            hrn, address, port = record['hrn'], record['addr'], record['port']
-            interface = Interface(hrn, address, port) 
-            self[hrn] = interface
+        required_fields = set(self.default_fields.keys())
+        self.interface_info = XML(conf_file).todict()
+        for value in self.interface_info.values():
+            if isinstance(value, list):
+                for record in value:
+                    if isinstance(record, dict) and \
+                      required_fields.issubset(record.keys()):
+                        hrn, address, port = record['hrn'], record['addr'], record['port']
+                        # sometimes this is called at a very early stage with no config loaded;
+                        # avoid remembering this instance in such a case
+                        if not address or not port:
+                            continue     
+                        interface = Interface(hrn, address, port)
+                        self[hrn] = interface   
 
-    def get_server(self, hrn, key_file, cert_file, timeout=30):
-        return self[hrn].get_server(key_file, cert_file, timeout)
+    def server_proxy(self, hrn, key_file, cert_file, timeout=30):
+        return self[hrn].server_proxy(key_file, cert_file, timeout)
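
Following the get_server to server_proxy rename, reaching a peer is a two-step lookup; a hedged sketch, where the peer hrn and the key/cert paths are illustrative and GetVersion stands in for any SFA call:

    from sfa.server.registry import Registries

    registries = Registries()                # populated from the registries config via sfa.util.xml.XML
    peer = registries.server_proxy('ple',
                                   '/var/lib/sfa/server.key',
                                   '/var/lib/sfa/server.cert',
                                   timeout=30)
    print peer.GetVersion()                  # plain xmlrpclib.ServerProxy call underneath
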
index deaf89f..515b083 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.api import SfaAPI
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = SfaAPI(interface='aggregate')
+api = SfaApi(interface='aggregate')
 
 def handler(req):
     try:
index 8879813..1be5480 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.api import SfaAPI
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = SfaAPI(interface='registry')
+api = SfaApi(interface='registry')
 
 def handler(req):
     try:
index e0f2b92..cd655a7 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.api import SfaAPI
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = SfaAPI(interface='slicemgr')
+api = SfaApi(interface='slicemgr')
 
 def handler(req):
     try:
index 0536c5c..bdad7df 100644 (file)
@@ -1,18 +1,14 @@
 #
 # Registry is a SfaServer that implements the Registry interface
 #
-### $Id$
-### $URL$
-#
-
-from sfa.util.server import SfaServer
-from sfa.util.faults import *
-from sfa.util.xrn import hrn_to_urn
+from sfa.server.sfaserver import SfaServer
 from sfa.server.interface import Interfaces, Interface
 from sfa.util.config import Config 
 
-##
+#
 # Registry is a SfaServer that serves registry and slice operations at PLC.
+# this truly is a server-side object
+#
 class Registry(SfaServer):
     ##
     # Create a new registry object.
@@ -25,10 +21,10 @@ class Registry(SfaServer):
     def __init__(self, ip, port, key_file, cert_file):
         SfaServer.__init__(self, ip, port, key_file, cert_file,'registry')
 
-##
-# Registries is a dictionary of registry connections keyed on the registry
-# hrn
-
+#
+# Registries is a dictionary of registry connections keyed on the registry hrn
+# as such it's more of a client-side thing for registry servers to reach their peers
+#
 class Registries(Interfaces):
     
     default_dict = {'registries': {'registry': [Interfaces.default_fields]}}
index 8297b2d..0fbe140 100755 (executable)
 import os
 import sys
 from optparse import OptionParser
-from sfa.trust.certificate import Keypair, Certificate
+
+from sfa.util.config import Config
+from sfa.util.table import SfaTable
+
 from sfa.trust.gid import GID, create_uuid
 from sfa.trust.hierarchy import Hierarchy
-from sfa.util.config import Config
-from collections import defaultdict
 
 def main():
     args = sys.argv
@@ -110,7 +111,6 @@ def sign(options):
     
 
 def export_gid(options):
-    from sfa.util.table import SfaTable
     # lookup the record for the specified hrn 
     hrn = options.export
     type = options.type
@@ -124,7 +124,7 @@ def export_gid(options):
         # check the authorities hierarchy 
         hierarchy = Hierarchy()
         try:
-            auth_info = hierarchy.get_auth_info()
+            auth_info = hierarchy.get_auth_info(hrn)
             gid = auth_info.gid_object 
         except:
             print "Record: %s not found" % hrn
@@ -148,8 +148,6 @@ def import_gid(options):
     Import the specified gid into the registry (db and authorities 
     hierarchy) overwriting any previous gid.
     """
-    from sfa.util.table import SfaTable
-    from sfa.util.record import SfaRecord
     # load the gid
     gidfile = os.path.abspath(options.importgid)
     if not gidfile or not os.path.isfile(gidfile):
@@ -167,7 +165,7 @@ def import_gid(options):
     table = SfaTable()
     records = table.find({'hrn': gid.get_hrn(), 'type': 'authority'})
     if not records:
-        print "%s not found in record database" % get.get_hrn()  
+        print "%s not found in record database" % gid.get_hrn()  
         sys.exit(1)
 
     # update the database record
index f821f4c..0d99e98 100644 (file)
@@ -3,15 +3,18 @@
 import sys
 import os
 import traceback
+import socket
+
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol 
 from sfa.util.table import SfaTable
 from sfa.util.prefixTree import prefixTree
-from sfa.plc.api import SfaAPI
 from sfa.util.config import Config
+
+from sfa.generic import Generic
+
 from sfa.trust.certificate import Keypair
 from sfa.trust.hierarchy import Hierarchy
 from sfa.server.registry import Registries
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol 
-import socket
 
 def main():
     config = Config()
@@ -30,8 +33,8 @@ def main():
     # and a valid credential
     authority = config.SFA_INTERFACE_HRN
     url = 'http://%s:%s/' %(config.SFA_REGISTRY_HOST, config.SFA_REGISTRY_PORT)
-    registry = xmlrpcprotocol.get_server(url, key_file, cert_file)
-    sfa_api = SfaAPI(key_file = key_file, cert_file = cert_file, interface='registry')
+    registry = xmlrpcprotocol.server_proxy(url, key_file, cert_file)
+    sfa_api = Generic.the_flavour()
     credential = sfa_api.getCredential()
 
     # get peer registries
similarity index 86%
rename from sfa/server/sfa-server.py
rename to sfa/server/sfa-start.py
index 4ed8c1f..d4a3131 100755 (executable)
@@ -1,13 +1,12 @@
 #!/usr/bin/python
 #
-# SFA PLC Wrapper
+# PlanetLab SFA implementation
 #
-# This wrapper implements the SFA Registry and Slice Interfaces on PLC.
+# This implements the SFA Registry and Slice Interfaces on PLC.
 # Depending on command line options, it starts some combination of a
 # Registry, an Aggregate Manager, and a Slice Manager.
 #
-# There are several items that need to be done before starting the wrapper
-# server.
+# There are several items that need to be done before starting the servers.
 #
 # NOTE:  Many configuration settings, including the PLC maintenance account
 # credentials, URI of the PLCAPI, and PLC DB URI and admin credentials are initialized
@@ -35,19 +34,21 @@ component_port=12346
 import os, os.path
 import traceback
 import sys
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 from optparse import OptionParser
 
 from sfa.util.sfalogging import logger
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.config import Config
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.hierarchy import Hierarchy
 from sfa.trust.gid import GID
-from sfa.util.config import Config
-from sfa.plc.api import SfaAPI
+
+from sfa.server.sfaapi import SfaApi
+
 from sfa.server.registry import Registries
 from sfa.server.aggregate import Aggregates
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.sfalogging import logger
 
 # after http://www.erlenstar.demon.co.uk/unix/faq_2.html
 def daemon():
@@ -135,34 +136,6 @@ def init_self_signed_cert(hrn, key, server_cert_file):
     cert.sign()
     cert.save_to_file(server_cert_file)
 
-def init_server(options, config):
-    """
-    Execute the init method defined in the manager file 
-    """
-    def init_manager(manager_module, manager_base):
-        try: manager = __import__(manager_module, fromlist=[manager_base])
-        except: manager = None
-        if manager and hasattr(manager, 'init_server'):
-            manager.init_server()
-    
-    manager_base = 'sfa.managers'
-    if options.registry:
-        mgr_type = config.SFA_REGISTRY_TYPE
-        manager_module = manager_base + ".registry_manager_%s" % mgr_type
-        init_manager(manager_module, manager_base)    
-    if options.am:
-        mgr_type = config.SFA_AGGREGATE_TYPE
-        manager_module = manager_base + ".aggregate_manager_%s" % mgr_type
-        init_manager(manager_module, manager_base)    
-    if options.sm:
-        mgr_type = config.SFA_SM_TYPE
-        manager_module = manager_base + ".slice_manager_%s" % mgr_type
-        init_manager(manager_module, manager_base)    
-    if options.cm:
-        mgr_type = config.SFA_CM_TYPE
-        manager_module = manager_base + ".component_manager_%s" % mgr_type
-        init_manager(manager_module, manager_base)    
-
 def install_peer_certs(server_key_file, server_cert_file):
     """
     Attempt to install missing trusted gids and db records for 
@@ -172,7 +145,7 @@ def install_peer_certs(server_key_file, server_cert_file):
     # There should be a gid file in /etc/sfa/trusted_roots for every
     # peer registry found in in the registries.xml config file. If there
     # are any missing gids, request a new one from the peer registry.
-    api = SfaAPI(key_file = server_key_file, cert_file = server_cert_file)
+    api = SfaApi(key_file = server_key_file, cert_file = server_cert_file)
     registries = Registries()
     aggregates = Aggregates()
     interfaces = dict(registries.items() + aggregates.items())
@@ -193,7 +166,7 @@ def install_peer_certs(server_key_file, server_cert_file):
         try:
             # get gid from the registry
             url = interfaces[new_hrn].get_url()
-            interface = interfaces[new_hrn].get_server(server_key_file, server_cert_file, timeout=30)
+            interface = interfaces[new_hrn].server_proxy(server_key_file, server_cert_file, timeout=30)
             # skip non sfa aggregates
             server_version = api.get_cached_server_version(interface)
             if 'sfa' not in server_version:
@@ -229,7 +202,7 @@ def update_cert_records(gids):
     Make sure there is a record in the registry for the specified gids. 
     Removes old records from the db.
     """
-    # import SfaTable here so this module can be loaded by ComponentAPI
+    # import SfaTable here so this module can be loaded by PlcComponentApi
     from sfa.util.table import SfaTable
     from sfa.util.record import SfaRecord
     if not gids:
@@ -261,7 +234,7 @@ def update_cert_records(gids):
         
 def main():
     # Generate command line parser
-    parser = OptionParser(usage="sfa-server [options]")
+    parser = OptionParser(usage="sfa-start.py [options]")
     parser.add_option("-r", "--registry", dest="registry", action="store_true",
          help="run registry server", default=False)
     parser.add_option("-s", "--slicemgr", dest="sm", action="store_true",
@@ -285,8 +258,7 @@ def main():
     server_cert_file = os.path.join(hierarchy.basedir, "server.cert")
 
     init_server_key(server_key_file, server_cert_file, config, hierarchy)
-    init_server(options, config)
+
     if (options.daemon):  daemon()
     
     if options.trusted_certs:
index b0a5a47..16f358e 100755 (executable)
@@ -4,10 +4,11 @@ import os
 import tempfile
 from optparse import OptionParser
 
-from sfa.util.faults import *
+from sfa.util.faults import ConnectionKeyGIDMismatch
 from sfa.util.config import Config
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 from sfa.util.plxrn import hrn_to_pl_slicename, slicename_to_hrn
+
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.credential import Credential
 from sfa.trust.gid import GID
@@ -27,7 +28,7 @@ def handle_gid_mismatch_exception(f):
 
     return wrapper
 
-def get_server(url=None, port=None, keyfile=None, certfile=None,verbose=False):
+def server_proxy(url=None, port=None, keyfile=None, certfile=None,verbose=False):
     """
     returns an xmlrpc connection to the service at the specified
     address
@@ -47,7 +48,7 @@ def get_server(url=None, port=None, keyfile=None, certfile=None,verbose=False):
     if verbose:
         print "Contacting registry at: %(url)s" % locals()
 
-    server = xmlrpcprotocol.get_server(url, keyfile, certfile)
+    server = xmlrpcprotocol.server_proxy(url, keyfile, certfile)
     return server    
     
 
@@ -96,7 +97,7 @@ def get_node_key(registry=None, verbose=False):
     cert.sign()
     cert.save_to_file(certfile)
     
-    registry = get_server(url = registry, keyfile=keyfile, certfile=certfile)    
+    registry = server_proxy(url = registry, keyfile=keyfile, certfile=certfile)    
     registry.get_key()
 
 def create_server_keypair(keyfile=None, certfile=None, hrn="component", verbose=False):
@@ -144,7 +145,7 @@ def get_credential(registry=None, force=False, verbose=False):
         create_server_keypair(keyfile, certfile, hrn, verbose)
 
         # get credential from registry 
-        registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+        registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
         cert = Certificate(filename=certfile)
         cert_str = cert.save_to_string(save_parents=True)
         cred = registry.GetSelfCredential(cert_str, 'node', hrn)
@@ -171,7 +172,7 @@ def get_trusted_certs(registry=None, verbose=False):
     cred = get_credential(registry=registry, verbose=verbose)
     # make sure server key cert pair exists
     create_server_keypair(keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
-    registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+    registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
     # get the trusted certs and save them in the right place
     if verbose:
         print "Getting trusted certs from registry"
@@ -216,14 +217,15 @@ def get_gids(registry=None, verbose=False):
     cred = get_credential(registry=registry, verbose=verbose)
     # make sure server key cert pair exists
     create_server_keypair(keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
-    registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+    registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
             
     if verbose:
         print "Getting current slices on this node"
     # get a list of slices on this node
-    from sfa.plc.api import ComponentAPI
-    api = ComponentAPI()
-    xids_tuple = api.nodemanager.GetXIDs()
+    from sfa.generic import Generic
+    generic=Generic.the_flavour()
+    api = generic.make_api(interface='component')
+    xids_tuple = api.driver.nodemanager.GetXIDs()
     slices = eval(xids_tuple[1])
     slicenames = slices.keys()
 
diff --git a/sfa/server/sfaapi.py b/sfa/server/sfaapi.py
new file mode 100644 (file)
index 0000000..7085037
--- /dev/null
@@ -0,0 +1,207 @@
+import os, os.path
+import datetime
+
+from sfa.util.faults import SfaAPIError, RecordNotFound
+from sfa.util.config import Config
+from sfa.util.cache import Cache
+from sfa.trust.auth import Auth
+from sfa.trust.certificate import Keypair, Certificate
+from sfa.trust.credential import Credential
+from sfa.trust.rights import determine_rights
+
+from sfa.server.xmlrpcapi import XmlrpcApi
+
+# thgen xxx fixme this is wrong all right, but temporary, will use generic
+from sfa.util.table import SfaTable
+
+####################
+class SfaApi (XmlrpcApi): 
+    
+    """
+    An SfaApi instance is a basic xmlrpc service
+    augmented with the local cryptographic material and hrn
+
+    It also has the notion of its own interface (a string describing
+    whether we run a registry, aggregate or slicemgr) and has 
+    the notion of neighbour sfa services as defined 
+    in /etc/sfa/{aggregates,registries}.xml
+
+    Finally it contains a cache instance
+
+    It gets augmented by the generic layer with
+    (*) an instance of a manager (actually a manager module for now)
+    (*) which in turn holds an instance of a testbed driver
+    For convenience, api.manager.driver == api.driver
+    """
+
+    def __init__ (self, encoding="utf-8", methods='sfa.methods', 
+                  config = "/etc/sfa/sfa_config.py", 
+                  peer_cert = None, interface = None, 
+                  key_file = None, cert_file = None, cache = None):
+        
+        XmlrpcApi.__init__ (self, encoding)
+        
+        # we may just be documenting the API
+        if config is None:
+            return
+        # Load configuration
+        self.config = Config(config)
+        self.credential = None
+        self.auth = Auth(peer_cert)
+        self.interface = interface
+        self.hrn = self.config.SFA_INTERFACE_HRN
+        self.key_file = key_file
+        self.key = Keypair(filename=self.key_file)
+        self.cert_file = cert_file
+        self.cert = Certificate(filename=self.cert_file)
+        self.cache = cache
+        if self.cache is None:
+            self.cache = Cache()
+
+        # load registries
+        from sfa.server.registry import Registries
+        self.registries = Registries() 
+
+        # load aggregates
+        from sfa.server.aggregate import Aggregates
+        self.aggregates = Aggregates()
+        
+        # filled later on by generic/Generic
+        self.manager=None
+
+    def server_proxy(self, interface, cred, timeout=30):
+        """
+        Returns a connection to the specified interface. Uses the specified
+        credential to determine the caller and looks up the caller's key/cert
+        in the registry hierarchy cache.
+        """       
+        from sfa.trust.hierarchy import Hierarchy
+        if not isinstance(cred, Credential):
+            cred_obj = Credential(string=cred)
+        else:
+            cred_obj = cred
+        caller_gid = cred_obj.get_gid_caller()
+        hierarchy = Hierarchy()
+        auth_info = hierarchy.get_auth_info(caller_gid.get_hrn())
+        key_file = auth_info.get_privkey_filename()
+        cert_file = auth_info.get_gid_filename()
+        server = interface.server_proxy(key_file, cert_file, timeout)
+        return server
+               
+        
+    def getCredential(self):
+        """
+        Return a valid credential for this interface. 
+        """
+        type = 'authority'
+        path = self.config.SFA_DATA_DIR
+        filename = ".".join([self.interface, self.hrn, type, "cred"])
+        cred_filename = os.path.join(path,filename)
+        cred = None
+        if os.path.isfile(cred_filename):
+            cred = Credential(filename = cred_filename)
+            # make sure cred isn't expired
+            if not cred.get_expiration() or \
+               datetime.datetime.utcnow() < cred.get_expiration():
+                return cred.save_to_string(save_parents=True)
+
+        # get a new credential
+        if self.interface in ['registry']:
+            cred =  self.__getCredentialRaw()
+        else:
+            cred =  self.__getCredential()
+        cred.save_to_file(cred_filename, save_parents=True)
+
+        return cred.save_to_string(save_parents=True)
+
+
+    def getDelegatedCredential(self, creds):
+        """
+        Attempt to find a credential delegated to us in
+        the specified list of creds.
+        """
+        from sfa.trust.hierarchy import Hierarchy
+        if creds and not isinstance(creds, list): 
+            creds = [creds]
+        hierarchy = Hierarchy()
+                
+        delegated_cred = None
+        for cred in creds:
+            if hierarchy.auth_exists(Credential(string=cred).get_gid_caller().get_hrn()):
+                delegated_cred = cred
+                break
+        return delegated_cred
+
+    def __getCredential(self):
+        """ 
+        Get our credential from a remote registry 
+        """
+        from sfa.server.registry import Registries
+        registries = Registries()
+        registry = registries.server_proxy(self.hrn, self.key_file, self.cert_file)
+        cert_string=self.cert.save_to_string(save_parents=True)
+        # get self credential
+        self_cred = registry.GetSelfCredential(cert_string, self.hrn, 'authority')
+        # get credential
+        cred = registry.GetCredential(self_cred, self.hrn, 'authority')
+        return Credential(string=cred)
+
+    def __getCredentialRaw(self):
+        """
+        Get our current credential directly from the local registry.
+        """
+
+        hrn = self.hrn
+        auth_hrn = self.auth.get_authority(hrn)
+    
+        # is this a root or sub authority
+        if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
+            auth_hrn = hrn
+        auth_info = self.auth.get_auth_info(auth_hrn)
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        #table = self.SfaTable()
+        table = SfaTable()
+        records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
+        if not records:
+            raise RecordNotFound(hrn)
+        record = records[0]
+        type = record['type']
+        object_gid = record.get_gid_object()
+        new_cred = Credential(subject = object_gid.get_subject())
+        new_cred.set_gid_caller(object_gid)
+        new_cred.set_gid_object(object_gid)
+        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
+        
+        r1 = determine_rights(type, hrn)
+        new_cred.set_privileges(r1)
+        new_cred.encode()
+        new_cred.sign()
+
+        return new_cred
+   
+    def loadCredential (self):
+        """
+        Attempt to load the credential from file if it exists. If it doesn't,
+        get the credential from the registry.
+        """
+
+        # see if this file exists
+        # XX This is really the aggregate's credential. Using this is easier than getting
+        # the registry's credential from itself (ssl errors).
+        filename = self.interface + self.hrn + ".ma.cred"
+        ma_cred_path = os.path.join(self.config.SFA_DATA_DIR,filename)
+        try:
+            self.credential = Credential(filename = ma_cred_path)
+        except IOError:
+            self.credential = self.getCredentialFromRegistry()
+
+    def get_cached_server_version(self, server):
+        cache_key = server.url + "-version"
+        server_version = None
+        if self.cache:
+            server_version = self.cache.get(cache_key)
+        if not server_version:
+            server_version = server.GetVersion()
+            # cache version for 24 hours
+            self.cache.add(cache_key, server_version, ttl= 60*60*24)
+        return server_version
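For reference, a hedged usage sketch of the new SfaApi class as the server code above employs it; the key/cert paths are illustrative and would normally come from the registry Hierarchy:

    from sfa.server.sfaapi import SfaApi

    api = SfaApi(key_file='/var/lib/sfa/server.key',
                 cert_file='/var/lib/sfa/server.cert',
                 interface='registry')

    # get_cached_server_version() consults the Cache first and only calls
    # GetVersion() on a miss; answers are kept for 24 hours.
    for hrn, registry in api.registries.items():
        proxy = registry.server_proxy(api.key_file, api.cert_file, timeout=30)
        version = api.get_cached_server_version(proxy)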
diff --git a/sfa/server/sfaserver.py b/sfa/server/sfaserver.py
new file mode 100644 (file)
index 0000000..f392b78
--- /dev/null
@@ -0,0 +1,65 @@
+##
+# This module implements a general-purpose server layer for sfa.
+# The same basic server should be usable on the registry, component, or
+# other interfaces.
+#
+# TODO: investigate ways to combine this with existing PLC server?
+##
+
+import threading
+
+from sfa.server.threadedserver import ThreadedServer, SecureXMLRpcRequestHandler
+
+from sfa.util.sfalogging import logger
+from sfa.trust.certificate import Keypair, Certificate
+
+##
+# Implements an HTTPS XML-RPC server. Generally it is expected that SFA
+# functions will take a credential string, which is passed to
+# decode_authentication. Decode_authentication() will verify the validity of
+# the credential, and verify that the user is using the key that matches the
+# GID supplied in the credential.
+
+class SfaServer(threading.Thread):
+
+    ##
+    # Create a new SfaServer object.
+    #
+    # @param ip the ip address to listen on
+    # @param port the port to listen on
+    # @param key_file private key filename of registry
+    # @param cert_file certificate filename containing public key 
+    #   (could be a GID file)
+
+    def __init__(self, ip, port, key_file, cert_file,interface):
+        threading.Thread.__init__(self)
+        self.key = Keypair(filename = key_file)
+        self.cert = Certificate(filename = cert_file)
+        #self.server = SecureXMLRPCServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
+        self.server = ThreadedServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
+        self.server.interface=interface
+        self.trusted_cert_list = None
+        self.register_functions()
+        logger.info("Starting SfaServer, interface=%s"%interface)
+
+    ##
+    # Register functions that will be served by the XMLRPC server. This
+    # function should be overridden by each descendant class.
+
+    def register_functions(self):
+        self.server.register_function(self.noop)
+
+    ##
+    # Sample no-op server function; it simply echoes back its second argument.
+
+    def noop(self, cred, anything):
+        return anything
+
+    ##
+    # Execute the server, serving requests forever. 
+
+    def run(self):
+        self.server.serve_forever()
+
+
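SfaServer is meant to be subclassed by the concrete registry/aggregate/slicemgr servers, each overriding register_functions(). A minimal, purely illustrative subclass; the class name, port, and key/cert paths are placeholders:

    from sfa.server.sfaserver import SfaServer

    class EchoServer(SfaServer):
        def register_functions(self):
            SfaServer.register_functions(self)
            self.server.register_function(self.echo)

        def echo(self, cred, message):
            return message

    # SfaServer is a threading.Thread, so start() ends up in run()/serve_forever()
    server = EchoServer('0.0.0.0', 12346, '/etc/sfa/server.key',
                        '/etc/sfa/server.cert', 'registry')
    server.start()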
index 8842eae..9a7fa4a 100644 (file)
@@ -1,11 +1,8 @@
-### $Id$
-### $URL$
-
 import os
 import sys
 import datetime
 import time
-from sfa.util.server import *
+from sfa.server.sfaserver import SfaServer
 
 class SliceMgr(SfaServer):
 
similarity index 80%
rename from sfa/util/server.py
rename to sfa/server/threadedserver.py
index c3ae718..7a9c368 100644 (file)
@@ -7,24 +7,23 @@
 ##
 
 import sys
-import socket, os
+import socket
 import traceback
 import threading
 from Queue import Queue
 import SocketServer
 import BaseHTTPServer
-import SimpleHTTPServer
 import SimpleXMLRPCServer
 from OpenSSL import SSL
 
-from sfa.trust.certificate import Keypair, Certificate
-from sfa.trust.trustedroots import TrustedRoots
+from sfa.util.sfalogging import logger
 from sfa.util.config import Config
-from sfa.trust.credential import *
-from sfa.util.faults import *
-from sfa.plc.api import SfaAPI
 from sfa.util.cache import Cache 
-from sfa.util.sfalogging import logger
+from sfa.trust.certificate import Certificate
+from sfa.trust.trustedroots import TrustedRoots
+
+# don't hard code an api class anymore here
+from sfa.generic import Generic
 
 ##
 # Verification callback for pyOpenSSL. We do our own checking of keys because
@@ -97,11 +96,18 @@ class SecureXMLRpcRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
         try:
             peer_cert = Certificate()
             peer_cert.load_from_pyopenssl_x509(self.connection.get_peer_certificate())
-            self.api = SfaAPI(peer_cert = peer_cert, 
-                              interface = self.server.interface, 
-                              key_file = self.server.key_file, 
-                              cert_file = self.server.cert_file,
-                              cache = self.cache)
+            generic=Generic.the_flavour()
+            self.api = generic.make_api (peer_cert = peer_cert, 
+                                         interface = self.server.interface, 
+                                         key_file = self.server.key_file, 
+                                         cert_file = self.server.cert_file,
+                                         cache = self.cache)
+            #logger.info("SecureXMLRpcRequestHandler.do_POST:")
+            #logger.info("interface=%s"%self.server.interface)
+            #logger.info("key_file=%s"%self.server.key_file)
+            #logger.info("api=%s"%self.api)
+            #logger.info("server=%s"%self.server)
+            #logger.info("handler=%s"%self)
             # get arguments
             request = self.rfile.read(int(self.headers["content-length"]))
             remote_addr = (remote_ip, remote_port) = self.connection.getpeername()
@@ -129,6 +135,7 @@ class SecureXMLRpcRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
 ##
 # Taken from the web (XXX find reference). Implements an HTTPS xmlrpc server
 class SecureXMLRPCServer(BaseHTTPServer.HTTPServer,SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
+
     def __init__(self, server_address, HandlerClass, key_file, cert_file, logRequests=True):
         """Secure XML-RPC server.
 
@@ -260,54 +267,3 @@ class ThreadPoolMixIn(SocketServer.ThreadingMixIn):
 
 class ThreadedServer(ThreadPoolMixIn, SecureXMLRPCServer):
     pass
-##
-# Implements an HTTPS XML-RPC server. Generally it is expected that SFA
-# functions will take a credential string, which is passed to
-# decode_authentication. Decode_authentication() will verify the validity of
-# the credential, and verify that the user is using the key that matches the
-# GID supplied in the credential.
-
-class SfaServer(threading.Thread):
-
-    ##
-    # Create a new SfaServer object.
-    #
-    # @param ip the ip address to listen on
-    # @param port the port to listen on
-    # @param key_file private key filename of registry
-    # @param cert_file certificate filename containing public key 
-    #   (could be a GID file)
-
-    def __init__(self, ip, port, key_file, cert_file,interface):
-        threading.Thread.__init__(self)
-        self.key = Keypair(filename = key_file)
-        self.cert = Certificate(filename = cert_file)
-        #self.server = SecureXMLRPCServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
-        self.server = ThreadedServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
-        self.server.interface=interface
-        self.trusted_cert_list = None
-        self.register_functions()
-        logger.info("Starting SfaServer, interface=%s"%interface)
-
-    ##
-    # Register functions that will be served by the XMLRPC server. This
-    # function should be overridden by each descendant class.
-
-    def register_functions(self):
-        self.server.register_function(self.noop)
-
-    ##
-    # Sample no-op server function. The no-op function decodes the credential
-    # that was passed to it.
-
-    def noop(self, cred, anything):
-        self.decode_authentication(cred)
-        return anything
-
-    ##
-    # Execute the server, serving requests forever. 
-
-    def run(self):
-        self.server.serve_forever()
-
-
similarity index 55%
rename from sfa/util/api.py
rename to sfa/server/xmlrpcapi.py
index eeb3d6a..456cd42 100644 (file)
@@ -2,20 +2,25 @@
 # SFA XML-RPC and SOAP interfaces
 #
 
-import sys
-import os
-import traceback
 import string
 import xmlrpclib
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+
+# SOAP support is optional
+try:
+    import SOAPpy
+    from SOAPpy.Parser import parseSOAPRPC
+    from SOAPpy.Types import faultType
+    from SOAPpy.NS import NS
+    from SOAPpy.SOAPBuilder import buildSOAP
+except ImportError:
+    SOAPpy = None
+
+####################
+#from sfa.util.faults import SfaNotImplemented, SfaAPIError, SfaInvalidAPIMethod, SfaFault
+from sfa.util.faults import SfaInvalidAPIMethod, SfaAPIError, SfaFault
 from sfa.util.sfalogging import logger
-from sfa.trust.auth import Auth
-from sfa.util.config import *
-from sfa.util.faults import *
-from sfa.util.cache import Cache
-from sfa.trust.credential import *
-from sfa.trust.certificate import *
 
+####################
 # See "2.2 Characters" in the XML specification:
 #
 # #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
@@ -73,115 +78,24 @@ def xmlrpclib_dump(self, value, write):
 # You can't hide from me!
 xmlrpclib.Marshaller._Marshaller__dump = xmlrpclib_dump
 
-# SOAP support is optional
-try:
-    import SOAPpy
-    from SOAPpy.Parser import parseSOAPRPC
-    from SOAPpy.Types import faultType
-    from SOAPpy.NS import NS
-    from SOAPpy.SOAPBuilder import buildSOAP
-except ImportError:
-    SOAPpy = None
-
-
-def import_deep(name):
-    mod = __import__(name)
-    components = name.split('.')
-    for comp in components[1:]:
-        mod = getattr(mod, comp)
-    return mod
-
-class ManagerWrapper:
+class XmlrpcApi:
     """
-    This class acts as a wrapper around an SFA interface manager module, but
-    can be used with any python module. The purpose of this class is raise a 
-    SfaNotImplemented exception if the a someone attepmts to use an attribute 
-    (could be a callable) thats not available in the library by checking the
-    library using hasattr. This helps to communicate better errors messages 
-    to the users and developers in the event that a specifiec operation 
-    is not implemented by a libarary and will generally be more helpful than
-    the standard AttributeError         
+    The XmlrpcApi class implements a basic xmlrpc (or soap) service 
     """
-    def __init__(self, manager, interface):
-        self.manager = manager
-        self.interface = interface
-        
-    def __getattr__(self, method):
-        
-        if not hasattr(self.manager, method):
-            raise SfaNotImplemented(method, self.interface)
-        return getattr(self.manager, method)
-        
-class BaseAPI:
 
     protocol = None
   
-    def __init__(self, config = "/etc/sfa/sfa_config.py", encoding = "utf-8", 
-                 methods='sfa.methods', peer_cert = None, interface = None, 
-                 key_file = None, cert_file = None, cache = None):
+    def __init__ (self, encoding="utf-8", methods='sfa.methods'):
 
         self.encoding = encoding
+        self.source = None 
         
         # flat list of method names
         self.methods_module = methods_module = __import__(methods, fromlist=[methods])
         self.methods = methods_module.all
 
-        # Better just be documenting the API
-        if config is None:
-            return
-        # Load configuration
-        self.config = Config(config)
-        self.auth = Auth(peer_cert)
-        self.hrn = self.config.SFA_INTERFACE_HRN
-        self.interface = interface
-        self.key_file = key_file
-        self.key = Keypair(filename=self.key_file)
-        self.cert_file = cert_file
-        self.cert = Certificate(filename=self.cert_file)
-        self.cache = cache
-        if self.cache is None:
-            self.cache = Cache()
-        self.credential = None
-        self.source = None 
-        self.time_format = "%Y-%m-%d %H:%M:%S"
         self.logger = logger
  
-        # load registries
-        from sfa.server.registry import Registries
-        self.registries = Registries() 
-
-        # load aggregates
-        from sfa.server.aggregate import Aggregates
-        self.aggregates = Aggregates()
-
-
-    def get_interface_manager(self, manager_base = 'sfa.managers'):
-        """
-        Returns the appropriate manager module for this interface.
-        Modules are usually found in sfa/managers/
-        """
-        
-        if self.interface in ['registry']:
-            mgr_type = self.config.SFA_REGISTRY_TYPE
-            manager_module = manager_base + ".registry_manager_%s" % mgr_type
-        elif self.interface in ['aggregate']:
-            mgr_type = self.config.SFA_AGGREGATE_TYPE
-            manager_module = manager_base + ".aggregate_manager_%s" % mgr_type 
-        elif self.interface in ['slicemgr', 'sm']:
-            mgr_type = self.config.SFA_SM_TYPE
-            manager_module = manager_base + ".slice_manager_%s" % mgr_type
-        elif self.interface in ['component', 'cm']:
-            mgr_type = self.config.SFA_CM_TYPE
-            manager_module = manager_base + ".component_manager_%s" % mgr_type
-        else:
-            raise SfaAPIError("No manager for interface: %s" % self.interface)  
-        manager = __import__(manager_module, fromlist=[manager_base])
-        # this isnt necessary but will help to produce better error messages
-        # if someone tries to access an operation this manager doesn't implement  
-        manager = ManagerWrapper(manager, self.interface)
-
-        return manager
-
     def callable(self, method):
         """
         Return a new instance of the specified method.
@@ -196,7 +110,7 @@ class BaseAPI:
             module = __import__(self.methods_module.__name__ + "." + method, globals(), locals(), [classname])
             callablemethod = getattr(module, classname)(self)
             return getattr(module, classname)(self)
-        except ImportError, AttributeError:
+        except (ImportError, AttributeError):
             raise SfaInvalidAPIMethod, method
 
     def call(self, source, method, *args):
@@ -239,7 +153,7 @@ class BaseAPI:
         except SfaFault, fault:
             result = fault 
         except Exception, fault:
-            logger.log_exc("BaseAPI.handle has caught Exception")
+            self.logger.log_exc("XmlrpcApi.handle has caught Exception")
             result = SfaAPIError(fault)
 
 
@@ -268,13 +182,3 @@ class BaseAPI:
             
         return response
 
-    def get_cached_server_version(self, server):
-        cache_key = server.url + "-version"
-        server_version = None
-        if self.cache:
-            server_version = self.cache.get(cache_key)
-        if not server_version:
-            server_version = server.GetVersion()
-            # cache version for 24 hours
-            self.cache.add(cache_key, server_version, ttl= 60*60*24)
-        return server_version
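The callable() convention shown above maps each exported method name onto a module of the same name under sfa.methods, which defines a class of that name whose constructor takes the api instance. A hedged, standalone sketch of that dispatch; the method name is just an example:

    method = 'GetVersion'
    classname = method.split('.')[-1]
    module = __import__('sfa.methods.' + method, globals(), locals(), [classname])
    call = getattr(module, classname)(api)   # 'api' is an XmlrpcApi/SfaApi instance
    # the resulting object is then invoked with the decoded xmlrpc arguments:
    # result = call(*args)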
index de0af99..43af740 100644 (file)
@@ -3,16 +3,20 @@
 #
 import sys
 
+from sfa.util.faults import InsufficientRights, MissingCallerGID, MissingTrustedRoots, PermissionError, \
+    BadRequestHash, ConnectionKeyGIDMismatch, SfaPermissionDenied
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+from sfa.util.xrn import get_authority
+
+from sfa.trust.gid import GID
+from sfa.trust.rights import Rights
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.credential import Credential
 from sfa.trust.trustedroots import TrustedRoots
-from sfa.util.faults import *
 from sfa.trust.hierarchy import Hierarchy
-from sfa.util.config import *
-from sfa.util.xrn import get_authority
-from sfa.util.sfaticket import *
+from sfa.trust.sfaticket import SfaTicket
 
-from sfa.util.sfalogging import logger
 
 class Auth:
     """
@@ -145,7 +149,8 @@ class Auth:
 
     def authenticateCert(self, certStr, requestHash):
         cert = Certificate(string=certStr)
-        self.validateCert(self, cert)   
+        # xxx should be validateCred ??
+        self.validateCred(cert)   
 
     def gidNoop(self, gidStr, value, requestHash):
         self.authenticateGid(gidStr, [gidStr, value], requestHash)
@@ -313,7 +318,7 @@ class Auth:
         if not isinstance(creds, list):
             creds = [creds]
         creds = []
-        if not isinistance(caller_hrn_list, list):
+        if not isinstance(caller_hrn_list, list):
             caller_hrn_list = [caller_hrn_list]
         for cred in creds:
             try:
index bcec9d6..f0a2d71 100644 (file)
-#----------------------------------------------------------------------\r
-# Copyright (c) 2008 Board of Trustees, Princeton University\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining\r
-# a copy of this software and/or hardware specification (the "Work") to\r
-# deal in the Work without restriction, including without limitation the\r
-# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
-# and/or sell copies of the Work, and to permit persons to whom the Work\r
-# is furnished to do so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be\r
-# included in all copies or substantial portions of the Work.\r
-#\r
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
-# IN THE WORK.\r
-#----------------------------------------------------------------------\r
-\r
-##\r
-# SFA uses two crypto libraries: pyOpenSSL and M2Crypto to implement\r
-# the necessary crypto functionality. Ideally just one of these libraries\r
-# would be used, but unfortunately each of these libraries is independently\r
-# lacking. The pyOpenSSL library is missing many necessary functions, and\r
-# the M2Crypto library has crashed inside of some of the functions. The\r
-# design decision is to use pyOpenSSL whenever possible as it seems more\r
-# stable, and only use M2Crypto for those functions that are not possible\r
-# in pyOpenSSL.\r
-#\r
-# This module exports two classes: Keypair and Certificate.\r
-##\r
-#\r
-\r
-import functools\r
-import os\r
-import tempfile\r
-import base64\r
-import traceback\r
-from tempfile import mkstemp\r
-\r
-from OpenSSL import crypto\r
-import M2Crypto\r
-from M2Crypto import X509\r
-\r
-from sfa.util.sfalogging import logger\r
-from sfa.util.xrn import urn_to_hrn\r
-from sfa.util.faults import *\r
-from sfa.util.sfalogging import logger\r
-\r
-glo_passphrase_callback = None\r
-\r
-##\r
-# A global callback msy be implemented for requesting passphrases from the\r
-# user. The function will be called with three arguments:\r
-#\r
-#    keypair_obj: the keypair object that is calling the passphrase\r
-#    string: the string containing the private key that's being loaded\r
-#    x: unknown, appears to be 0, comes from pyOpenSSL and/or m2crypto\r
-#\r
-# The callback should return a string containing the passphrase.\r
-\r
-def set_passphrase_callback(callback_func):\r
-    global glo_passphrase_callback\r
-\r
-    glo_passphrase_callback = callback_func\r
-\r
-##\r
-# Sets a fixed passphrase.\r
-\r
-def set_passphrase(passphrase):\r
-    set_passphrase_callback( lambda k,s,x: passphrase )\r
-\r
-##\r
-# Check to see if a passphrase works for a particular private key string.\r
-# Intended to be used by passphrase callbacks for input validation.\r
-\r
-def test_passphrase(string, passphrase):\r
-    try:\r
-        crypto.load_privatekey(crypto.FILETYPE_PEM, string, (lambda x: passphrase))\r
-        return True\r
-    except:\r
-        return False\r
-\r
-def convert_public_key(key):\r
-    keyconvert_path = "/usr/bin/keyconvert.py"\r
-    if not os.path.isfile(keyconvert_path):\r
-        raise IOError, "Could not find keyconvert in %s" % keyconvert_path\r
-\r
-    # we can only convert rsa keys\r
-    if "ssh-dss" in key:\r
-        return None\r
-\r
-    (ssh_f, ssh_fn) = tempfile.mkstemp()\r
-    ssl_fn = tempfile.mktemp()\r
-    os.write(ssh_f, key)\r
-    os.close(ssh_f)\r
-\r
-    cmd = keyconvert_path + " " + ssh_fn + " " + ssl_fn\r
-    os.system(cmd)\r
-\r
-    # this check leaves the temporary file containing the public key so\r
-    # that it can be expected to see why it failed.\r
-    # TODO: for production, cleanup the temporary files\r
-    if not os.path.exists(ssl_fn):\r
-        return None\r
-\r
-    k = Keypair()\r
-    try:\r
-        k.load_pubkey_from_file(ssl_fn)\r
-    except:\r
-        logger.log_exc("convert_public_key caught exception")\r
-        k = None\r
-\r
-    # remove the temporary files\r
-    os.remove(ssh_fn)\r
-    os.remove(ssl_fn)\r
-\r
-    return k\r
-\r
-##\r
-# Public-private key pairs are implemented by the Keypair class.\r
-# A Keypair object may represent both a public and private key pair, or it\r
-# may represent only a public key (this usage is consistent with OpenSSL).\r
-\r
-class Keypair:\r
-    key = None       # public/private keypair\r
-    m2key = None     # public key (m2crypto format)\r
-\r
-    ##\r
-    # Creates a Keypair object\r
-    # @param create If create==True, creates a new public/private key and\r
-    #     stores it in the object\r
-    # @param string If string!=None, load the keypair from the string (PEM)\r
-    # @param filename If filename!=None, load the keypair from the file\r
-\r
-    def __init__(self, create=False, string=None, filename=None):\r
-        if create:\r
-            self.create()\r
-        if string:\r
-            self.load_from_string(string)\r
-        if filename:\r
-            self.load_from_file(filename)\r
-\r
-    ##\r
-    # Create a RSA public/private key pair and store it inside the keypair object\r
-\r
-    def create(self):\r
-        self.key = crypto.PKey()\r
-        self.key.generate_key(crypto.TYPE_RSA, 1024)\r
-\r
-    ##\r
-    # Save the private key to a file\r
-    # @param filename name of file to store the keypair in\r
-\r
-    def save_to_file(self, filename):\r
-        open(filename, 'w').write(self.as_pem())\r
-        self.filename=filename\r
-\r
-    ##\r
-    # Load the private key from a file. Implicity the private key includes the public key.\r
-\r
-    def load_from_file(self, filename):\r
-        self.filename=filename\r
-        buffer = open(filename, 'r').read()\r
-        self.load_from_string(buffer)\r
-\r
-    ##\r
-    # Load the private key from a string. Implicitly the private key includes the public key.\r
-\r
-    def load_from_string(self, string):\r
-        if glo_passphrase_callback:\r
-            self.key = crypto.load_privatekey(crypto.FILETYPE_PEM, string, functools.partial(glo_passphrase_callback, self, string) )\r
-            self.m2key = M2Crypto.EVP.load_key_string(string, functools.partial(glo_passphrase_callback, self, string) )\r
-        else:\r
-            self.key = crypto.load_privatekey(crypto.FILETYPE_PEM, string)\r
-            self.m2key = M2Crypto.EVP.load_key_string(string)\r
-\r
-    ##\r
-    #  Load the public key from a string. No private key is loaded.\r
-\r
-    def load_pubkey_from_file(self, filename):\r
-        # load the m2 public key\r
-        m2rsakey = M2Crypto.RSA.load_pub_key(filename)\r
-        self.m2key = M2Crypto.EVP.PKey()\r
-        self.m2key.assign_rsa(m2rsakey)\r
-\r
-        # create an m2 x509 cert\r
-        m2name = M2Crypto.X509.X509_Name()\r
-        m2name.add_entry_by_txt(field="CN", type=0x1001, entry="junk", len=-1, loc=-1, set=0)\r
-        m2x509 = M2Crypto.X509.X509()\r
-        m2x509.set_pubkey(self.m2key)\r
-        m2x509.set_serial_number(0)\r
-        m2x509.set_issuer_name(m2name)\r
-        m2x509.set_subject_name(m2name)\r
-        ASN1 = M2Crypto.ASN1.ASN1_UTCTIME()\r
-        ASN1.set_time(500)\r
-        m2x509.set_not_before(ASN1)\r
-        m2x509.set_not_after(ASN1)\r
-        # x509v3 so it can have extensions\r
-        # prob not necc since this cert itself is junk but still...\r
-        m2x509.set_version(2)\r
-        junk_key = Keypair(create=True)\r
-        m2x509.sign(pkey=junk_key.get_m2_pkey(), md="sha1")\r
-\r
-        # convert the m2 x509 cert to a pyopenssl x509\r
-        m2pem = m2x509.as_pem()\r
-        pyx509 = crypto.load_certificate(crypto.FILETYPE_PEM, m2pem)\r
-\r
-        # get the pyopenssl pkey from the pyopenssl x509\r
-        self.key = pyx509.get_pubkey()\r
-        self.filename=filename\r
-\r
-    ##\r
-    # Load the public key from a string. No private key is loaded.\r
-\r
-    def load_pubkey_from_string(self, string):\r
-        (f, fn) = tempfile.mkstemp()\r
-        os.write(f, string)\r
-        os.close(f)\r
-        self.load_pubkey_from_file(fn)\r
-        os.remove(fn)\r
-\r
-    ##\r
-    # Return the private key in PEM format.\r
-\r
-    def as_pem(self):\r
-        return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.key)\r
-\r
-    ##\r
-    # Return an M2Crypto key object\r
-\r
-    def get_m2_pkey(self):\r
-        if not self.m2key:\r
-            self.m2key = M2Crypto.EVP.load_key_string(self.as_pem())\r
-        return self.m2key\r
-\r
-    ##\r
-    # Returns a string containing the public key represented by this object.\r
-\r
-    def get_pubkey_string(self):\r
-        m2pkey = self.get_m2_pkey()\r
-        return base64.b64encode(m2pkey.as_der())\r
-\r
-    ##\r
-    # Return an OpenSSL pkey object\r
-\r
-    def get_openssl_pkey(self):\r
-        return self.key\r
-\r
-    ##\r
-    # Given another Keypair object, return TRUE if the two keys are the same.\r
-\r
-    def is_same(self, pkey):\r
-        return self.as_pem() == pkey.as_pem()\r
-\r
-    def sign_string(self, data):\r
-        k = self.get_m2_pkey()\r
-        k.sign_init()\r
-        k.sign_update(data)\r
-        return base64.b64encode(k.sign_final())\r
-\r
-    def verify_string(self, data, sig):\r
-        k = self.get_m2_pkey()\r
-        k.verify_init()\r
-        k.verify_update(data)\r
-        return M2Crypto.m2.verify_final(k.ctx, base64.b64decode(sig), k.pkey)\r
-\r
-    def compute_hash(self, value):\r
-        return self.sign_string(str(value))\r
-\r
-    # only informative\r
-    def get_filename(self):\r
-        return getattr(self,'filename',None)\r
-\r
-    def dump (self, *args, **kwargs):\r
-        print self.dump_string(*args, **kwargs)\r
-\r
-    def dump_string (self):\r
-        result=""\r
-        result += "KEYPAIR: pubkey=%40s..."%self.get_pubkey_string()\r
-        filename=self.get_filename()\r
-        if filename: result += "Filename %s\n"%filename\r
-        return result\r
-\r
-##\r
-# The certificate class implements a general purpose X509 certificate, making\r
-# use of the appropriate pyOpenSSL or M2Crypto abstractions. It also adds\r
-# several addition features, such as the ability to maintain a chain of\r
-# parent certificates, and storage of application-specific data.\r
-#\r
-# Certificates include the ability to maintain a chain of parents. Each\r
-# certificate includes a pointer to it's parent certificate. When loaded\r
-# from a file or a string, the parent chain will be automatically loaded.\r
-# When saving a certificate to a file or a string, the caller can choose\r
-# whether to save the parent certificates as well.\r
-\r
-class Certificate:\r
-    digest = "md5"\r
-\r
-    cert = None\r
-    issuerKey = None\r
-    issuerSubject = None\r
-    parent = None\r
-    isCA = None # will be a boolean once set\r
-\r
-    separator="-----parent-----"\r
-\r
-    ##\r
-    # Create a certificate object.\r
-    #\r
-    # @param lifeDays life of cert in days - default is 1825==5 years\r
-    # @param create If create==True, then also create a blank X509 certificate.\r
-    # @param subject If subject!=None, then create a blank certificate and set\r
-    #     it's subject name.\r
-    # @param string If string!=None, load the certficate from the string.\r
-    # @param filename If filename!=None, load the certficiate from the file.\r
-    # @param isCA If !=None, set whether this cert is for a CA\r
-\r
-    def __init__(self, lifeDays=1825, create=False, subject=None, string=None, filename=None, isCA=None):\r
-        self.data = {}\r
-        if create or subject:\r
-            self.create(lifeDays)\r
-        if subject:\r
-            self.set_subject(subject)\r
-        if string:\r
-            self.load_from_string(string)\r
-        if filename:\r
-            self.load_from_file(filename)\r
-\r
-        # Set the CA bit if a value was supplied\r
-        if isCA != None:\r
-            self.set_is_ca(isCA)\r
-\r
-    # Create a blank X509 certificate and store it in this object.\r
-\r
-    def create(self, lifeDays=1825):\r
-        self.cert = crypto.X509()\r
-        # FIXME: Use different serial #s\r
-        self.cert.set_serial_number(3)\r
-        self.cert.gmtime_adj_notBefore(0) # 0 means now\r
-        self.cert.gmtime_adj_notAfter(lifeDays*60*60*24) # five years is default\r
-        self.cert.set_version(2) # x509v3 so it can have extensions\r
-\r
-\r
-    ##\r
-    # Given a pyOpenSSL X509 object, store that object inside of this\r
-    # certificate object.\r
-\r
-    def load_from_pyopenssl_x509(self, x509):\r
-        self.cert = x509\r
-\r
-    ##\r
-    # Load the certificate from a string\r
-\r
-    def load_from_string(self, string):\r
-        # if it is a chain of multiple certs, then split off the first one and\r
-        # load it (support for the ---parent--- tag as well as normal chained certs)\r
-\r
-        string = string.strip()\r
-        \r
-        # If it's not in proper PEM format, wrap it\r
-        if string.count('-----BEGIN CERTIFICATE') == 0:\r
-            string = '-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----' % string\r
-\r
-        # If there is a PEM cert in there, but there is some other text first\r
-        # such as the text of the certificate, skip the text\r
-        beg = string.find('-----BEGIN CERTIFICATE')\r
-        if beg > 0:\r
-            # skipping over non cert beginning                                                                                                              \r
-            string = string[beg:]\r
-\r
-        parts = []\r
-\r
-        if string.count('-----BEGIN CERTIFICATE-----') > 1 and \\r
-               string.count(Certificate.separator) == 0:\r
-            parts = string.split('-----END CERTIFICATE-----',1)\r
-            parts[0] += '-----END CERTIFICATE-----'\r
-        else:\r
-            parts = string.split(Certificate.separator, 1)\r
-\r
-        self.cert = crypto.load_certificate(crypto.FILETYPE_PEM, parts[0])\r
-\r
-        # if there are more certs, then create a parent and let the parent load\r
-        # itself from the remainder of the string\r
-        if len(parts) > 1 and parts[1] != '':\r
-            self.parent = self.__class__()\r
-            self.parent.load_from_string(parts[1])\r
-\r
-    ##\r
-    # Load the certificate from a file\r
-\r
-    def load_from_file(self, filename):\r
-        file = open(filename)\r
-        string = file.read()\r
-        self.load_from_string(string)\r
-        self.filename=filename\r
-\r
-    ##\r
-    # Save the certificate to a string.\r
-    #\r
-    # @param save_parents If save_parents==True, then also save the parent certificates.\r
-\r
-    def save_to_string(self, save_parents=True):\r
-        string = crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)\r
-        if save_parents and self.parent:\r
-            string = string + self.parent.save_to_string(save_parents)\r
-        return string\r
-\r
-    ##\r
-    # Save the certificate to a file.\r
-    # @param save_parents If save_parents==True, then also save the parent certificates.\r
-\r
-    def save_to_file(self, filename, save_parents=True, filep=None):\r
-        string = self.save_to_string(save_parents=save_parents)\r
-        if filep:\r
-            f = filep\r
-        else:\r
-            f = open(filename, 'w')\r
-        f.write(string)\r
-        f.close()\r
-        self.filename=filename\r
-\r
-    ##\r
-    # Save the certificate to a random file in /tmp/\r
-    # @param save_parents If save_parents==True, then also save the parent certificates.\r
-    def save_to_random_tmp_file(self, save_parents=True):\r
-        fp, filename = mkstemp(suffix='cert', text=True)\r
-        fp = os.fdopen(fp, "w")\r
-        self.save_to_file(filename, save_parents=True, filep=fp)\r
-        return filename\r
-\r
-    ##\r
-    # Sets the issuer private key and name\r
-    # @param key Keypair object containing the private key of the issuer\r
-    # @param subject String containing the name of the issuer\r
-    # @param cert (optional) Certificate object containing the name of the issuer\r
-\r
-    def set_issuer(self, key, subject=None, cert=None):\r
-        self.issuerKey = key\r
-        if subject:\r
-            # it's a mistake to use subject and cert params at the same time\r
-            assert(not cert)\r
-            if isinstance(subject, dict) or isinstance(subject, str):\r
-                req = crypto.X509Req()\r
-                reqSubject = req.get_subject()\r
-                if (isinstance(subject, dict)):\r
-                    for key in reqSubject.keys():\r
-                        setattr(reqSubject, key, subject[key])\r
-                else:\r
-                    setattr(reqSubject, "CN", subject)\r
-                subject = reqSubject\r
-                # subject is not valid once req is out of scope, so save req\r
-                self.issuerReq = req\r
-        if cert:\r
-            # if a cert was supplied, then get the subject from the cert\r
-            subject = cert.cert.get_subject()\r
-        assert(subject)\r
-        self.issuerSubject = subject\r
-\r
-    ##\r
-    # Get the issuer name\r
-\r
-    def get_issuer(self, which="CN"):\r
-        x = self.cert.get_issuer()\r
-        return getattr(x, which)\r
-\r
-    ##\r
-    # Set the subject name of the certificate\r
-\r
-    def set_subject(self, name):\r
-        req = crypto.X509Req()\r
-        subj = req.get_subject()\r
-        if (isinstance(name, dict)):\r
-            for key in name.keys():\r
-                setattr(subj, key, name[key])\r
-        else:\r
-            setattr(subj, "CN", name)\r
-        self.cert.set_subject(subj)\r
-\r
-    ##\r
-    # Get the subject name of the certificate\r
-\r
-    def get_subject(self, which="CN"):\r
-        x = self.cert.get_subject()\r
-        return getattr(x, which)\r
-\r
-    ##\r
-    # Get a pretty-print subject name of the certificate\r
-\r
-    def get_printable_subject(self):\r
-        x = self.cert.get_subject()\r
-        return "[ OU: %s, CN: %s, SubjectAltName: %s ]" % (getattr(x, "OU"), getattr(x, "CN"), self.get_data())\r
-\r
-    ##\r
-    # Get the public key of the certificate.\r
-    #\r
-    # @param key Keypair object containing the public key\r
-\r
-    def set_pubkey(self, key):\r
-        assert(isinstance(key, Keypair))\r
-        self.cert.set_pubkey(key.get_openssl_pkey())\r
-\r
-    ##\r
-    # Get the public key of the certificate.\r
-    # It is returned in the form of a Keypair object.\r
-\r
-    def get_pubkey(self):\r
-        m2x509 = X509.load_cert_string(self.save_to_string())\r
-        pkey = Keypair()\r
-        pkey.key = self.cert.get_pubkey()\r
-        pkey.m2key = m2x509.get_pubkey()\r
-        return pkey\r
-\r
-    def set_intermediate_ca(self, val):\r
-        return self.set_is_ca(val)\r
-\r
-    # Set whether this cert is for a CA. All signers and only signers should be CAs.\r
-    # The local member starts unset, letting us check that you only set it once\r
-    # @param val Boolean indicating whether this cert is for a CA\r
-    def set_is_ca(self, val):\r
-        if val is None:\r
-            return\r
-\r
-        if self.isCA != None:\r
-            # Can't double set properties\r
-            raise "Cannot set basicConstraints CA:?? more than once. Was %s, trying to set as %s" % (self.isCA, val)\r
-\r
-        self.isCA = val\r
-        if val:\r
-            self.add_extension('basicConstraints', 1, 'CA:TRUE')\r
-        else:\r
-            self.add_extension('basicConstraints', 1, 'CA:FALSE')\r
-\r
-\r
-\r
-    ##\r
-    # Add an X509 extension to the certificate. Add_extension can only be called\r
-    # once for a particular extension name, due to limitations in the underlying\r
-    # library.\r
-    #\r
-    # @param name string containing name of extension\r
-    # @param value string containing value of the extension\r
-\r
-    def add_extension(self, name, critical, value):\r
-        oldExtVal = None\r
-        try:\r
-            oldExtVal = self.get_extension(name)\r
-        except:\r
-            # M2Crypto LookupError when the extension isn't there (yet)\r
-            pass\r
-\r
-        # This code limits you from adding the extension with the same value\r
-        # The method comment says you shouldn't do this with the same name\r
-        # But actually it (m2crypto) appears to allow you to do this.\r
-        if oldExtVal and oldExtVal == value:\r
-            # don't add this extension again\r
-            # just do nothing as here\r
-            return\r
-        # FIXME: What if they are trying to set with a different value?\r
-        # Is this ever OK? Or should we raise an exception?\r
-#        elif oldExtVal:\r
-#            raise "Cannot add extension %s which had val %s with new val %s" % (name, oldExtVal, value)\r
-\r
-        ext = crypto.X509Extension (name, critical, value)\r
-        self.cert.add_extensions([ext])\r
-\r
-    ##\r
-    # Get an X509 extension from the certificate\r
-\r
-    def get_extension(self, name):\r
-\r
-        # pyOpenSSL does not have a way to get extensions\r
-        m2x509 = X509.load_cert_string(self.save_to_string())\r
-        value = m2x509.get_ext(name).get_value()\r
-\r
-        return value\r
-\r
-    ##\r
-    # Set_data is a wrapper around add_extension. It stores the parameter str in\r
-    # the X509 subject_alt_name extension. Set_data can only be called once, due\r
-    # to limitations in the underlying library.\r
-\r
-    def set_data(self, str, field='subjectAltName'):\r
-        # pyOpenSSL only allows us to add extensions, so if we try to set the\r
-        # same extension more than once, it will not work\r
-        if self.data.has_key(field):\r
-            raise "Cannot set ", field, " more than once"\r
-        self.data[field] = str\r
-        self.add_extension(field, 0, str)\r
-\r
-    ##\r
-    # Return the data string that was previously set with set_data\r
-\r
-    def get_data(self, field='subjectAltName'):\r
-        if self.data.has_key(field):\r
-            return self.data[field]\r
-\r
-        try:\r
-            uri = self.get_extension(field)\r
-            self.data[field] = uri\r
-        except LookupError:\r
-            return None\r
-\r
-        return self.data[field]\r
-\r
-    ##\r
-    # Sign the certificate using the issuer private key and issuer subject previous set with set_issuer().\r
-\r
-    def sign(self):\r
-        logger.debug('certificate.sign')\r
-        assert self.cert != None\r
-        assert self.issuerSubject != None\r
-        assert self.issuerKey != None\r
-        self.cert.set_issuer(self.issuerSubject)\r
-        self.cert.sign(self.issuerKey.get_openssl_pkey(), self.digest)\r
-\r
-    ##\r
-    # Verify the authenticity of a certificate.\r
-    # @param pkey is a Keypair object representing a public key. If Pkey\r
-    #     did not sign the certificate, then an exception will be thrown.\r
-\r
-    def verify(self, pkey):\r
-        # pyOpenSSL does not have a way to verify signatures\r
-        m2x509 = X509.load_cert_string(self.save_to_string())\r
-        m2pkey = pkey.get_m2_pkey()\r
-        # verify it\r
-        return m2x509.verify(m2pkey)\r
-\r
-        # XXX alternatively, if openssl has been patched, do the much simpler:\r
-        # try:\r
-        #   self.cert.verify(pkey.get_openssl_key())\r
-        #   return 1\r
-        # except:\r
-        #   return 0\r
-\r
-    ##\r
-    # Return True if pkey is identical to the public key that is contained in the certificate.\r
-    # @param pkey Keypair object\r
-\r
-    def is_pubkey(self, pkey):\r
-        return self.get_pubkey().is_same(pkey)\r
-\r
-    ##\r
-    # Given a certificate cert, verify that this certificate was signed by the\r
-    # public key contained in cert. Throw an exception otherwise.\r
-    #\r
-    # @param cert certificate object\r
-\r
-    def is_signed_by_cert(self, cert):\r
-        k = cert.get_pubkey()\r
-        result = self.verify(k)\r
-        return result\r
-\r
-    ##\r
-    # Set the parent certficiate.\r
-    #\r
-    # @param p certificate object.\r
-\r
-    def set_parent(self, p):\r
-        self.parent = p\r
-\r
-    ##\r
-    # Return the certificate object of the parent of this certificate.\r
-\r
-    def get_parent(self):\r
-        return self.parent\r
-\r
-    ##\r
-    # Verification examines a chain of certificates to ensure that each parent\r
-    # signs the child, and that some certificate in the chain is signed by a\r
-    # trusted certificate.\r
-    #\r
-    # Verification is a basic recursion: <pre>\r
-    #     if this_certificate was signed by trusted_certs:\r
-    #         return\r
-    #     else\r
-    #         return verify_chain(parent, trusted_certs)\r
-    # </pre>\r
-    #\r
-    # At each recursion, the parent is tested to ensure that it did sign the\r
-    # child. If a parent did not sign a child, then an exception is thrown. If\r
-    # the bottom of the recursion is reached and the certificate does not match\r
-    # a trusted root, then an exception is thrown.\r
-    # Also require that parents are CAs.\r
-    #\r
-    # @param Trusted_certs is a list of certificates that are trusted.\r
-    #\r
-\r
-    def verify_chain(self, trusted_certs = None):\r
-        # Verify a chain of certificates. Each certificate must be signed by\r
-        # the public key contained in it's parent. The chain is recursed\r
-        # until a certificate is found that is signed by a trusted root.\r
-\r
-        # verify expiration time\r
-        if self.cert.has_expired():\r
-            logger.debug("verify_chain: NO, Certificate %s has expired" % self.get_printable_subject())\r
-            raise CertExpired(self.get_printable_subject(), "client cert")\r
-\r
-        # if this cert is signed by a trusted_cert, then we are set\r
-        for trusted_cert in trusted_certs:\r
-            if self.is_signed_by_cert(trusted_cert):\r
-                # verify expiration of trusted_cert ?\r
-                if not trusted_cert.cert.has_expired():\r
-                    logger.debug("verify_chain: YES. Cert %s signed by trusted cert %s"%(\r
-                            self.get_printable_subject(), trusted_cert.get_printable_subject()))\r
-                    return trusted_cert\r
-                else:\r
-                    logger.debug("verify_chain: NO. Cert %s is signed by trusted_cert %s, but that signer is expired..."%(\r
-                            self.get_printable_subject(),trusted_cert.get_printable_subject()))\r
-                    raise CertExpired(self.get_printable_subject()," signer trusted_cert %s"%trusted_cert.get_printable_subject())\r
-\r
-        # if there is no parent, then no way to verify the chain\r
-        if not self.parent:\r
-            logger.debug("verify_chain: NO. %s has no parent and issuer %s is not in %d trusted roots"%(self.get_printable_subject(), self.get_issuer(), len(trusted_certs)))\r
-            raise CertMissingParent(self.get_printable_subject() + ": Issuer %s not trusted by any of %d trusted roots, and cert has no parent." % (self.get_issuer(), len(trusted_certs)))\r
-\r
-        # if it wasn't signed by the parent...\r
-        if not self.is_signed_by_cert(self.parent):\r
-            logger.debug("verify_chain: NO. %s is not signed by parent %s, but by %s"%self.get_printable_subject(), self.parent.get_printable_subject(), self.get_issuer())\r
-            raise CertNotSignedByParent(self.get_printable_subject() + ": Parent %s, issuer %s" % (self.parent.get_printable_subject(), self.get_issuer()))\r
-\r
-        # Confirm that the parent is a CA. Only CAs can be trusted as\r
-        # signers.\r
-        # Note that trusted roots are not parents, so don't need to be\r
-        # CAs.\r
-        # Ugly - cert objects aren't parsed so we need to read the\r
-        # extension and hope there are no other basicConstraints\r
-        if not self.parent.isCA and not (self.parent.get_extension('basicConstraints') == 'CA:TRUE'):\r
-            logger.warn("verify_chain: cert %s's parent %s is not a CA" % (self.get_printable_subject(), self.parent.get_printable_subject()))\r
-            raise CertNotSignedByParent(self.get_printable_subject() + ": Parent %s not a CA" % self.parent.get_printable_subject())\r
-\r
-        # if the parent isn't verified...\r
-        logger.debug("verify_chain: .. %s, -> verifying parent %s"%(self.get_printable_subject(),self.parent.get_printable_subject()))\r
-        self.parent.verify_chain(trusted_certs)\r
-\r
-        return\r
-\r
-    ### more introspection\r
-    def get_extensions(self):\r
-        # pyOpenSSL does not have a way to get extensions\r
-        triples=[]\r
-        m2x509 = X509.load_cert_string(self.save_to_string())\r
-        nb_extensions=m2x509.get_ext_count()\r
-        logger.debug("X509 had %d extensions"%nb_extensions)\r
-        for i in range(nb_extensions):\r
-            ext=m2x509.get_ext_at(i)\r
-            triples.append( (ext.get_name(), ext.get_value(), ext.get_critical(),) )\r
-        return triples\r
-\r
-    def get_data_names(self):\r
-        return self.data.keys()\r
-\r
-    def get_all_datas (self):\r
-        triples=self.get_extensions()\r
-        for name in self.get_data_names():\r
-            triples.append( (name,self.get_data(name),'data',) )\r
-        return triples\r
-\r
-    # only informative\r
-    def get_filename(self):\r
-        return getattr(self,'filename',None)\r
-\r
-    def dump (self, *args, **kwargs):\r
-        print self.dump_string(*args, **kwargs)\r
-\r
-    def dump_string (self,show_extensions=False):\r
-        result = ""\r
-        result += "CERTIFICATE for %s\n"%self.get_printable_subject()\r
-        result += "Issued by %s\n"%self.get_issuer()\r
-        filename=self.get_filename()\r
-        if filename: result += "Filename %s\n"%filename\r
-        if show_extensions:\r
-            all_datas=self.get_all_datas()\r
-            result += " has %d extensions/data attached"%len(all_datas)\r
-            for (n,v,c) in all_datas:\r
-                if c=='data':\r
-                    result += "   data: %s=%s\n"%(n,v)\r
-                else:\r
-                    result += "    ext: %s (crit=%s)=<<<%s>>>\n"%(n,c,v)\r
-        return result\r
+#----------------------------------------------------------------------
+# Copyright (c) 2008 Board of Trustees, Princeton University
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and/or hardware specification (the "Work") to
+# deal in the Work without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Work, and to permit persons to whom the Work
+# is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Work.
+#
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS 
+# IN THE WORK.
+#----------------------------------------------------------------------
+
+##
+# SFA uses two crypto libraries, pyOpenSSL and M2Crypto, to implement
+# the necessary crypto functionality. Ideally just one of these libraries
+# would be used, but unfortunately each of these libraries is independently
+# lacking. The pyOpenSSL library is missing many necessary functions, and
+# the M2Crypto library has crashed inside of some of the functions. The
+# design decision is to use pyOpenSSL whenever possible as it seems more
+# stable, and only use M2Crypto for those functions that are not possible
+# in pyOpenSSL.
+#
+# This module exports two classes: Keypair and Certificate.
+##
+#
+
+import functools
+import os
+import tempfile
+import base64
+from tempfile import mkstemp
+
+from OpenSSL import crypto
+import M2Crypto
+from M2Crypto import X509
+
+from sfa.util.faults import CertExpired, CertMissingParent, CertNotSignedByParent
+from sfa.util.sfalogging import logger
+
+glo_passphrase_callback = None
+
+##
+# A global callback may be implemented for requesting passphrases from the
+# user. The function will be called with three arguments:
+#
+#    keypair_obj: the keypair object that is calling the passphrase
+#    string: the string containing the private key that's being loaded
+#    x: unknown, appears to be 0, comes from pyOpenSSL and/or m2crypto
+#
+# The callback should return a string containing the passphrase.
+
+def set_passphrase_callback(callback_func):
+    global glo_passphrase_callback
+
+    glo_passphrase_callback = callback_func
+
+##
+# Sets a fixed passphrase.
+
+def set_passphrase(passphrase):
+    set_passphrase_callback( lambda k,s,x: passphrase )
+
+##
+# Check to see if a passphrase works for a particular private key string.
+# Intended to be used by passphrase callbacks for input validation.
+
+def test_passphrase(string, passphrase):
+    try:
+        crypto.load_privatekey(crypto.FILETYPE_PEM, string, (lambda x: passphrase))
+        return True
+    except:
+        return False
+
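+# Example (a minimal illustrative sketch, not part of the original module):
+# a callback that prompts for a passphrase and validates the input with
+# test_passphrase() before returning it. The prompt text is an assumption.
+#
+#     import getpass
+#
+#     def ask_passphrase(keypair_obj, key_string, x):
+#         while True:
+#             candidate = getpass.getpass("Private key passphrase: ")
+#             if test_passphrase(key_string, candidate):
+#                 return candidate
+#
+#     set_passphrase_callback(ask_passphrase)
+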
+def convert_public_key(key):
+    keyconvert_path = "/usr/bin/keyconvert.py"
+    if not os.path.isfile(keyconvert_path):
+        raise IOError, "Could not find keyconvert in %s" % keyconvert_path
+
+    # we can only convert rsa keys
+    if "ssh-dss" in key:
+        return None
+
+    (ssh_f, ssh_fn) = tempfile.mkstemp()
+    ssl_fn = tempfile.mktemp()
+    os.write(ssh_f, key)
+    os.close(ssh_f)
+
+    cmd = keyconvert_path + " " + ssh_fn + " " + ssl_fn
+    os.system(cmd)
+
+    # this check leaves the temporary file containing the public key so
+    # that it can be inspected to see why the conversion failed.
+    # TODO: for production, cleanup the temporary files
+    if not os.path.exists(ssl_fn):
+        return None
+
+    k = Keypair()
+    try:
+        k.load_pubkey_from_file(ssl_fn)
+    except:
+        logger.log_exc("convert_public_key caught exception")
+        k = None
+
+    # remove the temporary files
+    os.remove(ssh_fn)
+    os.remove(ssl_fn)
+
+    return k
+
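+# Example (sketch only; the ssh key path below is an illustrative assumption,
+# and keyconvert.py must be installed at the path checked above):
+#
+#     ssh_pub = open("/root/.ssh/id_rsa.pub").read()
+#     kp = convert_public_key(ssh_pub)
+#     if kp:
+#         print kp.get_pubkey_string()
+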
+##
+# Public-private key pairs are implemented by the Keypair class.
+# A Keypair object may represent both a public and private key pair, or it
+# may represent only a public key (this usage is consistent with OpenSSL).
+
+class Keypair:
+    key = None       # public/private keypair
+    m2key = None     # public key (m2crypto format)
+
+    ##
+    # Creates a Keypair object
+    # @param create If create==True, creates a new public/private key and
+    #     stores it in the object
+    # @param string If string!=None, load the keypair from the string (PEM)
+    # @param filename If filename!=None, load the keypair from the file
+
+    def __init__(self, create=False, string=None, filename=None):
+        if create:
+            self.create()
+        if string:
+            self.load_from_string(string)
+        if filename:
+            self.load_from_file(filename)
+
+    ##
+    # Create an RSA public/private key pair and store it inside the keypair object
+
+    def create(self):
+        self.key = crypto.PKey()
+        self.key.generate_key(crypto.TYPE_RSA, 1024)
+
+    ##
+    # Save the private key to a file
+    # @param filename name of file to store the keypair in
+
+    def save_to_file(self, filename):
+        open(filename, 'w').write(self.as_pem())
+        self.filename=filename
+
+    ##
+    # Load the private key from a file. Implicitly the private key includes the public key.
+
+    def load_from_file(self, filename):
+        self.filename=filename
+        buffer = open(filename, 'r').read()
+        self.load_from_string(buffer)
+
+    ##
+    # Load the private key from a string. Implicitly the private key includes the public key.
+
+    def load_from_string(self, string):
+        if glo_passphrase_callback:
+            self.key = crypto.load_privatekey(crypto.FILETYPE_PEM, string, functools.partial(glo_passphrase_callback, self, string) )
+            self.m2key = M2Crypto.EVP.load_key_string(string, functools.partial(glo_passphrase_callback, self, string) )
+        else:
+            self.key = crypto.load_privatekey(crypto.FILETYPE_PEM, string)
+            self.m2key = M2Crypto.EVP.load_key_string(string)
+
+    ##
+    # Load the public key from a file. No private key is loaded.
+
+    def load_pubkey_from_file(self, filename):
+        # load the m2 public key
+        m2rsakey = M2Crypto.RSA.load_pub_key(filename)
+        self.m2key = M2Crypto.EVP.PKey()
+        self.m2key.assign_rsa(m2rsakey)
+
+        # create an m2 x509 cert
+        m2name = M2Crypto.X509.X509_Name()
+        m2name.add_entry_by_txt(field="CN", type=0x1001, entry="junk", len=-1, loc=-1, set=0)
+        m2x509 = M2Crypto.X509.X509()
+        m2x509.set_pubkey(self.m2key)
+        m2x509.set_serial_number(0)
+        m2x509.set_issuer_name(m2name)
+        m2x509.set_subject_name(m2name)
+        ASN1 = M2Crypto.ASN1.ASN1_UTCTIME()
+        ASN1.set_time(500)
+        m2x509.set_not_before(ASN1)
+        m2x509.set_not_after(ASN1)
+        # x509v3 so it can have extensions
+        # prob not necc since this cert itself is junk but still...
+        m2x509.set_version(2)
+        junk_key = Keypair(create=True)
+        m2x509.sign(pkey=junk_key.get_m2_pkey(), md="sha1")
+
+        # convert the m2 x509 cert to a pyopenssl x509
+        m2pem = m2x509.as_pem()
+        pyx509 = crypto.load_certificate(crypto.FILETYPE_PEM, m2pem)
+
+        # get the pyopenssl pkey from the pyopenssl x509
+        self.key = pyx509.get_pubkey()
+        self.filename=filename
+
+    ##
+    # Load the public key from a string. No private key is loaded.
+
+    def load_pubkey_from_string(self, string):
+        (f, fn) = tempfile.mkstemp()
+        os.write(f, string)
+        os.close(f)
+        self.load_pubkey_from_file(fn)
+        os.remove(fn)
+
+    ##
+    # Return the private key in PEM format.
+
+    def as_pem(self):
+        return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.key)
+
+    ##
+    # Return an M2Crypto key object
+
+    def get_m2_pkey(self):
+        if not self.m2key:
+            self.m2key = M2Crypto.EVP.load_key_string(self.as_pem())
+        return self.m2key
+
+    ##
+    # Returns a string containing the public key represented by this object.
+
+    def get_pubkey_string(self):
+        m2pkey = self.get_m2_pkey()
+        return base64.b64encode(m2pkey.as_der())
+
+    ##
+    # Return an OpenSSL pkey object
+
+    def get_openssl_pkey(self):
+        return self.key
+
+    ##
+    # Given another Keypair object, return TRUE if the two keys are the same.
+
+    def is_same(self, pkey):
+        return self.as_pem() == pkey.as_pem()
+
+    def sign_string(self, data):
+        k = self.get_m2_pkey()
+        k.sign_init()
+        k.sign_update(data)
+        return base64.b64encode(k.sign_final())
+
+    def verify_string(self, data, sig):
+        k = self.get_m2_pkey()
+        k.verify_init()
+        k.verify_update(data)
+        return M2Crypto.m2.verify_final(k.ctx, base64.b64decode(sig), k.pkey)
+
+    def compute_hash(self, value):
+        return self.sign_string(str(value))
+
+    # only informative
+    def get_filename(self):
+        return getattr(self,'filename',None)
+
+    def dump (self, *args, **kwargs):
+        print self.dump_string(*args, **kwargs)
+
+    def dump_string (self):
+        result=""
+        result += "KEYPAIR: pubkey=%40s..."%self.get_pubkey_string()
+        filename=self.get_filename()
+        if filename: result += "Filename %s\n"%filename
+        return result
+
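+# Example (illustrative sketch; the file name is an assumption): a typical
+# Keypair round trip using the methods defined above.
+#
+#     k = Keypair(create=True)
+#     k.save_to_file("/tmp/example.pkey")
+#     signature = k.sign_string("some data")
+#     assert k.verify_string("some data", signature)
+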
+##
+# The certificate class implements a general purpose X509 certificate, making
+# use of the appropriate pyOpenSSL or M2Crypto abstractions. It also adds
+# several additional features, such as the ability to maintain a chain of
+# parent certificates, and storage of application-specific data.
+#
+# Certificates include the ability to maintain a chain of parents. Each
+# certificate includes a pointer to its parent certificate. When loaded
+# from a file or a string, the parent chain will be automatically loaded.
+# When saving a certificate to a file or a string, the caller can choose
+# whether to save the parent certificates as well.
+
+class Certificate:
+    digest = "md5"
+
+    cert = None
+    issuerKey = None
+    issuerSubject = None
+    parent = None
+    isCA = None # will be a boolean once set
+
+    separator="-----parent-----"
+
+    ##
+    # Create a certificate object.
+    #
+    # @param lifeDays life of cert in days - default is 1825==5 years
+    # @param create If create==True, then also create a blank X509 certificate.
+    # @param subject If subject!=None, then create a blank certificate and set
+    #     its subject name.
+    # @param string If string!=None, load the certificate from the string.
+    # @param filename If filename!=None, load the certificate from the file.
+    # @param isCA If !=None, set whether this cert is for a CA
+
+    def __init__(self, lifeDays=1825, create=False, subject=None, string=None, filename=None, isCA=None):
+        self.data = {}
+        if create or subject:
+            self.create(lifeDays)
+        if subject:
+            self.set_subject(subject)
+        if string:
+            self.load_from_string(string)
+        if filename:
+            self.load_from_file(filename)
+
+        # Set the CA bit if a value was supplied
+        if isCA != None:
+            self.set_is_ca(isCA)
+
+    # Create a blank X509 certificate and store it in this object.
+
+    def create(self, lifeDays=1825):
+        self.cert = crypto.X509()
+        # FIXME: Use different serial #s
+        self.cert.set_serial_number(3)
+        self.cert.gmtime_adj_notBefore(0) # 0 means now
+        self.cert.gmtime_adj_notAfter(lifeDays*60*60*24) # five years is default
+        self.cert.set_version(2) # x509v3 so it can have extensions
+
+
+    ##
+    # Given a pyOpenSSL X509 object, store that object inside of this
+    # certificate object.
+
+    def load_from_pyopenssl_x509(self, x509):
+        self.cert = x509
+
+    ##
+    # Load the certificate from a string
+
+    def load_from_string(self, string):
+        # if it is a chain of multiple certs, then split off the first one and
+        # load it (support for the ---parent--- tag as well as normal chained certs)
+
+        string = string.strip()
+        
+        # If it's not in proper PEM format, wrap it
+        if string.count('-----BEGIN CERTIFICATE') == 0:
+            string = '-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----' % string
+
+        # If there is a PEM cert in there, but there is some other text first
+        # such as the text of the certificate, skip the text
+        beg = string.find('-----BEGIN CERTIFICATE')
+        if beg > 0:
+            # skipping over non cert beginning                                                                                                              
+            string = string[beg:]
+
+        parts = []
+
+        if string.count('-----BEGIN CERTIFICATE-----') > 1 and \
+               string.count(Certificate.separator) == 0:
+            parts = string.split('-----END CERTIFICATE-----',1)
+            parts[0] += '-----END CERTIFICATE-----'
+        else:
+            parts = string.split(Certificate.separator, 1)
+
+        self.cert = crypto.load_certificate(crypto.FILETYPE_PEM, parts[0])
+
+        # if there are more certs, then create a parent and let the parent load
+        # itself from the remainder of the string
+        if len(parts) > 1 and parts[1] != '':
+            self.parent = self.__class__()
+            self.parent.load_from_string(parts[1])
+
+    ##
+    # Load the certificate from a file
+
+    def load_from_file(self, filename):
+        file = open(filename)
+        string = file.read()
+        self.load_from_string(string)
+        self.filename=filename
+
+    ##
+    # Save the certificate to a string.
+    #
+    # @param save_parents If save_parents==True, then also save the parent certificates.
+
+    def save_to_string(self, save_parents=True):
+        string = crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)
+        if save_parents and self.parent:
+            string = string + self.parent.save_to_string(save_parents)
+        return string
+
+    ##
+    # Save the certificate to a file.
+    # @param save_parents If save_parents==True, then also save the parent certificates.
+
+    def save_to_file(self, filename, save_parents=True, filep=None):
+        string = self.save_to_string(save_parents=save_parents)
+        if filep:
+            f = filep
+        else:
+            f = open(filename, 'w')
+        f.write(string)
+        f.close()
+        self.filename=filename
+
+    ##
+    # Save the certificate to a random file in /tmp/
+    # @param save_parents If save_parents==True, then also save the parent certificates.
+    def save_to_random_tmp_file(self, save_parents=True):
+        fp, filename = mkstemp(suffix='cert', text=True)
+        fp = os.fdopen(fp, "w")
+        self.save_to_file(filename, save_parents=True, filep=fp)
+        return filename
+
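+    # Example (illustrative; 'cert' stands for an already-signed Certificate):
+    # a certificate and its parents round-trip through a string, because
+    # load_from_string() rebuilds the parent chain from the concatenated PEM blocks.
+    #
+    #     pem_chain = cert.save_to_string(save_parents=True)
+    #     clone = Certificate(string=pem_chain)
+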
+    ##
+    # Sets the issuer private key and name
+    # @param key Keypair object containing the private key of the issuer
+    # @param subject String containing the name of the issuer
+    # @param cert (optional) Certificate object containing the name of the issuer
+
+    def set_issuer(self, key, subject=None, cert=None):
+        self.issuerKey = key
+        if subject:
+            # it's a mistake to use subject and cert params at the same time
+            assert(not cert)
+            if isinstance(subject, dict) or isinstance(subject, str):
+                req = crypto.X509Req()
+                reqSubject = req.get_subject()
+                if (isinstance(subject, dict)):
+                    for key in reqSubject.keys():
+                        setattr(reqSubject, key, subject[key])
+                else:
+                    setattr(reqSubject, "CN", subject)
+                subject = reqSubject
+                # subject is not valid once req is out of scope, so save req
+                self.issuerReq = req
+        if cert:
+            # if a cert was supplied, then get the subject from the cert
+            subject = cert.cert.get_subject()
+        assert(subject)
+        self.issuerSubject = subject
+
+    ##
+    # Get the issuer name
+
+    def get_issuer(self, which="CN"):
+        x = self.cert.get_issuer()
+        return getattr(x, which)
+
+    ##
+    # Set the subject name of the certificate
+
+    def set_subject(self, name):
+        req = crypto.X509Req()
+        subj = req.get_subject()
+        if (isinstance(name, dict)):
+            for key in name.keys():
+                setattr(subj, key, name[key])
+        else:
+            setattr(subj, "CN", name)
+        self.cert.set_subject(subj)
+
+    ##
+    # Get the subject name of the certificate
+
+    def get_subject(self, which="CN"):
+        x = self.cert.get_subject()
+        return getattr(x, which)
+
+    ##
+    # Get a pretty-print subject name of the certificate
+
+    def get_printable_subject(self):
+        x = self.cert.get_subject()
+        return "[ OU: %s, CN: %s, SubjectAltName: %s ]" % (getattr(x, "OU"), getattr(x, "CN"), self.get_data())
+
+    ##
+    # Set the public key of the certificate.
+    #
+    # @param key Keypair object containing the public key
+
+    def set_pubkey(self, key):
+        assert(isinstance(key, Keypair))
+        self.cert.set_pubkey(key.get_openssl_pkey())
+
+    ##
+    # Get the public key of the certificate.
+    # It is returned in the form of a Keypair object.
+
+    def get_pubkey(self):
+        m2x509 = X509.load_cert_string(self.save_to_string())
+        pkey = Keypair()
+        pkey.key = self.cert.get_pubkey()
+        pkey.m2key = m2x509.get_pubkey()
+        return pkey
+
+    def set_intermediate_ca(self, val):
+        return self.set_is_ca(val)
+
+    # Set whether this cert is for a CA. All signers and only signers should be CAs.
+    # The local member starts unset, letting us check that you only set it once
+    # @param val Boolean indicating whether this cert is for a CA
+    def set_is_ca(self, val):
+        if val is None:
+            return
+
+        if self.isCA != None:
+            # Can't double set properties
+            raise Exception, "Cannot set basicConstraints CA:?? more than once. Was %s, trying to set as %s" % (self.isCA, val)
+
+        self.isCA = val
+        if val:
+            self.add_extension('basicConstraints', 1, 'CA:TRUE')
+        else:
+            self.add_extension('basicConstraints', 1, 'CA:FALSE')
+
+
+
+    ##
+    # Add an X509 extension to the certificate. Add_extension can only be called
+    # once for a particular extension name, due to limitations in the underlying
+    # library.
+    #
+    # @param name string containing name of extension
+    # @param critical flag indicating whether the extension is critical
+    # @param value string containing value of the extension
+
+    def add_extension(self, name, critical, value):
+        oldExtVal = None
+        try:
+            oldExtVal = self.get_extension(name)
+        except:
+            # M2Crypto LookupError when the extension isn't there (yet)
+            pass
+
+        # This code limits you from adding the extension with the same value
+        # The method comment says you shouldn't do this with the same name
+        # But actually it (m2crypto) appears to allow you to do this.
+        if oldExtVal and oldExtVal == value:
+            # don't add this extension again
+            # just do nothing as here
+            return
+        # FIXME: What if they are trying to set with a different value?
+        # Is this ever OK? Or should we raise an exception?
+#        elif oldExtVal:
+#            raise "Cannot add extension %s which had val %s with new val %s" % (name, oldExtVal, value)
+
+        ext = crypto.X509Extension (name, critical, value)
+        self.cert.add_extensions([ext])
+
+    ##
+    # Get an X509 extension from the certificate
+
+    def get_extension(self, name):
+
+        # pyOpenSSL does not have a way to get extensions
+        m2x509 = X509.load_cert_string(self.save_to_string())
+        value = m2x509.get_ext(name).get_value()
+
+        return value
+
+    ##
+    # Set_data is a wrapper around add_extension. It stores the parameter str in
+    # the X509 subject_alt_name extension. Set_data can only be called once, due
+    # to limitations in the underlying library.
+
+    def set_data(self, str, field='subjectAltName'):
+        # pyOpenSSL only allows us to add extensions, so if we try to set the
+        # same extension more than once, it will not work
+        if self.data.has_key(field):
+            raise "Cannot set ", field, " more than once"
+        self.data[field] = str
+        self.add_extension(field, 0, str)
+
+    ##
+    # Return the data string that was previously set with set_data
+
+    def get_data(self, field='subjectAltName'):
+        if self.data.has_key(field):
+            return self.data[field]
+
+        try:
+            uri = self.get_extension(field)
+            self.data[field] = uri
+        except LookupError:
+            return None
+
+        return self.data[field]
+
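+    # Example (illustrative; 'cert' and the URN below are assumed names):
+    # application data is stashed in the subjectAltName extension before
+    # signing and can be read back after the certificate is reloaded.
+    #
+    #     cert.set_data("URI:urn:publicid:IDN+example+user+alice")
+    #     cert.sign()
+    #     print cert.get_data()
+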
+    ##
+    # Sign the certificate using the issuer private key and issuer subject previous set with set_issuer().
+
+    def sign(self):
+        logger.debug('certificate.sign')
+        assert self.cert != None
+        assert self.issuerSubject != None
+        assert self.issuerKey != None
+        self.cert.set_issuer(self.issuerSubject)
+        self.cert.sign(self.issuerKey.get_openssl_pkey(), self.digest)
+
+    ##
+    # Verify the authenticity of a certificate.
+    # @param pkey is a Keypair object representing a public key. If pkey
+    #     did not sign the certificate, then an exception will be thrown.
+
+    def verify(self, pkey):
+        # pyOpenSSL does not have a way to verify signatures
+        m2x509 = X509.load_cert_string(self.save_to_string())
+        m2pkey = pkey.get_m2_pkey()
+        # verify it
+        return m2x509.verify(m2pkey)
+
+        # XXX alternatively, if openssl has been patched, do the much simpler:
+        # try:
+        #   self.cert.verify(pkey.get_openssl_key())
+        #   return 1
+        # except:
+        #   return 0
+
+    ##
+    # Return True if pkey is identical to the public key that is contained in the certificate.
+    # @param pkey Keypair object
+
+    def is_pubkey(self, pkey):
+        return self.get_pubkey().is_same(pkey)
+
+    ##
+    # Given a certificate cert, verify that this certificate was signed by the
+    # public key contained in cert. Throw an exception otherwise.
+    #
+    # @param cert certificate object
+
+    def is_signed_by_cert(self, cert):
+        k = cert.get_pubkey()
+        result = self.verify(k)
+        return result
+
+    ##
+    # Set the parent certificate.
+    #
+    # @param p certificate object.
+
+    def set_parent(self, p):
+        self.parent = p
+
+    ##
+    # Return the certificate object of the parent of this certificate.
+
+    def get_parent(self):
+        return self.parent
+
+    ##
+    # Verification examines a chain of certificates to ensure that each parent
+    # signs the child, and that some certificate in the chain is signed by a
+    # trusted certificate.
+    #
+    # Verification is a basic recursion: <pre>
+    #     if this_certificate was signed by trusted_certs:
+    #         return
+    #     else
+    #         return verify_chain(parent, trusted_certs)
+    # </pre>
+    #
+    # At each recursion, the parent is tested to ensure that it did sign the
+    # child. If a parent did not sign a child, then an exception is thrown. If
+    # the bottom of the recursion is reached and the certificate does not match
+    # a trusted root, then an exception is thrown.
+    # Also require that parents are CAs.
+    #
+    # @param Trusted_certs is a list of certificates that are trusted.
+    #
+
+    def verify_chain(self, trusted_certs = None):
+        # Verify a chain of certificates. Each certificate must be signed by
+        # the public key contained in its parent. The chain is recursed
+        # until a certificate is found that is signed by a trusted root.
+
+        # verify expiration time
+        if self.cert.has_expired():
+            logger.debug("verify_chain: NO, Certificate %s has expired" % self.get_printable_subject())
+            raise CertExpired(self.get_printable_subject(), "client cert")
+
+        # if this cert is signed by a trusted_cert, then we are set
+        for trusted_cert in trusted_certs:
+            if self.is_signed_by_cert(trusted_cert):
+                # verify expiration of trusted_cert ?
+                if not trusted_cert.cert.has_expired():
+                    logger.debug("verify_chain: YES. Cert %s signed by trusted cert %s"%(
+                            self.get_printable_subject(), trusted_cert.get_printable_subject()))
+                    return trusted_cert
+                else:
+                    logger.debug("verify_chain: NO. Cert %s is signed by trusted_cert %s, but that signer is expired..."%(
+                            self.get_printable_subject(),trusted_cert.get_printable_subject()))
+                    raise CertExpired(self.get_printable_subject()," signer trusted_cert %s"%trusted_cert.get_printable_subject())
+
+        # if there is no parent, then no way to verify the chain
+        if not self.parent:
+            logger.debug("verify_chain: NO. %s has no parent and issuer %s is not in %d trusted roots"%(self.get_printable_subject(), self.get_issuer(), len(trusted_certs)))
+            raise CertMissingParent(self.get_printable_subject() + ": Issuer %s not trusted by any of %d trusted roots, and cert has no parent." % (self.get_issuer(), len(trusted_certs)))
+
+        # if it wasn't signed by the parent...
+        if not self.is_signed_by_cert(self.parent):
+            logger.debug("verify_chain: NO. %s is not signed by parent %s, but by %s"%\
+                             (self.get_printable_subject(), 
+                              self.parent.get_printable_subject(), 
+                              self.get_issuer()))
+            raise CertNotSignedByParent("%s: Parent %s, issuer %s"\
+                                            % (self.get_printable_subject(), 
+                                               self.parent.get_printable_subject(),
+                                               self.get_issuer()))
+
+        # Confirm that the parent is a CA. Only CAs can be trusted as
+        # signers.
+        # Note that trusted roots are not parents, so don't need to be
+        # CAs.
+        # Ugly - cert objects aren't parsed so we need to read the
+        # extension and hope there are no other basicConstraints
+        if not self.parent.isCA and not (self.parent.get_extension('basicConstraints') == 'CA:TRUE'):
+            logger.warn("verify_chain: cert %s's parent %s is not a CA" % \
+                            (self.get_printable_subject(), self.parent.get_printable_subject()))
+            raise CertNotSignedByParent("%s: Parent %s not a CA" % (self.get_printable_subject(),
+                                                                    self.parent.get_printable_subject()))
+
+        # if the parent isn't verified...
+        logger.debug("verify_chain: .. %s, -> verifying parent %s"%\
+                         (self.get_printable_subject(),self.parent.get_printable_subject()))
+        self.parent.verify_chain(trusted_certs)
+
+        return
+
+    ### more introspection
+    def get_extensions(self):
+        # pyOpenSSL does not have a way to get extensions
+        triples=[]
+        m2x509 = X509.load_cert_string(self.save_to_string())
+        nb_extensions=m2x509.get_ext_count()
+        logger.debug("X509 had %d extensions"%nb_extensions)
+        for i in range(nb_extensions):
+            ext=m2x509.get_ext_at(i)
+            triples.append( (ext.get_name(), ext.get_value(), ext.get_critical(),) )
+        return triples
+
+    def get_data_names(self):
+        return self.data.keys()
+
+    def get_all_datas (self):
+        triples=self.get_extensions()
+        for name in self.get_data_names():
+            triples.append( (name,self.get_data(name),'data',) )
+        return triples
+
+    # only informative
+    def get_filename(self):
+        return getattr(self,'filename',None)
+
+    def dump (self, *args, **kwargs):
+        print self.dump_string(*args, **kwargs)
+
+    def dump_string (self,show_extensions=False):
+        result = ""
+        result += "CERTIFICATE for %s\n"%self.get_printable_subject()
+        result += "Issued by %s\n"%self.get_issuer()
+        filename=self.get_filename()
+        if filename: result += "Filename %s\n"%filename
+        if show_extensions:
+            all_datas=self.get_all_datas()
+            result += " has %d extensions/data attached"%len(all_datas)
+            for (n,v,c) in all_datas:
+                if c=='data':
+                    result += "   data: %s=%s\n"%(n,v)
+                else:
+                    result += "    ext: %s (crit=%s)=<<<%s>>>\n"%(n,c,v)
+        return result
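+
+# Example (a minimal sketch under assumed names, not part of this module):
+# issuing a child certificate from a self-signed CA and verifying the chain
+# against that CA as the single trusted root.
+#
+#     ca_key = Keypair(create=True)
+#     ca = Certificate(subject="example-ca", isCA=True)
+#     ca.set_pubkey(ca_key)
+#     ca.set_issuer(ca_key, subject="example-ca")
+#     ca.sign()
+#
+#     user_key = Keypair(create=True)
+#     user = Certificate(subject="example-user")
+#     user.set_pubkey(user_key)
+#     user.set_issuer(ca_key, cert=ca)
+#     user.set_parent(ca)
+#     user.sign()
+#
+#     user.verify_chain([ca])   # returns the trusted cert on success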
index a18019d..8fd11e8 100644 (file)
-#----------------------------------------------------------------------\r
-# Copyright (c) 2008 Board of Trustees, Princeton University\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining\r
-# a copy of this software and/or hardware specification (the "Work") to\r
-# deal in the Work without restriction, including without limitation the\r
-# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
-# and/or sell copies of the Work, and to permit persons to whom the Work\r
-# is furnished to do so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be\r
-# included in all copies or substantial portions of the Work.\r
-#\r
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
-# IN THE WORK.\r
-#----------------------------------------------------------------------\r
-##\r
-# Implements SFA Credentials\r
-#\r
-# Credentials are signed XML files that assign a subject gid privileges to an object gid\r
-##\r
-\r
-import os\r
-from types import StringTypes\r
-import datetime\r
-from StringIO import StringIO\r
-from tempfile import mkstemp\r
-from xml.dom.minidom import Document, parseString\r
-\r
-HAVELXML = False\r
-try:\r
-    from lxml import etree\r
-    HAVELXML = True\r
-except:\r
-    pass\r
-\r
-from sfa.util.faults import *\r
-from sfa.util.sfalogging import logger\r
-from sfa.util.sfatime import utcparse\r
-from sfa.trust.certificate import Keypair\r
-from sfa.trust.credential_legacy import CredentialLegacy\r
-from sfa.trust.rights import Right, Rights, determine_rights\r
-from sfa.trust.gid import GID\r
-from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn\r
-\r
-# 2 weeks, in seconds \r
-DEFAULT_CREDENTIAL_LIFETIME = 86400 * 14\r
-\r
-\r
-# TODO:\r
-# . make privs match between PG and PL\r
-# . Need to add support for other types of credentials, e.g. tickets\r
-# . add namespaces to signed-credential element?\r
-\r
-signature_template = \\r
-'''\r
-<Signature xml:id="Sig_%s" xmlns="http://www.w3.org/2000/09/xmldsig#">\r
-  <SignedInfo>\r
-    <CanonicalizationMethod Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>\r
-    <SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>\r
-    <Reference URI="#%s">\r
-      <Transforms>\r
-        <Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />\r
-      </Transforms>\r
-      <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>\r
-      <DigestValue></DigestValue>\r
-    </Reference>\r
-  </SignedInfo>\r
-  <SignatureValue />\r
-  <KeyInfo>\r
-    <X509Data>\r
-      <X509SubjectName/>\r
-      <X509IssuerSerial/>\r
-      <X509Certificate/>\r
-    </X509Data>\r
-    <KeyValue />\r
-  </KeyInfo>\r
-</Signature>\r
-'''\r
-\r
-# PG formats the template (whitespace) slightly differently.\r
-# Note that they don't include the xmlns in the template, but add it later.\r
-# Otherwise the two are equivalent.\r
-#signature_template_as_in_pg = \\r
-#'''\r
-#<Signature xml:id="Sig_%s" >\r
-# <SignedInfo>\r
-#  <CanonicalizationMethod      Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>\r
-#  <SignatureMethod      Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>\r
-#  <Reference URI="#%s">\r
-#    <Transforms>\r
-#      <Transform         Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />\r
-#    </Transforms>\r
-#    <DigestMethod        Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>\r
-#    <DigestValue></DigestValue>\r
-#    </Reference>\r
-# </SignedInfo>\r
-# <SignatureValue />\r
-# <KeyInfo>\r
-#  <X509Data >\r
-#   <X509SubjectName/>\r
-#   <X509IssuerSerial/>\r
-#   <X509Certificate/>\r
-#  </X509Data>\r
-#  <KeyValue />\r
-# </KeyInfo>\r
-#</Signature>\r
-#'''\r
-\r
-##\r
-# Convert a string into a bool\r
-# used to convert an xsd:boolean to a Python boolean\r
-def str2bool(str):\r
-    if str.lower() in ['true','1']:\r
-        return True\r
-    return False\r
-\r
-\r
-##\r
-# Utility function to get the text of an XML element\r
-\r
-def getTextNode(element, subele):\r
-    sub = element.getElementsByTagName(subele)[0]\r
-    if len(sub.childNodes) > 0:            \r
-        return sub.childNodes[0].nodeValue\r
-    else:\r
-        return None\r
-        \r
-##\r
-# Utility function to set the text of an XML element\r
-# It creates the element, adds the text to it,\r
-# and then appends it to the parent.\r
-\r
-def append_sub(doc, parent, element, text):\r
-    ele = doc.createElement(element)\r
-    ele.appendChild(doc.createTextNode(text))\r
-    parent.appendChild(ele)\r
-\r
-##\r
-# Signature contains information about an xmlsec1 signature\r
-# for a signed-credential\r
-#\r
-\r
-class Signature(object):\r
-   \r
-    def __init__(self, string=None):\r
-        self.refid = None\r
-        self.issuer_gid = None\r
-        self.xml = None\r
-        if string:\r
-            self.xml = string\r
-            self.decode()\r
-\r
-\r
-    def get_refid(self):\r
-        if not self.refid:\r
-            self.decode()\r
-        return self.refid\r
-\r
-    def get_xml(self):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    def set_refid(self, id):\r
-        self.refid = id\r
-\r
-    def get_issuer_gid(self):\r
-        if not self.gid:\r
-            self.decode()\r
-        return self.gid        \r
-\r
-    def set_issuer_gid(self, gid):\r
-        self.gid = gid\r
-\r
-    def decode(self):\r
-        try:\r
-            doc = parseString(self.xml)\r
-        except ExpatError,e:\r
-            logger.log_exc ("Failed to parse credential, %s"%self.xml)\r
-            raise\r
-        sig = doc.getElementsByTagName("Signature")[0]\r
-        self.set_refid(sig.getAttribute("xml:id").strip("Sig_"))\r
-        keyinfo = sig.getElementsByTagName("X509Data")[0]\r
-        szgid = getTextNode(keyinfo, "X509Certificate")\r
-        szgid = "-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----" % szgid\r
-        self.set_issuer_gid(GID(string=szgid))        \r
-        \r
-    def encode(self):\r
-        self.xml = signature_template % (self.get_refid(), self.get_refid())\r
-\r
-\r
-##\r
-# A credential provides a caller gid with privileges to an object gid.\r
-# A signed credential is signed by the object's authority.\r
-#\r
-# Credentials are encoded in one of two ways.  The legacy style places\r
-# it in the subjectAltName of an X509 certificate.  The new credentials\r
-# are placed in signed XML.\r
-#\r
-# WARNING:\r
-# In general, a signed credential obtained externally should\r
-# not be changed else the signature is no longer valid.  So, once\r
-# you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-def filter_creds_by_caller(creds, caller_hrn_list):\r
-        """\r
-        Returns a list of creds who's gid caller matches the\r
-        specified caller hrn\r
-        """\r
-        if not isinstance(creds, list): creds = [creds]\r
-        if not isinstance(caller_hrn_list, list): \r
-            caller_hrn_list = [caller_hrn_list]\r
-        caller_creds = []\r
-        for cred in creds:\r
-            try:\r
-                tmp_cred = Credential(string=cred)\r
-                if tmp_cred.get_gid_caller().get_hrn() in caller_hrn_list:\r
-                    caller_creds.append(cred)\r
-            except: pass\r
-        return caller_creds\r
-\r
-class Credential(object):\r
-\r
-    ##\r
-    # Create a Credential object\r
-    #\r
-    # @param create If true, create a blank x509 certificate\r
-    # @param subject If subject!=None, create an x509 cert with the subject name\r
-    # @param string If string!=None, load the credential from the string\r
-    # @param filename If filename!=None, load the credential from the file\r
-    # FIXME: create and subject are ignored!\r
-    def __init__(self, create=False, subject=None, string=None, filename=None):\r
-        self.gidCaller = None\r
-        self.gidObject = None\r
-        self.expiration = None\r
-        self.privileges = None\r
-        self.issuer_privkey = None\r
-        self.issuer_gid = None\r
-        self.issuer_pubkey = None\r
-        self.parent = None\r
-        self.signature = None\r
-        self.xml = None\r
-        self.refid = None\r
-        self.legacy = None\r
-\r
-        # Check if this is a legacy credential, translate it if so\r
-        if string or filename:\r
-            if string:                \r
-                str = string\r
-            elif filename:\r
-                str = file(filename).read()\r
-                \r
-            if str.strip().startswith("-----"):\r
-                self.legacy = CredentialLegacy(False,string=str)\r
-                self.translate_legacy(str)\r
-            else:\r
-                self.xml = str\r
-                self.decode()\r
-\r
-        # Find an xmlsec1 path\r
-        self.xmlsec_path = ''\r
-        paths = ['/usr/bin','/usr/local/bin','/bin','/opt/bin','/opt/local/bin']\r
-        for path in paths:\r
-            if os.path.isfile(path + '/' + 'xmlsec1'):\r
-                self.xmlsec_path = path + '/' + 'xmlsec1'\r
-                break\r
-\r
-    def get_subject(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        return self.gidObject.get_printable_subject()\r
-\r
-    def get_summary_tostring(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        obj = self.gidObject.get_printable_subject()\r
-        caller = self.gidCaller.get_printable_subject()\r
-        exp = self.get_expiration()\r
-        # Summarize the rights too? The issuer?\r
-        return "[ Grant %s rights on %s until %s ]" % (caller, obj, exp)\r
-\r
-    def get_signature(self):\r
-        if not self.signature:\r
-            self.decode()\r
-        return self.signature\r
-\r
-    def set_signature(self, sig):\r
-        self.signature = sig\r
-\r
-        \r
-    ##\r
-    # Translate a legacy credential into a new one\r
-    #\r
-    # @param String of the legacy credential\r
-\r
-    def translate_legacy(self, str):\r
-        legacy = CredentialLegacy(False,string=str)\r
-        self.gidCaller = legacy.get_gid_caller()\r
-        self.gidObject = legacy.get_gid_object()\r
-        lifetime = legacy.get_lifetime()\r
-        if not lifetime:\r
-            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))\r
-        else:\r
-            self.set_expiration(int(lifetime))\r
-        self.lifeTime = legacy.get_lifetime()\r
-        self.set_privileges(legacy.get_privileges())\r
-        self.get_privileges().delegate_all_privileges(legacy.get_delegate())\r
-\r
-    ##\r
-    # Need the issuer's private key and name\r
-    # @param key Keypair object containing the private key of the issuer\r
-    # @param gid GID of the issuing authority\r
-\r
-    def set_issuer_keys(self, privkey, gid):\r
-        self.issuer_privkey = privkey\r
-        self.issuer_gid = gid\r
-\r
-\r
-    ##\r
-    # Set this credential's parent\r
-    def set_parent(self, cred):\r
-        self.parent = cred\r
-        self.updateRefID()\r
-\r
-    ##\r
-    # set the GID of the caller\r
-    #\r
-    # @param gid GID object of the caller\r
-\r
-    def set_gid_caller(self, gid):\r
-        self.gidCaller = gid\r
-        # gid origin caller is the caller's gid by default\r
-        self.gidOriginCaller = gid\r
-\r
-    ##\r
-    # get the GID of the object\r
-\r
-    def get_gid_caller(self):\r
-        if not self.gidCaller:\r
-            self.decode()\r
-        return self.gidCaller\r
-\r
-    ##\r
-    # set the GID of the object\r
-    #\r
-    # @param gid GID object of the object\r
-\r
-    def set_gid_object(self, gid):\r
-        self.gidObject = gid\r
-\r
-    ##\r
-    # get the GID of the object\r
-\r
-    def get_gid_object(self):\r
-        if not self.gidObject:\r
-            self.decode()\r
-        return self.gidObject\r
-\r
-\r
-            \r
-    ##\r
-    # Expiration: an absolute UTC time of expiration (as either an int or string or datetime)\r
-    # \r
-    def set_expiration(self, expiration):\r
-        if isinstance(expiration, (int, float)):\r
-            self.expiration = datetime.datetime.fromtimestamp(expiration)\r
-        elif isinstance (expiration, datetime.datetime):\r
-            self.expiration = expiration\r
-        elif isinstance (expiration, StringTypes):\r
-            self.expiration = utcparse (expiration)\r
-        else:\r
-            logger.error ("unexpected input type in Credential.set_expiration")\r
-\r
-\r
-    ##\r
-    # get the lifetime of the credential (always in datetime format)\r
-\r
-    def get_expiration(self):\r
-        if not self.expiration:\r
-            self.decode()\r
-        # at this point self.expiration is normalized as a datetime - DON'T call utcparse again\r
-        return self.expiration\r
-\r
-    ##\r
-    # For legacy sake\r
-    def get_lifetime(self):\r
-        return self.get_expiration()\r
\r
-    ##\r
-    # set the privileges\r
-    #\r
-    # @param privs either a comma-separated list of privileges of a Rights object\r
-\r
-    def set_privileges(self, privs):\r
-        if isinstance(privs, str):\r
-            self.privileges = Rights(string = privs)\r
-        else:\r
-            self.privileges = privs\r
-        \r
-\r
-    ##\r
-    # return the privileges as a Rights object\r
-\r
-    def get_privileges(self):\r
-        if not self.privileges:\r
-            self.decode()\r
-        return self.privileges\r
-\r
-    ##\r
-    # determine whether the credential allows a particular operation to be\r
-    # performed\r
-    #\r
-    # @param op_name string specifying name of operation ("lookup", "update", etc)\r
-\r
-    def can_perform(self, op_name):\r
-        rights = self.get_privileges()\r
-        \r
-        if not rights:\r
-            return False\r
-\r
-        return rights.can_perform(op_name)\r
-\r
-\r
-    ##\r
-    # Encode the attributes of the credential into an XML string    \r
-    # This should be done immediately before signing the credential.    \r
-    # WARNING:\r
-    # In general, a signed credential obtained externally should\r
-    # not be changed else the signature is no longer valid.  So, once\r
-    # you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-    def encode(self):\r
-        # Create the XML document\r
-        doc = Document()\r
-        signed_cred = doc.createElement("signed-credential")\r
-\r
-# Declare namespaces\r
-# Note that credential/policy.xsd are really the PG schemas\r
-# in a PL namespace.\r
-# Note that delegation of credentials between the 2 only really works\r
-# cause those schemas are identical.\r
-# Also note these PG schemas talk about PG tickets and CM policies.\r
-        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")\r
-        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.planet-lab.org/resources/sfa/credential.xsd")\r
-        signed_cred.setAttribute("xsi:schemaLocation", "http://www.planet-lab.org/resources/sfa/ext/policy/1 http://www.planet-lab.org/resources/sfa/ext/policy/1/policy.xsd")\r
-\r
-# PG says for those last 2:\r
-#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")\r
-#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")\r
-\r
-        doc.appendChild(signed_cred)  \r
-        \r
-        # Fill in the <credential> bit        \r
-        cred = doc.createElement("credential")\r
-        cred.setAttribute("xml:id", self.get_refid())\r
-        signed_cred.appendChild(cred)\r
-        append_sub(doc, cred, "type", "privilege")\r
-        append_sub(doc, cred, "serial", "8")\r
-        append_sub(doc, cred, "owner_gid", self.gidCaller.save_to_string())\r
-        append_sub(doc, cred, "owner_urn", self.gidCaller.get_urn())\r
-        append_sub(doc, cred, "target_gid", self.gidObject.save_to_string())\r
-        append_sub(doc, cred, "target_urn", self.gidObject.get_urn())\r
-        append_sub(doc, cred, "uuid", "")\r
-        if not self.expiration:\r
-            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))\r
-        self.expiration = self.expiration.replace(microsecond=0)\r
-        append_sub(doc, cred, "expires", self.expiration.isoformat())\r
-        privileges = doc.createElement("privileges")\r
-        cred.appendChild(privileges)\r
-\r
-        if self.privileges:\r
-            rights = self.get_privileges()\r
-            for right in rights.rights:\r
-                priv = doc.createElement("privilege")\r
-                append_sub(doc, priv, "name", right.kind)\r
-                append_sub(doc, priv, "can_delegate", str(right.delegate).lower())\r
-                privileges.appendChild(priv)\r
-\r
-        # Add the parent credential if it exists\r
-        if self.parent:\r
-            sdoc = parseString(self.parent.get_xml())\r
-            # If the root node is a signed-credential (it should be), then\r
-            # get all its attributes and attach those to our signed_cred\r
-            # node.\r
-            # Specifically, PG and PLadd attributes for namespaces (which is reasonable),\r
-            # and we need to include those again here or else their signature\r
-            # no longer matches on the credential.\r
-            # We expect three of these, but here we copy them all:\r
-#        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")\r
-# and from PG (PL is equivalent, as shown above):\r
-#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")\r
-#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")\r
-\r
-            # HOWEVER!\r
-            # PL now also declares these, with different URLs, so\r
-            # the code notices those attributes already existed with\r
-            # different values, and complains.\r
-            # This happens regularly on delegation now that PG and\r
-            # PL both declare the namespace with different URLs.\r
-            # If the content ever differs this is a problem,\r
-            # but for now it works - different URLs (values in the attributes)\r
-            # but the same actual schema, so using the PG schema\r
-            # on delegated-to-PL credentials works fine.\r
-\r
-            # Note: you could also not copy attributes\r
-            # which already exist. It appears that both PG and PL\r
-            # will actually validate a slicecred with a parent\r
-            # signed using PG namespaces and a child signed with PL\r
-            # namespaces over the whole thing. But I don't know\r
-            # if that is a bug in xmlsec1, an accident since\r
-            # the contents of the schemas are the same,\r
-            # or something else, but it seems odd. And this works.\r
-            parentRoot = sdoc.documentElement\r
-            if parentRoot.tagName == "signed-credential" and parentRoot.hasAttributes():\r
-                for attrIx in range(0, parentRoot.attributes.length):\r
-                    attr = parentRoot.attributes.item(attrIx)\r
-                    # returns the old attribute of same name that was\r
-                    # on the credential\r
-                    # Below throws InUse exception if we forgot to clone the attribute first\r
-                    oldAttr = signed_cred.setAttributeNode(attr.cloneNode(True))\r
-                    if oldAttr and oldAttr.value != attr.value:\r
-                        msg = "Delegating cred from owner %s to %s over %s replaced attribute %s value '%s' with '%s'" % (self.parent.gidCaller.get_urn(), self.gidCaller.get_urn(), self.gidObject.get_urn(), oldAttr.name, oldAttr.value, attr.value)\r
-                        logger.warn(msg)\r
-                        #raise CredentialNotVerifiable("Can't encode new valid delegated credential: %s" % msg)\r
-\r
-            p_cred = doc.importNode(sdoc.getElementsByTagName("credential")[0], True)\r
-            p = doc.createElement("parent")\r
-            p.appendChild(p_cred)\r
-            cred.appendChild(p)\r
-        # done handling parent credential\r
-\r
-        # Create the <signatures> tag\r
-        signatures = doc.createElement("signatures")\r
-        signed_cred.appendChild(signatures)\r
-\r
-        # Add any parent signatures\r
-        if self.parent:\r
-            for cur_cred in self.get_credential_list()[1:]:\r
-                sdoc = parseString(cur_cred.get_signature().get_xml())\r
-                ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)\r
-                signatures.appendChild(ele)\r
-                \r
-        # Get the finished product\r
-        self.xml = doc.toxml()\r
-\r
-\r
-    def save_to_random_tmp_file(self):       \r
-        fp, filename = mkstemp(suffix='cred', text=True)\r
-        fp = os.fdopen(fp, "w")\r
-        self.save_to_file(filename, save_parents=True, filep=fp)\r
-        return filename\r
-    \r
-    def save_to_file(self, filename, save_parents=True, filep=None):\r
-        if not self.xml:\r
-            self.encode()\r
-        if filep:\r
-            f = filep \r
-        else:\r
-            f = open(filename, "w")\r
-        f.write(self.xml)\r
-        f.close()\r
-\r
-    def save_to_string(self, save_parents=True):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    def get_refid(self):\r
-        if not self.refid:\r
-            self.refid = 'ref0'\r
-        return self.refid\r
-\r
-    def set_refid(self, rid):\r
-        self.refid = rid\r
-\r
-    ##\r
-    # Figure out what refids exist, and update this credential's id\r
-    # so that it doesn't clobber the others.  Returns the refids of\r
-    # the parents.\r
-    \r
-    def updateRefID(self):\r
-        if not self.parent:\r
-            self.set_refid('ref0')\r
-            return []\r
-        \r
-        refs = []\r
-\r
-        next_cred = self.parent\r
-        while next_cred:\r
-            refs.append(next_cred.get_refid())\r
-            if next_cred.parent:\r
-                next_cred = next_cred.parent\r
-            else:\r
-                next_cred = None\r
-\r
-        \r
-        # Find a unique refid for this credential\r
-        rid = self.get_refid()\r
-        while rid in refs:\r
-            val = int(rid[3:])\r
-            rid = "ref%d" % (val + 1)\r
-\r
-        # Set the new refid\r
-        self.set_refid(rid)\r
-\r
-        # Return the set of parent credential ref ids\r
-        return refs\r
-\r
-    def get_xml(self):\r
-        if not self.xml:\r
-            self.encode()\r
-        return self.xml\r
-\r
-    ##\r
-    # Sign the XML file created by encode()\r
-    #\r
-    # WARNING:\r
-    # In general, a signed credential obtained externally should\r
-    # not be changed else the signature is no longer valid.  So, once\r
-    # you have loaded an existing signed credential, do not call encode() or sign() on it.\r
-\r
-    def sign(self):\r
-        if not self.issuer_privkey or not self.issuer_gid:\r
-            return\r
-        doc = parseString(self.get_xml())\r
-        sigs = doc.getElementsByTagName("signatures")[0]\r
-\r
-        # Create the signature template to be signed\r
-        signature = Signature()\r
-        signature.set_refid(self.get_refid())\r
-        sdoc = parseString(signature.get_xml())        \r
-        sig_ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)\r
-        sigs.appendChild(sig_ele)\r
-\r
-        self.xml = doc.toxml()\r
-\r
-\r
-        # Split the issuer GID into multiple certificates if it's a chain\r
-        chain = GID(filename=self.issuer_gid)\r
-        gid_files = []\r
-        while chain:\r
-            gid_files.append(chain.save_to_random_tmp_file(False))\r
-            if chain.get_parent():\r
-                chain = chain.get_parent()\r
-            else:\r
-                chain = None\r
-\r
-\r
-        # Call out to xmlsec1 to sign it\r
-        ref = 'Sig_%s' % self.get_refid()\r
-        filename = self.save_to_random_tmp_file()\r
-        signed = os.popen('%s --sign --node-id "%s" --privkey-pem %s,%s %s' \\r
-                 % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)).read()\r
-        os.remove(filename)\r
-\r
-        for gid_file in gid_files:\r
-            os.remove(gid_file)\r
-\r
-        self.xml = signed\r
-\r
-        # This is no longer a legacy credential\r
-        if self.legacy:\r
-            self.legacy = None\r
-\r
-        # Update signatures\r
-        self.decode()       \r
-\r
-        \r
-    ##\r
-    # Retrieve the attributes of the credential from the XML.\r
-    # This is automatically called by the various get_* methods of\r
-    # this class and should not need to be called explicitly.\r
-\r
-    def decode(self):\r
-        if not self.xml:\r
-            return\r
-        doc = parseString(self.xml)\r
-        sigs = []\r
-        signed_cred = doc.getElementsByTagName("signed-credential")\r
-\r
-        # Is this a signed-cred or just a cred?\r
-        if len(signed_cred) > 0:\r
-            creds = signed_cred[0].getElementsByTagName("credential")\r
-            signatures = signed_cred[0].getElementsByTagName("signatures")\r
-            if len(signatures) > 0:\r
-                sigs = signatures[0].getElementsByTagName("Signature")\r
-        else:\r
-            creds = doc.getElementsByTagName("credential")\r
-        \r
-        if creds is None or len(creds) == 0:\r
-            # malformed cred file\r
-            raise CredentialNotVerifiable("Malformed XML: No credential tag found")\r
-\r
-        # Just take the first cred if there are more than one\r
-        cred = creds[0]\r
-\r
-        self.set_refid(cred.getAttribute("xml:id"))\r
-        self.set_expiration(utcparse(getTextNode(cred, "expires")))\r
-        self.gidCaller = GID(string=getTextNode(cred, "owner_gid"))\r
-        self.gidObject = GID(string=getTextNode(cred, "target_gid"))   \r
-\r
-\r
-        # Process privileges\r
-        privs = cred.getElementsByTagName("privileges")[0]\r
-        rlist = Rights()\r
-        for priv in privs.getElementsByTagName("privilege"):\r
-            kind = getTextNode(priv, "name")\r
-            deleg = str2bool(getTextNode(priv, "can_delegate"))\r
-            if kind == '*':\r
-                # Convert * into the default privileges for the credential's type\r
-                # Each inherits the delegatability from the * above\r
-                _ , type = urn_to_hrn(self.gidObject.get_urn())\r
-                rl = determine_rights(type, self.gidObject.get_urn())\r
-                for r in rl.rights:\r
-                    r.delegate = deleg\r
-                    rlist.add(r)\r
-            else:\r
-                rlist.add(Right(kind.strip(), deleg))\r
-        self.set_privileges(rlist)\r
-\r
-\r
-        # Is there a parent?\r
-        parent = cred.getElementsByTagName("parent")\r
-        if len(parent) > 0:\r
-            parent_doc = parent[0].getElementsByTagName("credential")[0]\r
-            parent_xml = parent_doc.toxml()\r
-            self.parent = Credential(string=parent_xml)\r
-            self.updateRefID()\r
-\r
-        # Assign the signatures to the credentials\r
-        for sig in sigs:\r
-            Sig = Signature(string=sig.toxml())\r
-\r
-            for cur_cred in self.get_credential_list():\r
-                if cur_cred.get_refid() == Sig.get_refid():\r
-                    cur_cred.set_signature(Sig)\r
-                                    \r
-            \r
-    ##\r
-    # Verify\r
-    #   trusted_certs: A list of trusted GID filenames (not GID objects!) \r
-    #                  Chaining is not supported within the GIDs by xmlsec1.\r
-    #\r
-    #   trusted_certs_required: Should usually be true. Set False means an\r
-    #                 empty list of trusted_certs would still let this method pass.\r
-    #                 It just skips xmlsec1 verification et al. Only used by some utils\r
-    #    \r
-    # Verify that:\r
-    # . All of the signatures are valid and that the issuers trace back\r
-    #   to trusted roots (performed by xmlsec1)\r
-    # . The XML matches the credential schema\r
-    # . That the issuer of the credential is the authority in the target's urn\r
-    #    . In the case of a delegated credential, this must be true of the root\r
-    # . That all of the gids presented in the credential are valid\r
-    #    . Including verifying GID chains, and includ the issuer\r
-    # . The credential is not expired\r
-    #\r
-    # -- For Delegates (credentials with parents)\r
-    # . The privileges must be a subset of the parent credentials\r
-    # . The privileges must have "can_delegate" set for each delegated privilege\r
-    # . The target gid must be the same between child and parents\r
-    # . The expiry time on the child must be no later than the parent\r
-    # . The signer of the child must be the owner of the parent\r
-    #\r
-    # -- Verify does *NOT*\r
-    # . ensure that an xmlrpc client's gid matches a credential gid, that\r
-    #   must be done elsewhere\r
-    #\r
-    # @param trusted_certs: The certificates of trusted CA certificates\r
-    def verify(self, trusted_certs=None, schema=None, trusted_certs_required=True):\r
-        if not self.xml:\r
-            self.decode()\r
-\r
-        # validate against RelaxNG schema\r
-        if HAVELXML and not self.legacy:\r
-            if schema and os.path.exists(schema):\r
-                tree = etree.parse(StringIO(self.xml))\r
-                schema_doc = etree.parse(schema)\r
-                xmlschema = etree.XMLSchema(schema_doc)\r
-                if not xmlschema.validate(tree):\r
-                    error = xmlschema.error_log.last_error\r
-                    message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)\r
-                    raise CredentialNotVerifiable(message)\r
-\r
-        if trusted_certs_required and trusted_certs is None:\r
-            trusted_certs = []\r
-\r
-#        trusted_cert_objects = [GID(filename=f) for f in trusted_certs]\r
-        trusted_cert_objects = []\r
-        ok_trusted_certs = []\r
-        # If caller explicitly passed in None that means skip cert chain validation.\r
-        # Strange and not typical\r
-        if trusted_certs is not None:\r
-            for f in trusted_certs:\r
-                try:\r
-                    # Failures here include unreadable files\r
-                    # or non PEM files\r
-                    trusted_cert_objects.append(GID(filename=f))\r
-                    ok_trusted_certs.append(f)\r
-                except Exception, exc:\r
-                    logger.error("Failed to load trusted cert from %s: %r", f, exc)\r
-            trusted_certs = ok_trusted_certs\r
-\r
-        # Use legacy verification if this is a legacy credential\r
-        if self.legacy:\r
-            self.legacy.verify_chain(trusted_cert_objects)\r
-            if self.legacy.client_gid:\r
-                self.legacy.client_gid.verify_chain(trusted_cert_objects)\r
-            if self.legacy.object_gid:\r
-                self.legacy.object_gid.verify_chain(trusted_cert_objects)\r
-            return True\r
-        \r
-        # make sure it is not expired\r
-        if self.get_expiration() < datetime.datetime.utcnow():\r
-            raise CredentialNotVerifiable("Credential %s expired at %s" % (self.get_summary_tostring(), self.expiration.isoformat()))\r
-\r
-        # Verify the signatures\r
-        filename = self.save_to_random_tmp_file()\r
-        if trusted_certs is not None:\r
-            cert_args = " ".join(['--trusted-pem %s' % x for x in trusted_certs])\r
-\r
-        # If caller explicitly passed in None that means skip cert chain validation.\r
-        # - Strange and not typical\r
-        if trusted_certs is not None:\r
-            # Verify the gids of this cred and of its parents\r
-            for cur_cred in self.get_credential_list():\r
-                cur_cred.get_gid_object().verify_chain(trusted_cert_objects)\r
-                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)\r
-\r
-        refs = []\r
-        refs.append("Sig_%s" % self.get_refid())\r
-\r
-        parentRefs = self.updateRefID()\r
-        for ref in parentRefs:\r
-            refs.append("Sig_%s" % ref)\r
-\r
-        for ref in refs:\r
-            # If caller explicitly passed in None that means skip xmlsec1 validation.\r
-            # Strange and not typical\r
-            if trusted_certs is None:\r
-                break\r
-\r
-#            print "Doing %s --verify --node-id '%s' %s %s 2>&1" % \\r
-#                (self.xmlsec_path, ref, cert_args, filename)\r
-            verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \\r
-                            % (self.xmlsec_path, ref, cert_args, filename)).read()\r
-            if not verified.strip().startswith("OK"):\r
-                # xmlsec errors have a msg= which is the interesting bit.\r
-                mstart = verified.find("msg=")\r
-                msg = ""\r
-                if mstart > -1 and len(verified) > 4:\r
-                    mstart = mstart + 4\r
-                    mend = verified.find('\\', mstart)\r
-                    msg = verified[mstart:mend]\r
-                raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))\r
-        os.remove(filename)\r
-\r
-        # Verify the parents (delegation)\r
-        if self.parent:\r
-            self.verify_parent(self.parent)\r
-\r
-        # Make sure the issuer is the target's authority, and is\r
-        # itself a valid GID\r
-        self.verify_issuer(trusted_cert_objects)\r
-        return True\r
-\r
-    ##\r
-    # Creates a list of the credential and its parents, with the root \r
-    # (original delegated credential) as the last item in the list\r
-    def get_credential_list(self):    \r
-        cur_cred = self\r
-        list = []\r
-        while cur_cred:\r
-            list.append(cur_cred)\r
-            if cur_cred.parent:\r
-                cur_cred = cur_cred.parent\r
-            else:\r
-                cur_cred = None\r
-        return list\r
-    \r
-    ##\r
-    # Make sure the credential's target gid (a) was signed by or (b)\r
-    # is the same as the entity that signed the original credential,\r
-    # or (c) is an authority over the target's namespace.\r
-    # Also ensure that the credential issuer / signer itself has a valid\r
-    # GID signature chain (signed by an authority with namespace rights).\r
-    def verify_issuer(self, trusted_gids):\r
-        root_cred = self.get_credential_list()[-1]\r
-        root_target_gid = root_cred.get_gid_object()\r
-        root_cred_signer = root_cred.get_signature().get_issuer_gid()\r
-\r
-        # Case 1:\r
-        # Allow non authority to sign target and cred about target.\r
-        #\r
-        # Why do we need to allow non authorities to sign?\r
-        # If in the target gid validation step we correctly\r
-        # checked that the target is only signed by an authority,\r
-        # then this is just a special case of case 3.\r
-        # This short-circuit is the common case currently -\r
-        # and cause GID validation doesn't check 'authority',\r
-        # this allows users to generate valid slice credentials.\r
-        if root_target_gid.is_signed_by_cert(root_cred_signer):\r
-            # cred signer matches target signer, return success\r
-            return\r
-\r
-        # Case 2:\r
-        # Allow someone to sign credential about themeselves. Used?\r
-        # If not, remove this.\r
-        #root_target_gid_str = root_target_gid.save_to_string()\r
-        #root_cred_signer_str = root_cred_signer.save_to_string()\r
-        #if root_target_gid_str == root_cred_signer_str:\r
-        #    # cred signer is target, return success\r
-        #    return\r
-\r
-        # Case 3:\r
-\r
-        # root_cred_signer is not the target_gid\r
-        # So this is a different gid that we have not verified.\r
-        # xmlsec1 verified the cert chain on this already, but\r
-        # it hasn't verified that the gid meets the HRN namespace\r
-        # requirements.\r
-        # Below we'll ensure that it is an authority.\r
-        # But we haven't verified that it is _signed by_ an authority\r
-        # We also don't know if xmlsec1 requires that cert signers\r
-        # are marked as CAs.\r
-\r
-        # Note that if verify() gave us no trusted_gids then this\r
-        # call will fail. So skip it if we have no trusted_gids\r
-        if trusted_gids and len(trusted_gids) > 0:\r
-            root_cred_signer.verify_chain(trusted_gids)\r
-        else:\r
-            logger.debug("No trusted gids. Cannot verify that cred signer is signed by a trusted authority. Skipping that check.")\r
-\r
-        # See if the signer is an authority over the domain of the target.\r
-        # There are multiple types of authority - accept them all here\r
-        # Maybe should be (hrn, type) = urn_to_hrn(root_cred_signer.get_urn())\r
-        root_cred_signer_type = root_cred_signer.get_type()\r
-        if (root_cred_signer_type.find('authority') == 0):\r
-            #logger.debug('Cred signer is an authority')\r
-            # signer is an authority, see if target is in authority's domain\r
-            signerhrn = root_cred_signer.get_hrn()\r
-            if hrn_authfor_hrn(signerhrn, root_target_gid.get_hrn()):\r
-                return\r
-\r
-        # We've required that the credential be signed by an authority\r
-        # for that domain. Reasonable and probably correct.\r
-        # A looser model would also allow the signer to be an authority\r
-        # in my control framework - eg My CA or CH. Even if it is not\r
-        # the CH that issued these, eg, user credentials.\r
-\r
-        # Give up, credential does not pass issuer verification\r
-\r
-        raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred signer %s not the trusted authority for Cred target %s" % (self.gidCaller.get_urn(), self.gidObject.get_urn(), root_cred_signer.get_hrn(), root_target_gid.get_hrn()))\r
-\r
-\r
-    ##\r
-    # -- For Delegates (credentials with parents) verify that:\r
-    # . The privileges must be a subset of the parent credentials\r
-    # . The privileges must have "can_delegate" set for each delegated privilege\r
-    # . The target gid must be the same between child and parents\r
-    # . The expiry time on the child must be no later than the parent\r
-    # . The signer of the child must be the owner of the parent        \r
-    def verify_parent(self, parent_cred):\r
-        # make sure the rights given to the child are a subset of the\r
-        # parents rights (and check delegate bits)\r
-        if not parent_cred.get_privileges().is_superset(self.get_privileges()):\r
-            raise ChildRightsNotSubsetOfParent(("Parent cred ref %s rights " % parent_cred.get_refid()) +\r
-                self.parent.get_privileges().save_to_string() + (" not superset of delegated cred %s ref %s rights " % (self.get_summary_tostring(), self.get_refid())) +\r
-                self.get_privileges().save_to_string())\r
-\r
-        # make sure my target gid is the same as the parent's\r
-        if not parent_cred.get_gid_object().save_to_string() == \\r
-           self.get_gid_object().save_to_string():\r
-            raise CredentialNotVerifiable("Delegated cred %s: Target gid not equal between parent and child. Parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-\r
-        # make sure my expiry time is <= my parent's\r
-        if not parent_cred.get_expiration() >= self.get_expiration():\r
-            raise CredentialNotVerifiable("Delegated credential %s expires after parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-\r
-        # make sure my signer is the parent's caller\r
-        if not parent_cred.get_gid_caller().save_to_string(False) == \\r
-           self.get_signature().get_issuer_gid().save_to_string(False):\r
-            raise CredentialNotVerifiable("Delegated credential %s not signed by parent %s's caller" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))\r
-                \r
-        # Recurse\r
-        if parent_cred.parent:\r
-            parent_cred.verify_parent(parent_cred.parent)\r
-\r
-\r
-    def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):\r
-        """\r
-        Return a delegated copy of this credential, delegated to the \r
-        specified gid's user.    \r
-        """\r
-        # get the gid of the object we are delegating\r
-        object_gid = self.get_gid_object()\r
-        object_hrn = object_gid.get_hrn()        \r
\r
-        # the hrn of the user who will be delegated to\r
-        delegee_gid = GID(filename=delegee_gidfile)\r
-        delegee_hrn = delegee_gid.get_hrn()\r
-  \r
-        #user_key = Keypair(filename=keyfile)\r
-        #user_hrn = self.get_gid_caller().get_hrn()\r
-        subject_string = "%s delegated to %s" % (object_hrn, delegee_hrn)\r
-        dcred = Credential(subject=subject_string)\r
-        dcred.set_gid_caller(delegee_gid)\r
-        dcred.set_gid_object(object_gid)\r
-        dcred.set_parent(self)\r
-        dcred.set_expiration(self.get_expiration())\r
-        dcred.set_privileges(self.get_privileges())\r
-        dcred.get_privileges().delegate_all_privileges(True)\r
-        #dcred.set_issuer_keys(keyfile, delegee_gidfile)\r
-        dcred.set_issuer_keys(caller_keyfile, caller_gidfile)\r
-        dcred.encode()\r
-        dcred.sign()\r
-\r
-        return dcred\r
-\r
-    # only informative\r
-    def get_filename(self):\r
-        return getattr(self,'filename',None)\r
-\r
-    ##\r
-    # Dump the contents of a credential to stdout in human-readable format\r
-    #\r
-    # @param dump_parents If true, also dump the parent certificates\r
-    def dump (self, *args, **kwargs):\r
-        print self.dump_string(*args, **kwargs)\r
-\r
-\r
-    def dump_string(self, dump_parents=False):\r
-        result=""\r
-        result += "CREDENTIAL %s\n" % self.get_subject()\r
-        filename=self.get_filename()\r
-        if filename: result += "Filename %s\n"%filename\r
-        result += "      privs: %s\n" % self.get_privileges().save_to_string()\r
-        gidCaller = self.get_gid_caller()\r
-        if gidCaller:\r
-            result += "  gidCaller:\n"\r
-            result += gidCaller.dump_string(8, dump_parents)\r
-\r
-        if self.get_signature():\r
-            print "  gidIssuer:"\r
-            self.get_signature().get_issuer_gid().dump(8, dump_parents)\r
-\r
-        gidObject = self.get_gid_object()\r
-        if gidObject:\r
-            result += "  gidObject:\n"\r
-            result += gidObject.dump_string(8, dump_parents)\r
-\r
-        if self.parent and dump_parents:\r
-            result += "\nPARENT"\r
-            result += self.parent.dump_string(True)\r
-\r
-        return result\r
+#----------------------------------------------------------------------
+# Copyright (c) 2008 Board of Trustees, Princeton University
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and/or hardware specification (the "Work") to
+# deal in the Work without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Work, and to permit persons to whom the Work
+# is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Work.
+#
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS 
+# IN THE WORK.
+#----------------------------------------------------------------------
+##
+# Implements SFA Credentials
+#
+# Credentials are signed XML files that grant a subject gid privileges on an object gid
+##
+
+import os
+from types import StringTypes
+import datetime
+from StringIO import StringIO
+from tempfile import mkstemp
+from xml.dom.minidom import Document, parseString
+
+HAVELXML = False
+try:
+    from lxml import etree
+    HAVELXML = True
+except:
+    pass
+
+from xml.parsers.expat import ExpatError
+
+from sfa.util.faults import CredentialNotVerifiable, ChildRightsNotSubsetOfParent
+from sfa.util.sfalogging import logger
+from sfa.util.sfatime import utcparse
+from sfa.trust.credential_legacy import CredentialLegacy
+from sfa.trust.rights import Right, Rights, determine_rights
+from sfa.trust.gid import GID
+from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn
+
+# 2 weeks, in seconds 
+DEFAULT_CREDENTIAL_LIFETIME = 86400 * 14
+
+
+# TODO:
+# . make privs match between PG and PL
+# . Need to add support for other types of credentials, e.g. tickets
+# . add namespaces to signed-credential element?
+
+signature_template = \
+'''
+<Signature xml:id="Sig_%s" xmlns="http://www.w3.org/2000/09/xmldsig#">
+  <SignedInfo>
+    <CanonicalizationMethod Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>
+    <SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+    <Reference URI="#%s">
+      <Transforms>
+        <Transform Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
+      </Transforms>
+      <DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+      <DigestValue></DigestValue>
+    </Reference>
+  </SignedInfo>
+  <SignatureValue />
+  <KeyInfo>
+    <X509Data>
+      <X509SubjectName/>
+      <X509IssuerSerial/>
+      <X509Certificate/>
+    </X509Data>
+    <KeyValue />
+  </KeyInfo>
+</Signature>
+'''
+
+# PG formats the template (whitespace) slightly differently.
+# Note that they don't include the xmlns in the template, but add it later.
+# Otherwise the two are equivalent.
+#signature_template_as_in_pg = \
+#'''
+#<Signature xml:id="Sig_%s" >
+# <SignedInfo>
+#  <CanonicalizationMethod      Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315"/>
+#  <SignatureMethod      Algorithm="http://www.w3.org/2000/09/xmldsig#rsa-sha1"/>
+#  <Reference URI="#%s">
+#    <Transforms>
+#      <Transform         Algorithm="http://www.w3.org/2000/09/xmldsig#enveloped-signature" />
+#    </Transforms>
+#    <DigestMethod        Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/>
+#    <DigestValue></DigestValue>
+#    </Reference>
+# </SignedInfo>
+# <SignatureValue />
+# <KeyInfo>
+#  <X509Data >
+#   <X509SubjectName/>
+#   <X509IssuerSerial/>
+#   <X509Certificate/>
+#  </X509Data>
+#  <KeyValue />
+# </KeyInfo>
+#</Signature>
+#'''
+
+##
+# Convert a string into a bool
+# used to convert an xsd:boolean to a Python boolean
+def str2bool(str):
+    if str.lower() in ['true','1']:
+        return True
+    return False
+
+
+##
+# Utility function to get the text of an XML element
+
+def getTextNode(element, subele):
+    sub = element.getElementsByTagName(subele)[0]
+    if len(sub.childNodes) > 0:            
+        return sub.childNodes[0].nodeValue
+    else:
+        return None
+        
+##
+# Utility function to set the text of an XML element
+# It creates the element, adds the text to it,
+# and then appends it to the parent.
+
+def append_sub(doc, parent, element, text):
+    ele = doc.createElement(element)
+    ele.appendChild(doc.createTextNode(text))
+    parent.appendChild(ele)
+
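
These two helpers are the only XML plumbing the rest of the file uses. A minimal sketch of how they pair up on a scratch minidom document, run in this module's namespace (the element names are illustrative, not part of the credential schema):

    from xml.dom.minidom import Document

    doc = Document()
    root = doc.createElement("example")                # illustrative element name
    doc.appendChild(root)
    append_sub(doc, root, "expires", "2012-01-01T00:00:00")
    print(getTextNode(root, "expires"))                # -> 2012-01-01T00:00:00
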
+##
+# Signature contains information about an xmlsec1 signature
+# for a signed-credential
+#
+
+class Signature(object):
+   
+    def __init__(self, string=None):
+        self.refid = None
+        self.issuer_gid = None
+        self.xml = None
+        if string:
+            self.xml = string
+            self.decode()
+
+
+    def get_refid(self):
+        if not self.refid:
+            self.decode()
+        return self.refid
+
+    def get_xml(self):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    def set_refid(self, id):
+        self.refid = id
+
+    def get_issuer_gid(self):
+        if not self.gid:
+            self.decode()
+        return self.gid        
+
+    def set_issuer_gid(self, gid):
+        self.gid = gid
+
+    def decode(self):
+        try:
+            doc = parseString(self.xml)
+        except ExpatError,e:
+            logger.log_exc ("Failed to parse credential, %s"%self.xml)
+            raise
+        sig = doc.getElementsByTagName("Signature")[0]
+        self.set_refid(sig.getAttribute("xml:id").strip("Sig_"))
+        keyinfo = sig.getElementsByTagName("X509Data")[0]
+        szgid = getTextNode(keyinfo, "X509Certificate")
+        szgid = "-----BEGIN CERTIFICATE-----\n%s\n-----END CERTIFICATE-----" % szgid
+        self.set_issuer_gid(GID(string=szgid))        
+        
+    def encode(self):
+        self.xml = signature_template % (self.get_refid(), self.get_refid())
+
+
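
A fresh Signature has no XML, so get_xml() falls through to encode(), which simply instantiates signature_template with the refid. A short sketch in this module's namespace, with an illustrative refid:

    sig = Signature()
    sig.set_refid('ref0')
    xml = sig.get_xml()                 # encode() fills signature_template
    print('Sig_ref0' in xml)            # -> True
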
+##
+# A credential provides a caller gid with privileges to an object gid.
+# A signed credential is signed by the object's authority.
+#
+# Credentials are encoded in one of two ways.  The legacy style places
+# the credential in the subjectAltName of an X509 certificate; the new
+# credentials are placed in signed XML.
+#
+# WARNING:
+# In general, a signed credential obtained externally should
+# not be changed else the signature is no longer valid.  So, once
+# you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+def filter_creds_by_caller(creds, caller_hrn_list):
+        """
+        Returns a list of creds whose gid caller matches the
+        specified caller hrn.
+        """
+        if not isinstance(creds, list): creds = [creds]
+        if not isinstance(caller_hrn_list, list): 
+            caller_hrn_list = [caller_hrn_list]
+        caller_creds = []
+        for cred in creds:
+            try:
+                tmp_cred = Credential(string=cred)
+                if tmp_cred.get_gid_caller().get_hrn() in caller_hrn_list:
+                    caller_creds.append(cred)
+            except: pass
+        return caller_creds
+
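
A minimal usage sketch for filter_creds_by_caller, in this module's namespace; cred_strings would hold signed credential XML strings loaded elsewhere, and the hrn below is a placeholder:

    cred_strings = []                   # fill with credential XML strings loaded elsewhere
    mine = filter_creds_by_caller(cred_strings, 'plc.example.alice')   # placeholder hrn
    for cred_xml in mine:
        print(Credential(string=cred_xml).get_summary_tostring())
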
+class Credential(object):
+
+    ##
+    # Create a Credential object
+    #
+    # @param create If true, create a blank x509 certificate
+    # @param subject If subject!=None, create an x509 cert with the subject name
+    # @param string If string!=None, load the credential from the string
+    # @param filename If filename!=None, load the credential from the file
+    # FIXME: create and subject are ignored!
+    def __init__(self, create=False, subject=None, string=None, filename=None):
+        self.gidCaller = None
+        self.gidObject = None
+        self.expiration = None
+        self.privileges = None
+        self.issuer_privkey = None
+        self.issuer_gid = None
+        self.issuer_pubkey = None
+        self.parent = None
+        self.signature = None
+        self.xml = None
+        self.refid = None
+        self.legacy = None
+
+        # Check if this is a legacy credential, translate it if so
+        if string or filename:
+            if string:                
+                str = string
+            elif filename:
+                str = file(filename).read()
+                
+            if str.strip().startswith("-----"):
+                self.legacy = CredentialLegacy(False,string=str)
+                self.translate_legacy(str)
+            else:
+                self.xml = str
+                self.decode()
+
+        # Find an xmlsec1 path
+        self.xmlsec_path = ''
+        paths = ['/usr/bin','/usr/local/bin','/bin','/opt/bin','/opt/local/bin']
+        for path in paths:
+            if os.path.isfile(path + '/' + 'xmlsec1'):
+                self.xmlsec_path = path + '/' + 'xmlsec1'
+                break
+
+    def get_subject(self):
+        if not self.gidObject:
+            self.decode()
+        return self.gidObject.get_printable_subject()
+
+    def get_summary_tostring(self):
+        if not self.gidObject:
+            self.decode()
+        obj = self.gidObject.get_printable_subject()
+        caller = self.gidCaller.get_printable_subject()
+        exp = self.get_expiration()
+        # Summarize the rights too? The issuer?
+        return "[ Grant %s rights on %s until %s ]" % (caller, obj, exp)
+
+    def get_signature(self):
+        if not self.signature:
+            self.decode()
+        return self.signature
+
+    def set_signature(self, sig):
+        self.signature = sig
+
+        
+    ##
+    # Translate a legacy credential into a new one
+    #
+    # @param String of the legacy credential
+
+    def translate_legacy(self, str):
+        legacy = CredentialLegacy(False,string=str)
+        self.gidCaller = legacy.get_gid_caller()
+        self.gidObject = legacy.get_gid_object()
+        lifetime = legacy.get_lifetime()
+        if not lifetime:
+            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))
+        else:
+            self.set_expiration(int(lifetime))
+        self.lifeTime = legacy.get_lifetime()
+        self.set_privileges(legacy.get_privileges())
+        self.get_privileges().delegate_all_privileges(legacy.get_delegate())
+
+    ##
+    # Need the issuer's private key and name
+    # @param key Keypair object containing the private key of the issuer
+    # @param gid GID of the issuing authority
+
+    def set_issuer_keys(self, privkey, gid):
+        self.issuer_privkey = privkey
+        self.issuer_gid = gid
+
+
+    ##
+    # Set this credential's parent
+    def set_parent(self, cred):
+        self.parent = cred
+        self.updateRefID()
+
+    ##
+    # set the GID of the caller
+    #
+    # @param gid GID object of the caller
+
+    def set_gid_caller(self, gid):
+        self.gidCaller = gid
+        # gid origin caller is the caller's gid by default
+        self.gidOriginCaller = gid
+
+    ##
+    # get the GID of the object
+
+    def get_gid_caller(self):
+        if not self.gidCaller:
+            self.decode()
+        return self.gidCaller
+
+    ##
+    # set the GID of the object
+    #
+    # @param gid GID object of the object
+
+    def set_gid_object(self, gid):
+        self.gidObject = gid
+
+    ##
+    # get the GID of the object
+
+    def get_gid_object(self):
+        if not self.gidObject:
+            self.decode()
+        return self.gidObject
+
+
+            
+    ##
+    # Expiration: an absolute UTC time of expiration (as either an int or string or datetime)
+    # 
+    def set_expiration(self, expiration):
+        if isinstance(expiration, (int, float)):
+            self.expiration = datetime.datetime.fromtimestamp(expiration)
+        elif isinstance (expiration, datetime.datetime):
+            self.expiration = expiration
+        elif isinstance (expiration, StringTypes):
+            self.expiration = utcparse (expiration)
+        else:
+            logger.error ("unexpected input type in Credential.set_expiration")
+
+
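
set_expiration normalizes its argument to a datetime, so all three call styles below are accepted (epoch seconds, ISO string, datetime). A sketch in this module's namespace:

    import time

    cred = Credential()
    cred.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(hours=1))
    cred.set_expiration("2012-01-01T00:00:00")       # string, parsed via utcparse
    cred.set_expiration(time.time() + 3600)          # float epoch seconds
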
+    ##
+    # get the lifetime of the credential (always in datetime format)
+
+    def get_expiration(self):
+        if not self.expiration:
+            self.decode()
+        # at this point self.expiration is normalized as a datetime - DON'T call utcparse again
+        return self.expiration
+
+    ##
+    # For legacy's sake
+    def get_lifetime(self):
+        return self.get_expiration()
+    ##
+    # set the privileges
+    #
+    # @param privs either a comma-separated string of privileges or a Rights object
+
+    def set_privileges(self, privs):
+        if isinstance(privs, str):
+            self.privileges = Rights(string = privs)
+        else:
+            self.privileges = privs
+        
+
+    ##
+    # return the privileges as a Rights object
+
+    def get_privileges(self):
+        if not self.privileges:
+            self.decode()
+        return self.privileges
+
+    ##
+    # determine whether the credential allows a particular operation to be
+    # performed
+    #
+    # @param op_name string specifying name of operation ("lookup", "update", etc)
+
+    def can_perform(self, op_name):
+        rights = self.get_privileges()
+        
+        if not rights:
+            return False
+
+        return rights.can_perform(op_name)
+
+
+    ##
+    # Encode the attributes of the credential into an XML string    
+    # This should be done immediately before signing the credential.    
+    # WARNING:
+    # In general, a signed credential obtained externally should
+    # not be changed else the signature is no longer valid.  So, once
+    # you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+    def encode(self):
+        # Create the XML document
+        doc = Document()
+        signed_cred = doc.createElement("signed-credential")
+
+# Declare namespaces
+# Note that credential/policy.xsd are really the PG schemas
+# in a PL namespace.
+# Note that delegation of credentials between the two only really works
+# because those schemas are identical.
+# Also note these PG schemas talk about PG tickets and CM policies.
+        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
+        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.planet-lab.org/resources/sfa/credential.xsd")
+        signed_cred.setAttribute("xsi:schemaLocation", "http://www.planet-lab.org/resources/sfa/ext/policy/1 http://www.planet-lab.org/resources/sfa/ext/policy/1/policy.xsd")
+
+# PG says for those last 2:
+#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")
+#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")
+
+        doc.appendChild(signed_cred)  
+        
+        # Fill in the <credential> bit        
+        cred = doc.createElement("credential")
+        cred.setAttribute("xml:id", self.get_refid())
+        signed_cred.appendChild(cred)
+        append_sub(doc, cred, "type", "privilege")
+        append_sub(doc, cred, "serial", "8")
+        append_sub(doc, cred, "owner_gid", self.gidCaller.save_to_string())
+        append_sub(doc, cred, "owner_urn", self.gidCaller.get_urn())
+        append_sub(doc, cred, "target_gid", self.gidObject.save_to_string())
+        append_sub(doc, cred, "target_urn", self.gidObject.get_urn())
+        append_sub(doc, cred, "uuid", "")
+        if not self.expiration:
+            self.set_expiration(datetime.datetime.utcnow() + datetime.timedelta(seconds=DEFAULT_CREDENTIAL_LIFETIME))
+        self.expiration = self.expiration.replace(microsecond=0)
+        append_sub(doc, cred, "expires", self.expiration.isoformat())
+        privileges = doc.createElement("privileges")
+        cred.appendChild(privileges)
+
+        if self.privileges:
+            rights = self.get_privileges()
+            for right in rights.rights:
+                priv = doc.createElement("privilege")
+                append_sub(doc, priv, "name", right.kind)
+                append_sub(doc, priv, "can_delegate", str(right.delegate).lower())
+                privileges.appendChild(priv)
+
+        # Add the parent credential if it exists
+        if self.parent:
+            sdoc = parseString(self.parent.get_xml())
+            # If the root node is a signed-credential (it should be), then
+            # get all its attributes and attach those to our signed_cred
+            # node.
+            # Specifically, PG and PL add attributes for namespaces (which is reasonable),
+            # and we need to include those again here or else their signature
+            # no longer matches on the credential.
+            # We expect three of these, but here we copy them all:
+#        signed_cred.setAttribute("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance")
+# and from PG (PL is equivalent, as shown above):
+#        signed_cred.setAttribute("xsi:noNamespaceSchemaLocation", "http://www.protogeni.net/resources/credential/credential.xsd")
+#        signed_cred.setAttribute("xsi:schemaLocation", "http://www.protogeni.net/resources/credential/ext/policy/1 http://www.protogeni.net/resources/credential/ext/policy/1/policy.xsd")
+
+            # HOWEVER!
+            # PL now also declares these, with different URLs, so
+            # the code notices those attributes already existed with
+            # different values, and complains.
+            # This happens regularly on delegation now that PG and
+            # PL both declare the namespace with different URLs.
+            # If the content ever differs this is a problem,
+            # but for now it works - different URLs (values in the attributes)
+            # but the same actual schema, so using the PG schema
+            # on delegated-to-PL credentials works fine.
+
+            # Note: you could also not copy attributes
+            # which already exist. It appears that both PG and PL
+            # will actually validate a slicecred with a parent
+            # signed using PG namespaces and a child signed with PL
+            # namespaces over the whole thing. But I don't know
+            # if that is a bug in xmlsec1, an accident since
+            # the contents of the schemas are the same,
+            # or something else, but it seems odd. And this works.
+            parentRoot = sdoc.documentElement
+            if parentRoot.tagName == "signed-credential" and parentRoot.hasAttributes():
+                for attrIx in range(0, parentRoot.attributes.length):
+                    attr = parentRoot.attributes.item(attrIx)
+                    # returns the old attribute of same name that was
+                    # on the credential
+                    # Below throws InUse exception if we forgot to clone the attribute first
+                    oldAttr = signed_cred.setAttributeNode(attr.cloneNode(True))
+                    if oldAttr and oldAttr.value != attr.value:
+                        msg = "Delegating cred from owner %s to %s over %s replaced attribute %s value '%s' with '%s'" % (self.parent.gidCaller.get_urn(), self.gidCaller.get_urn(), self.gidObject.get_urn(), oldAttr.name, oldAttr.value, attr.value)
+                        logger.warn(msg)
+                        #raise CredentialNotVerifiable("Can't encode new valid delegated credential: %s" % msg)
+
+            p_cred = doc.importNode(sdoc.getElementsByTagName("credential")[0], True)
+            p = doc.createElement("parent")
+            p.appendChild(p_cred)
+            cred.appendChild(p)
+        # done handling parent credential
+
+        # Create the <signatures> tag
+        signatures = doc.createElement("signatures")
+        signed_cred.appendChild(signatures)
+
+        # Add any parent signatures
+        if self.parent:
+            for cur_cred in self.get_credential_list()[1:]:
+                sdoc = parseString(cur_cred.get_signature().get_xml())
+                ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)
+                signatures.appendChild(ele)
+                
+        # Get the finished product
+        self.xml = doc.toxml()
+
+
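
Putting encode() in context: the usual construction sequence is to build the credential in memory, encode() it into the signed-credential skeleton, then sign() it. A hedged sketch in this module's namespace; the file paths and the privilege string are placeholders, not values taken from this codebase:

    cred = Credential(subject='example slice credential')
    cred.set_gid_caller(GID(filename='/tmp/user.gid'))          # placeholder path
    cred.set_gid_object(GID(filename='/tmp/slice.gid'))         # placeholder path
    cred.set_privileges('refresh,resolve,info')                 # illustrative privilege string
    cred.set_issuer_keys('/tmp/authority.pkey', '/tmp/authority.gid')
    cred.encode()                                               # builds the XML
    cred.sign()                                                 # shells out to xmlsec1
    cred.save_to_file('/tmp/slice.cred')
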
+    def save_to_random_tmp_file(self):       
+        fp, filename = mkstemp(suffix='cred', text=True)
+        fp = os.fdopen(fp, "w")
+        self.save_to_file(filename, save_parents=True, filep=fp)
+        return filename
+    
+    def save_to_file(self, filename, save_parents=True, filep=None):
+        if not self.xml:
+            self.encode()
+        if filep:
+            f = filep 
+        else:
+            f = open(filename, "w")
+        f.write(self.xml)
+        f.close()
+
+    def save_to_string(self, save_parents=True):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    def get_refid(self):
+        if not self.refid:
+            self.refid = 'ref0'
+        return self.refid
+
+    def set_refid(self, rid):
+        self.refid = rid
+
+    ##
+    # Figure out what refids exist, and update this credential's id
+    # so that it doesn't clobber the others.  Returns the refids of
+    # the parents.
+    
+    def updateRefID(self):
+        if not self.parent:
+            self.set_refid('ref0')
+            return []
+        
+        refs = []
+
+        next_cred = self.parent
+        while next_cred:
+            refs.append(next_cred.get_refid())
+            if next_cred.parent:
+                next_cred = next_cred.parent
+            else:
+                next_cred = None
+
+        
+        # Find a unique refid for this credential
+        rid = self.get_refid()
+        while rid in refs:
+            val = int(rid[3:])
+            rid = "ref%d" % (val + 1)
+
+        # Set the new refid
+        self.set_refid(rid)
+
+        # Return the set of parent credential ref ids
+        return refs
+
+    def get_xml(self):
+        if not self.xml:
+            self.encode()
+        return self.xml
+
+    ##
+    # Sign the XML file created by encode()
+    #
+    # WARNING:
+    # In general, a signed credential obtained externally should
+    # not be changed else the signature is no longer valid.  So, once
+    # you have loaded an existing signed credential, do not call encode() or sign() on it.
+
+    def sign(self):
+        if not self.issuer_privkey or not self.issuer_gid:
+            return
+        doc = parseString(self.get_xml())
+        sigs = doc.getElementsByTagName("signatures")[0]
+
+        # Create the signature template to be signed
+        signature = Signature()
+        signature.set_refid(self.get_refid())
+        sdoc = parseString(signature.get_xml())        
+        sig_ele = doc.importNode(sdoc.getElementsByTagName("Signature")[0], True)
+        sigs.appendChild(sig_ele)
+
+        self.xml = doc.toxml()
+
+
+        # Split the issuer GID into multiple certificates if it's a chain
+        chain = GID(filename=self.issuer_gid)
+        gid_files = []
+        while chain:
+            gid_files.append(chain.save_to_random_tmp_file(False))
+            if chain.get_parent():
+                chain = chain.get_parent()
+            else:
+                chain = None
+
+
+        # Call out to xmlsec1 to sign it
+        ref = 'Sig_%s' % self.get_refid()
+        filename = self.save_to_random_tmp_file()
+        signed = os.popen('%s --sign --node-id "%s" --privkey-pem %s,%s %s' \
+                 % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)).read()
+        os.remove(filename)
+
+        for gid_file in gid_files:
+            os.remove(gid_file)
+
+        self.xml = signed
+
+        # This is no longer a legacy credential
+        if self.legacy:
+            self.legacy = None
+
+        # Update signatures
+        self.decode()       
+
+        
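
sign() drives xmlsec1 through os.popen rather than a library binding. Roughly, the command line it assembles looks like the sketch below; every value shown is a placeholder standing in for xmlsec_path, the refid, the issuer key, the issuer GID chain files and the temp file produced by save_to_random_tmp_file():

    # a sketch of the command sign() assembles (all values are placeholders):
    cmd = '%s --sign --node-id "%s" --privkey-pem %s,%s %s' % (
        '/usr/bin/xmlsec1',           # self.xmlsec_path
        'Sig_ref0',                   # 'Sig_%s' % self.get_refid()
        '/tmp/authority.pkey',        # self.issuer_privkey
        '/tmp/authority.gid',         # ",".join(gid_files)
        '/tmp/cred.xml')              # temp file holding the unsigned XML
    print(cmd)
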
+    ##
+    # Retrieve the attributes of the credential from the XML.
+    # This is automatically called by the various get_* methods of
+    # this class and should not need to be called explicitly.
+
+    def decode(self):
+        if not self.xml:
+            return
+        doc = parseString(self.xml)
+        sigs = []
+        signed_cred = doc.getElementsByTagName("signed-credential")
+
+        # Is this a signed-cred or just a cred?
+        if len(signed_cred) > 0:
+            creds = signed_cred[0].getElementsByTagName("credential")
+            signatures = signed_cred[0].getElementsByTagName("signatures")
+            if len(signatures) > 0:
+                sigs = signatures[0].getElementsByTagName("Signature")
+        else:
+            creds = doc.getElementsByTagName("credential")
+        
+        if creds is None or len(creds) == 0:
+            # malformed cred file
+            raise CredentialNotVerifiable("Malformed XML: No credential tag found")
+
+        # Just take the first cred if there are more than one
+        cred = creds[0]
+
+        self.set_refid(cred.getAttribute("xml:id"))
+        self.set_expiration(utcparse(getTextNode(cred, "expires")))
+        self.gidCaller = GID(string=getTextNode(cred, "owner_gid"))
+        self.gidObject = GID(string=getTextNode(cred, "target_gid"))   
+
+
+        # Process privileges
+        privs = cred.getElementsByTagName("privileges")[0]
+        rlist = Rights()
+        for priv in privs.getElementsByTagName("privilege"):
+            kind = getTextNode(priv, "name")
+            deleg = str2bool(getTextNode(priv, "can_delegate"))
+            if kind == '*':
+                # Convert * into the default privileges for the credential's type
+                # Each inherits the delegatability from the * above
+                _ , type = urn_to_hrn(self.gidObject.get_urn())
+                rl = determine_rights(type, self.gidObject.get_urn())
+                for r in rl.rights:
+                    r.delegate = deleg
+                    rlist.add(r)
+            else:
+                rlist.add(Right(kind.strip(), deleg))
+        self.set_privileges(rlist)
+
+
+        # Is there a parent?
+        parent = cred.getElementsByTagName("parent")
+        if len(parent) > 0:
+            parent_doc = parent[0].getElementsByTagName("credential")[0]
+            parent_xml = parent_doc.toxml()
+            self.parent = Credential(string=parent_xml)
+            self.updateRefID()
+
+        # Assign the signatures to the credentials
+        for sig in sigs:
+            Sig = Signature(string=sig.toxml())
+
+            for cur_cred in self.get_credential_list():
+                if cur_cred.get_refid() == Sig.get_refid():
+                    cur_cred.set_signature(Sig)
+                                    
+            
+    ##
+    # Verify
+    #   trusted_certs: A list of trusted GID filenames (not GID objects!) 
+    #                  Chaining is not supported within the GIDs by xmlsec1.
+    #
+    #   trusted_certs_required: Should usually be true. Setting it False means an
+    #                 empty list of trusted_certs still lets this method pass;
+    #                 it just skips xmlsec1 verification and related checks. Only used by some utils.
+    #    
+    # Verify that:
+    # . All of the signatures are valid and that the issuers trace back
+    #   to trusted roots (performed by xmlsec1)
+    # . The XML matches the credential schema
+    # . That the issuer of the credential is the authority in the target's urn
+    #    . In the case of a delegated credential, this must be true of the root
+    # . That all of the gids presented in the credential are valid
+    #    . Including verifying GID chains, and including the issuer
+    # . The credential is not expired
+    #
+    # -- For Delegates (credentials with parents)
+    # . The privileges must be a subset of the parent credential's privileges
+    # . The privileges must have "can_delegate" set for each delegated privilege
+    # . The target gid must be the same between child and parents
+    # . The expiry time on the child must be no later than the parent
+    # . The signer of the child must be the owner of the parent
+    #
+    # -- Verify does *NOT*
+    # . ensure that an xmlrpc client's gid matches a credential gid, that
+    #   must be done elsewhere
+    #
+    # @param trusted_certs: The certificates of trusted CA certificates
+    def verify(self, trusted_certs=None, schema=None, trusted_certs_required=True):
+        if not self.xml:
+            self.decode()
+
+        # validate against RelaxNG schema
+        if HAVELXML and not self.legacy:
+            if schema and os.path.exists(schema):
+                tree = etree.parse(StringIO(self.xml))
+                schema_doc = etree.parse(schema)
+                xmlschema = etree.XMLSchema(schema_doc)
+                if not xmlschema.validate(tree):
+                    error = xmlschema.error_log.last_error
+                    message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)
+                    raise CredentialNotVerifiable(message)
+
+        if trusted_certs_required and trusted_certs is None:
+            trusted_certs = []
+
+#        trusted_cert_objects = [GID(filename=f) for f in trusted_certs]
+        trusted_cert_objects = []
+        ok_trusted_certs = []
+        # If caller explicitly passed in None that means skip cert chain validation.
+        # Strange and not typical
+        if trusted_certs is not None:
+            for f in trusted_certs:
+                try:
+                    # Failures here include unreadable files
+                    # or non PEM files
+                    trusted_cert_objects.append(GID(filename=f))
+                    ok_trusted_certs.append(f)
+                except Exception, exc:
+                    logger.error("Failed to load trusted cert from %s: %r", f, exc)
+            trusted_certs = ok_trusted_certs
+
+        # Use legacy verification if this is a legacy credential
+        if self.legacy:
+            self.legacy.verify_chain(trusted_cert_objects)
+            if self.legacy.client_gid:
+                self.legacy.client_gid.verify_chain(trusted_cert_objects)
+            if self.legacy.object_gid:
+                self.legacy.object_gid.verify_chain(trusted_cert_objects)
+            return True
+        
+        # make sure it is not expired
+        if self.get_expiration() < datetime.datetime.utcnow():
+            raise CredentialNotVerifiable("Credential %s expired at %s" % (self.get_summary_tostring(), self.expiration.isoformat()))
+
+        # Verify the signatures
+        filename = self.save_to_random_tmp_file()
+        if trusted_certs is not None:
+            cert_args = " ".join(['--trusted-pem %s' % x for x in trusted_certs])
+
+        # If caller explicitly passed in None that means skip cert chain validation.
+        # - Strange and not typical
+        if trusted_certs is not None:
+            # Verify the gids of this cred and of its parents
+            for cur_cred in self.get_credential_list():
+                cur_cred.get_gid_object().verify_chain(trusted_cert_objects)
+                cur_cred.get_gid_caller().verify_chain(trusted_cert_objects)
+
+        refs = []
+        refs.append("Sig_%s" % self.get_refid())
+
+        parentRefs = self.updateRefID()
+        for ref in parentRefs:
+            refs.append("Sig_%s" % ref)
+
+        for ref in refs:
+            # If caller explicitly passed in None that means skip xmlsec1 validation.
+            # Strange and not typical
+            if trusted_certs is None:
+                break
+
+#            print "Doing %s --verify --node-id '%s' %s %s 2>&1" % \
+#                (self.xmlsec_path, ref, cert_args, filename)
+            verified = os.popen('%s --verify --node-id "%s" %s %s 2>&1' \
+                            % (self.xmlsec_path, ref, cert_args, filename)).read()
+            if not verified.strip().startswith("OK"):
+                # xmlsec errors have a msg= which is the interesting bit.
+                mstart = verified.find("msg=")
+                msg = ""
+                if mstart > -1 and len(verified) > 4:
+                    mstart = mstart + 4
+                    mend = verified.find('\\', mstart)
+                    msg = verified[mstart:mend]
+                raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s %s" % (self.get_summary_tostring(), ref, msg, verified.strip()))
+        os.remove(filename)
+
+        # Verify the parents (delegation)
+        if self.parent:
+            self.verify_parent(self.parent)
+
+        # Make sure the issuer is the target's authority, and is
+        # itself a valid GID
+        self.verify_issuer(trusted_cert_objects)
+        return True
+
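
A typical verification call passes filenames of the trusted root GIDs (not GID objects) and, optionally, the credential schema. A hedged sketch in this module's namespace; the directory and schema paths are placeholders:

    import glob

    cred = Credential(filename='/tmp/slice.cred')                  # placeholder path
    trusted = glob.glob('/etc/sfa/trusted_roots/*.gid')            # placeholder directory
    try:
        cred.verify(trusted, schema='/etc/sfa/credential.xsd')     # placeholder schema path
        print("credential verified")
    except CredentialNotVerifiable, e:
        print("verification failed: %s" % e)
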
+    ##
+    # Creates a list of the credential and its parents, with the root 
+    # (original delegated credential) as the last item in the list
+    def get_credential_list(self):    
+        cur_cred = self
+        list = []
+        while cur_cred:
+            list.append(cur_cred)
+            if cur_cred.parent:
+                cur_cred = cur_cred.parent
+            else:
+                cur_cred = None
+        return list
+    
+    ##
+    # Make sure the credential's target gid (a) was signed by or (b)
+    # is the same as the entity that signed the original credential,
+    # or (c) is an authority over the target's namespace.
+    # Also ensure that the credential issuer / signer itself has a valid
+    # GID signature chain (signed by an authority with namespace rights).
+    def verify_issuer(self, trusted_gids):
+        root_cred = self.get_credential_list()[-1]
+        root_target_gid = root_cred.get_gid_object()
+        root_cred_signer = root_cred.get_signature().get_issuer_gid()
+
+        # Case 1:
+        # Allow non authority to sign target and cred about target.
+        #
+        # Why do we need to allow non authorities to sign?
+        # If in the target gid validation step we correctly
+        # checked that the target is only signed by an authority,
+        # then this is just a special case of case 3.
+        # This short-circuit is the common case currently -
+        # and because GID validation doesn't check 'authority',
+        # this allows users to generate valid slice credentials.
+        if root_target_gid.is_signed_by_cert(root_cred_signer):
+            # cred signer matches target signer, return success
+            return
+
+        # Case 2:
+        # Allow someone to sign a credential about themselves. Used?
+        # If not, remove this.
+        #root_target_gid_str = root_target_gid.save_to_string()
+        #root_cred_signer_str = root_cred_signer.save_to_string()
+        #if root_target_gid_str == root_cred_signer_str:
+        #    # cred signer is target, return success
+        #    return
+
+        # Case 3:
+
+        # root_cred_signer is not the target_gid
+        # So this is a different gid that we have not verified.
+        # xmlsec1 verified the cert chain on this already, but
+        # it hasn't verified that the gid meets the HRN namespace
+        # requirements.
+        # Below we'll ensure that it is an authority.
+        # But we haven't verified that it is _signed by_ an authority
+        # We also don't know if xmlsec1 requires that cert signers
+        # are marked as CAs.
+
+        # Note that if verify() gave us no trusted_gids then this
+        # call will fail. So skip it if we have no trusted_gids
+        if trusted_gids and len(trusted_gids) > 0:
+            root_cred_signer.verify_chain(trusted_gids)
+        else:
+            logger.debug("No trusted gids. Cannot verify that cred signer is signed by a trusted authority. Skipping that check.")
+
+        # See if the signer is an authority over the domain of the target.
+        # There are multiple types of authority - accept them all here
+        # Maybe should be (hrn, type) = urn_to_hrn(root_cred_signer.get_urn())
+        root_cred_signer_type = root_cred_signer.get_type()
+        if (root_cred_signer_type.find('authority') == 0):
+            #logger.debug('Cred signer is an authority')
+            # signer is an authority, see if target is in authority's domain
+            signerhrn = root_cred_signer.get_hrn()
+            if hrn_authfor_hrn(signerhrn, root_target_gid.get_hrn()):
+                return
+
+        # We've required that the credential be signed by an authority
+        # for that domain. Reasonable and probably correct.
+        # A looser model would also allow the signer to be an authority
+        # in my control framework - eg My CA or CH. Even if it is not
+        # the CH that issued these, eg, user credentials.
+
+        # Give up, credential does not pass issuer verification
+
+        raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred signer %s not the trusted authority for Cred target %s" % (self.gidCaller.get_urn(), self.gidObject.get_urn(), root_cred_signer.get_hrn(), root_target_gid.get_hrn()))
+
+
+    ##
+    # -- For Delegates (credentials with parents) verify that:
+    # . The privileges must be a subset of the parent credential's privileges
+    # . The privileges must have "can_delegate" set for each delegated privilege
+    # . The target gid must be the same between child and parents
+    # . The expiry time on the child must be no later than the parent
+    # . The signer of the child must be the owner of the parent        
+    def verify_parent(self, parent_cred):
+        # make sure the rights given to the child are a subset of the
+        # parents rights (and check delegate bits)
+        if not parent_cred.get_privileges().is_superset(self.get_privileges()):
+            raise ChildRightsNotSubsetOfParent(("Parent cred ref %s rights " % parent_cred.get_refid()) +
+                parent_cred.get_privileges().save_to_string() + (" not superset of delegated cred %s ref %s rights " % (self.get_summary_tostring(), self.get_refid())) +
+                self.get_privileges().save_to_string())
+
+        # make sure my target gid is the same as the parent's
+        if not parent_cred.get_gid_object().save_to_string() == \
+           self.get_gid_object().save_to_string():
+            raise CredentialNotVerifiable("Delegated cred %s: Target gid not equal between parent and child. Parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+
+        # make sure my expiry time is <= my parent's
+        if not parent_cred.get_expiration() >= self.get_expiration():
+            raise CredentialNotVerifiable("Delegated credential %s expires after parent %s" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+
+        # make sure my signer is the parent's caller
+        if not parent_cred.get_gid_caller().save_to_string(False) == \
+           self.get_signature().get_issuer_gid().save_to_string(False):
+            raise CredentialNotVerifiable("Delegated credential %s not signed by parent %s's caller" % (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+                
+        # Recurse
+        if parent_cred.parent:
+            parent_cred.verify_parent(parent_cred.parent)
+
+
+    def delegate(self, delegee_gidfile, caller_keyfile, caller_gidfile):
+        """
+        Return a delegated copy of this credential, delegated to the 
+        specified gid's user.    
+        """
+        # get the gid of the object we are delegating
+        object_gid = self.get_gid_object()
+        object_hrn = object_gid.get_hrn()        
+        # the hrn of the user who will be delegated to
+        delegee_gid = GID(filename=delegee_gidfile)
+        delegee_hrn = delegee_gid.get_hrn()
+  
+        #user_key = Keypair(filename=keyfile)
+        #user_hrn = self.get_gid_caller().get_hrn()
+        subject_string = "%s delegated to %s" % (object_hrn, delegee_hrn)
+        dcred = Credential(subject=subject_string)
+        dcred.set_gid_caller(delegee_gid)
+        dcred.set_gid_object(object_gid)
+        dcred.set_parent(self)
+        dcred.set_expiration(self.get_expiration())
+        dcred.set_privileges(self.get_privileges())
+        dcred.get_privileges().delegate_all_privileges(True)
+        #dcred.set_issuer_keys(keyfile, delegee_gidfile)
+        dcred.set_issuer_keys(caller_keyfile, caller_gidfile)
+        dcred.encode()
+        dcred.sign()
+
+        return dcred
+
+    # only informative
+    def get_filename(self):
+        return getattr(self,'filename',None)
+
+    ##
+    # Dump the contents of a credential to stdout in human-readable format
+    #
+    # @param dump_parents If true, also dump the parent certificates
+    def dump (self, *args, **kwargs):
+        print self.dump_string(*args, **kwargs)
+
+
+    def dump_string(self, dump_parents=False):
+        result=""
+        result += "CREDENTIAL %s\n" % self.get_subject()
+        filename=self.get_filename()
+        if filename: result += "Filename %s\n"%filename
+        result += "      privs: %s\n" % self.get_privileges().save_to_string()
+        gidCaller = self.get_gid_caller()
+        if gidCaller:
+            result += "  gidCaller:\n"
+            result += gidCaller.dump_string(8, dump_parents)
+
+        if self.get_signature():
+            result += "  gidIssuer:\n"
+            result += self.get_signature().get_issuer_gid().dump_string(8, dump_parents)
+
+        gidObject = self.get_gid_object()
+        if gidObject:
+            result += "  gidObject:\n"
+            result += gidObject.dump_string(8, dump_parents)
+
+        if self.parent and dump_parents:
+            result += "\nPARENT"
+            result += self.parent.dump_string(True)
+
+        return result
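
The delegate() method above is the entry point for producing a signed child credential. A minimal usage sketch follows; it is illustrative only and not part of the patch: the file names are placeholders, and Credential(filename=...) is assumed to load an existing credential as it does elsewhere in this codebase.

# Hypothetical delegation sequence; all file names below are placeholders.
from sfa.trust.credential import Credential

original = Credential(filename="alice_slice_cred.xml")    # credential held by the delegator
delegated = original.delegate("bob_gid.xml",              # GID file of the delegate
                              "alice_key.pem",            # delegator's private key
                              "alice_gid.xml")            # delegator's GID
# hand the signed child credential to the delegate
open("bob_delegated_cred.xml", "w").write(delegated.save_to_string())
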
index 8ba90b2..e66e699 100644 (file)
@@ -5,14 +5,10 @@
 # certificate that stores a tuple of parameters.
 ##
 
-### $Id: credential.py 17477 2010-03-25 16:49:34Z jkarlin $
-### $URL: svn+ssh://svn.planet-lab.org/svn/sfa/branches/geni-api/sfa/trust/credential.py $
-
 import xmlrpclib
 
-from sfa.util.faults import *
+from sfa.util.faults import MissingDelegateBit, ChildRightsNotSubsetOfParent
 from sfa.trust.certificate import Certificate
-from sfa.trust.rights import Right,Rights
 from sfa.trust.gid import GID
 
 ##
index 15ad6bf..656de4b 100644 (file)
@@ -30,7 +30,7 @@ import uuid
 
 from sfa.trust.certificate import Certificate
 
-from sfa.util.faults import *
+from sfa.util.faults import GidInvalidParentHrn, GidParentHrn
 from sfa.util.sfalogging import logger
 from sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
 
index 6323436..9648c9d 100644 (file)
 
 import os
 
-from sfa.util.faults import *
+from sfa.util.faults import MissingAuthority
 from sfa.util.sfalogging import logger
 from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn, urn_to_hrn
 from sfa.trust.certificate import Keypair
 from sfa.trust.credential import Credential
 from sfa.trust.gid import GID, create_uuid
 from sfa.util.config import Config
-from sfa.util.sfaticket import SfaTicket
+from sfa.trust.sfaticket import SfaTicket
 
 ##
 # The AuthInfo class contains the information for an authority. This information
@@ -204,7 +204,7 @@ class Hierarchy:
     def get_auth_info(self, xrn):
         hrn, type = urn_to_hrn(xrn)
         if not self.auth_exists(hrn):
-            logger.warning("Hierarchy: mising authority - xrn=%s, hrn=%s"%(xrn,hrn))
+            logger.warning("Hierarchy: missing authority - xrn=%s, hrn=%s"%(xrn,hrn))
             raise MissingAuthority(hrn)
 
         (directory, gid_filename, privkey_filename, dbinfo_filename) = \
similarity index 98%
rename from sfa/util/sfaticket.py
rename to sfa/trust/sfaticket.py
index 0be5d93..018d929 100644 (file)
@@ -5,8 +5,7 @@
 import xmlrpclib
 
 from sfa.trust.certificate import Certificate
-from sfa.trust.rights import *
-from sfa.trust.gid import *
+from sfa.trust.gid import GID
 
 # Ticket is tuple:
 #   (gidCaller, gidObject, attributes, rspec, delegate)
index 19cd4d0..f16f48d 100644 (file)
@@ -21,7 +21,7 @@ import sys
 try: import pgdb
 except: print >> sys.stderr, "WARNING, could not import pgdb"
 
-from sfa.util.faults import *
+from sfa.util.faults import SfaDBError
 from sfa.util.sfalogging import logger
 
 if not psycopg2:
diff --git a/sfa/util/bwlimit.py b/sfa/util/bwlimit.py
deleted file mode 100644 (file)
index 62d3cca..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-# Taken from bwlimit.py
-#
-# See tc_util.c and http://physics.nist.gov/cuu/Units/binary.html. Be
-# warned that older versions of tc interpret "kbps", "mbps", "mbit",
-# and "kbit" to mean (in this system) "kibps", "mibps", "mibit", and
-# "kibit" and that if an older version is installed, all rates will
-# be off by a small fraction.
-suffixes = {
-    "":         1,
-    "bit":  1,
-    "kibit":    1024,
-    "kbit": 1000,
-    "mibit":    1024*1024,
-    "mbit": 1000000,
-    "gibit":    1024*1024*1024,
-    "gbit": 1000000000,
-    "tibit":    1024*1024*1024*1024,
-    "tbit": 1000000000000,
-    "bps":  8,
-    "kibps":    8*1024,
-    "kbps": 8000,
-    "mibps":    8*1024*1024,
-    "mbps": 8000000,
-    "gibps":    8*1024*1024*1024,
-    "gbps": 8000000000,
-    "tibps":    8*1024*1024*1024*1024,
-    "tbps": 8000000000000
-}
-
-def get_tc_rate(s):
-    """
-    Parses an integer or a tc rate string (e.g., 1.5mbit) into bits/second
-    """
-
-    if type(s) == int:
-        return s
-    m = re.match(r"([0-9.]+)(\D*)", s)
-    if m is None:
-        return -1
-    suffix = m.group(2).lower()
-    if suffixes.has_key(suffix):
-        return int(float(m.group(1)) * suffixes[suffix])
-    else:
-        return -1
-
-def format_tc_rate(rate):
-    """
-    Formats a bits/second rate into a tc rate string
-    """
-
-    if rate >= 1000000000 and (rate % 1000000000) == 0:
-        return "%.0fgbit" % (rate / 1000000000.)
-    elif rate >= 1000000 and (rate % 1000000) == 0:
-        return "%.0fmbit" % (rate / 1000000.)
-    elif rate >= 1000:
-        return "%.0fkbit" % (rate / 1000.)
-    else:
-        return "%.0fbit" % rate
index 0383ccc..a2ded4a 100644 (file)
@@ -82,9 +82,18 @@ class Cache:
            
     def get(self, key):
         data = self.cache.get(key)
-        if not data or data.is_expired():
-            return None 
-        return data.get_data()
+        if not data:  
+            data = None
+        elif data.is_expired():
+            self.pop(key)
+            data = None 
+        else:
+            data = data.get_data()
+        return data
+
+    def pop(self, key):
+        if key in self.cache:
+            self.cache.pop(key) 
 
     def dump(self):
         result = {}
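
The get()/pop() change above makes expired cache entries evicted the first time they are read, instead of lingering in the dict. The stand-alone sketch below illustrates that pattern; CacheData here is a simplified stand-in for the real cached-entry class, which this hunk does not show.

import time

class CacheData:
    def __init__(self, data, ttl=60):
        self.data = data
        self.expires = time.time() + ttl
    def is_expired(self):
        return time.time() > self.expires
    def get_data(self):
        return self.data

class Cache:
    def __init__(self):
        self.cache = {}
    def add(self, key, value, ttl=60):
        self.cache[key] = CacheData(value, ttl)
    def get(self, key):
        data = self.cache.get(key)
        if not data:
            data = None
        elif data.is_expired():
            self.pop(key)              # expired entries are evicted on read
            data = None
        else:
            data = data.get_data()
        return data
    def pop(self, key):
        if key in self.cache:
            self.cache.pop(key)
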
diff --git a/sfa/util/componentserver.py b/sfa/util/componentserver.py
deleted file mode 100644 (file)
index 9fa40f9..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-##
-# This module implements a general-purpose server layer for sfa.
-# The same basic server should be usable on the registry, component, or
-# other interfaces.
-#
-# TODO: investigate ways to combine this with existing PLC server?
-##
-
-### $Id$
-### $URL$
-
-import sys
-import traceback
-import threading
-import socket, os
-import SocketServer
-import BaseHTTPServer
-import SimpleHTTPServer
-import SimpleXMLRPCServer
-from OpenSSL import SSL
-
-from sfa.util.sfalogging import logger
-from sfa.trust.certificate import Keypair, Certificate
-from sfa.trust.credential import *
-from sfa.util.faults import *
-from sfa.plc.api import ComponentAPI 
-from sfa.util.server import verify_callback, ThreadedServer 
-
-
-##
-# taken from the web (XXX find reference). Implents HTTPS xmlrpc request handler
-
-class SecureXMLRpcRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
-    """Secure XML-RPC request handler class.
-
-    It it very similar to SimpleXMLRPCRequestHandler but it uses HTTPS for transporting XML data.
-    """
-    def setup(self):
-        self.connection = self.request
-        self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
-        self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
-
-    def do_POST(self):
-        """Handles the HTTPS POST request.
-
-        It was copied out from SimpleXMLRPCServer.py and modified to shutdown the socket cleanly.
-        """
-        try:
-            peer_cert = Certificate()
-            peer_cert.load_from_pyopenssl_x509(self.connection.get_peer_certificate())
-            self.api = ComponentAPI(peer_cert = peer_cert, 
-                           interface = self.server.interface, 
-                           key_file = self.server.key_file, 
-                           cert_file = self.server.cert_file)
-            # get arguments
-            request = self.rfile.read(int(self.headers["content-length"]))
-            # In previous versions of SimpleXMLRPCServer, _dispatch
-            # could be overridden in this class, instead of in
-            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
-            # check to see if a subclass implements _dispatch and dispatch
-            # using that method if present.
-            #response = self.server._marshaled_dispatch(request, getattr(self, '_dispatch', None))
-            # XX TODO: Need to get the real remote address
-            remote_addr = (remote_ip, remote_port) = self.connection.getpeername()
-            self.api.remote_addr = remote_addr
-            #remote_addr = (self.rfile.connection.remote_ip, remote_port)
-            #self.api.remote_addr = remote_addr
-            response = self.api.handle(remote_addr, request)
-
-        
-        except Exception, fault:
-            raise
-            # This should only happen if the module is buggy
-            # internal error, report as HTTP server error
-            self.send_response(500)
-            self.end_headers()
-            logger.log_exc("componentserver.SecureXMLRpcRequestHandler.do_POST")
-        else:
-            # got a valid XML RPC response
-            self.send_response(200)
-            self.send_header("Content-type", "text/xml")
-            self.send_header("Content-length", str(len(response)))
-            self.end_headers()
-            self.wfile.write(response)
-
-            # shut down the connection
-            self.wfile.flush()
-            self.connection.shutdown() # Modified here!
-
-##
-# Implements an HTTPS XML-RPC server. Generally it is expected that SFA
-# functions will take a credential string, which is passed to
-# decode_authentication. Decode_authentication() will verify the validity of
-# the credential, and verify that the user is using the key that matches the
-# GID supplied in the credential.
-
-class ComponentServer(threading.Thread):
-
-    ##
-    # Create a new SfaServer object.
-    #
-    # @param ip the ip address to listen on
-    # @param port the port to listen on
-    # @param key_file private key filename of registry
-    # @param cert_file certificate filename containing public key 
-    #   (could be a GID file)
-
-    def __init__(self, ip, port, key_file, cert_file, api=None):
-        threading.Thread.__init__(self)
-        self.key = Keypair(filename = key_file)
-        self.cert = Certificate(filename = cert_file)
-        self.server = ThreadedServer((ip, port), SecureXMLRpcRequestHandler, key_file, cert_file)
-        self.trusted_cert_list = None
-        self.register_functions()
-
-
-    ##
-    # Register functions that will be served by the XMLRPC server. This
-    # function should be overrided by each descendant class.
-
-    def register_functions(self):
-        self.server.register_function(self.noop)
-
-    ##
-    # Sample no-op server function. The no-op function decodes the credential
-    # that was passed to it.
-
-    def noop(self, cred, anything):
-        self.decode_authentication(cred)
-
-        return anything
-
-    ##
-    # Execute the server, serving requests forever. 
-
-    def run(self):
-        self.server.serve_forever()
-
-
index 42d2c87..1466916 100644 (file)
@@ -14,9 +14,6 @@
 # Note that SFA does not access any of the PLC databases directly via
 # a mysql connection; All PLC databases are accessed via PLCAPI.
 
-### $Id$
-### $URL$
-
 import os.path
 import traceback
 
@@ -79,7 +76,7 @@ class Config:
                 except: pass
              
         except IOError, e:
-            raise IOError, "Could not find the configuration file: %s" % config_file
+            raise IOError, "Could not find or load the configuration file: %s" % config_file
 
     def get_trustedroots_dir(self):
         return self.config_path + os.sep + 'trusted_roots'
diff --git a/sfa/util/defaultdict.py b/sfa/util/defaultdict.py
new file mode 100644 (file)
index 0000000..e0dd145
--- /dev/null
@@ -0,0 +1,38 @@
+# from http://code.activestate.com/recipes/523034/
+try:
+    from collections import defaultdict
+except:
+    class defaultdict(dict):
+        def __init__(self, default_factory=None, *a, **kw):
+            if (default_factory is not None and
+                not hasattr(default_factory, '__call__')):
+                raise TypeError('first argument must be callable')
+            dict.__init__(self, *a, **kw)
+            self.default_factory = default_factory
+        def __getitem__(self, key):
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return self.__missing__(key)
+        def __missing__(self, key):
+            if self.default_factory is None:
+                raise KeyError(key)
+            self[key] = value = self.default_factory()
+            return value
+        def __reduce__(self):
+            if self.default_factory is None:
+                args = tuple()
+            else:
+                args = self.default_factory,
+            return type(self), args, None, None, self.items()
+        def copy(self):
+            return self.__copy__()
+        def __copy__(self):
+            return type(self)(self.default_factory, self)
+        def __deepcopy__(self, memo):
+            import copy
+            return type(self)(self.default_factory,
+                              copy.deepcopy(self.items()))
+        def __repr__(self):
+            return 'defaultdict(%s, %s)' % (self.default_factory,
+                                            dict.__repr__(self))
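
The new sfa/util/defaultdict.py falls back to this pure-python class only when collections.defaultdict is unavailable (python < 2.5); callers see the same behaviour either way. A short usage sketch, with made-up keys:

from sfa.util.defaultdict import defaultdict

counts = defaultdict(int)
for name in ["node", "slice", "node"]:
    counts[name] += 1                 # missing keys start from int() == 0
# counts == {'node': 2, 'slice': 1}

by_site = defaultdict(list)
by_site["princeton"].append("planetlab-1")   # missing keys start as []
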
index 344da1c..91e5300 100644 (file)
@@ -278,6 +278,22 @@ class InvalidRSpecElement(SfaFault):
     def __str__(self):
         return repr(self.value)
 
+class InvalidXML(SfaFault):
+    def __init__(self, value, extra = None):
+        self.value = value
+        faultString = "Invalid XML Document: %(value)s" % locals()
+        SfaFault.__init__(self, 108, faultString, extra)
+    def __str__(self):
+        return repr(self.value)
+
+class InvalidXMLElement(SfaFault):
+    def __init__(self, value, extra = None):
+        self.value = value
+        faultString = "Invalid XML Element: %(value)s" % locals()
+        SfaFault.__init__(self, 108, faultString, extra)
+    def __str__(self):
+        return repr(self.value)
+
 class AccountNotEnabled(SfaFault):
     def __init__(self,  extra = None):
         faultString = "Account Disabled"
index ada44ba..8f037ca 100644 (file)
@@ -5,11 +5,10 @@ except NameError:
     from sets import Set
     set = Set
 
-import time
 try: import pgdb
 except: pass
  
-from sfa.util.faults import *
+from sfa.util.faults import SfaInvalidArgument
 from sfa.util.parameter import Parameter, Mixed, python_type
 
 
@@ -128,7 +127,7 @@ class Filter(Parameter, dict):
 
            for char in modifiers.keys():
                if field[0] == char:
-                   modifiers[char]=True;
+                   modifiers[char]=True
                    field = field[1:]
                    break
 
diff --git a/sfa/util/httpsProtocol.py b/sfa/util/httpsProtocol.py
deleted file mode 100644 (file)
index e6c6be1..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-import httplib
-import socket
-import sys
-
-
-def is_python26():
-    return False
-    #return sys.version_info[0] == 2 and sys.version_info[1] == 6
-
-# wrapper around standartd https modules. Properly supports timeouts.  
-
-class HTTPSConnection(httplib.HTTPSConnection):
-    def __init__(self, host, port=None, key_file=None, cert_file=None,
-                 strict=None, timeout = None):
-        httplib.HTTPSConnection.__init__(self, host, port, key_file, cert_file, strict)
-        if timeout:
-            timeout = float(timeout)
-        self.timeout = timeout
-
-    def connect(self):
-        """Connect to a host on a given (SSL) port."""
-        if is_python26():
-            from sfa.util.ssl_socket import SSLSocket
-            sock = socket.create_connection((self.host, self.port), self.timeout)
-            if self._tunnel_host:
-                self.sock = sock
-                self._tunnel()
-            self.sock = SSLSocket(sock, self.key_file, self.cert_file)
-        else:
-            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-            sock.settimeout(self.timeout)
-            sock.connect((self.host, self.port))
-            ssl = socket.ssl(sock, self.key_file, self.cert_file)
-            self.sock = httplib.FakeSocket(sock, ssl)
-
-class HTTPS(httplib.HTTPS):
-    def __init__(self, host='', port=None, key_file=None, cert_file=None,
-                     strict=None, timeout = None):
-        # urf. compensate for bad input.
-        if port == 0:
-            port = None
-        self._setup(HTTPSConnection(host, port, key_file, cert_file, strict, timeout))
-
-        # we never actually use these for anything, but we keep them
-        # here for compatibility with post-1.5.2 CVS.
-        self.key_file = key_file
-        self.cert_file = cert_file
-    
-    def set_timeout(self, timeout):
-        if is_python26():
-            self._conn.timeout = timeout
index 4c37c67..97ddb39 100644 (file)
@@ -3,18 +3,14 @@
 #
 #
 
-import os, time
-from types import *
-from types import StringTypes
-import traceback
+import time
+from types import IntType, LongType, StringTypes
 import textwrap
-import xmlrpclib
 
 
 from sfa.util.sfalogging import logger
-from sfa.util.faults import *
+from sfa.util.faults import SfaFault, SfaInvalidAPIMethod, SfaInvalidArgumentCount, SfaInvalidArgument
 from sfa.util.parameter import Parameter, Mixed, python_type, xmlrpc_type
-from sfa.trust.auth import Auth
 
 class Method:
     """
index 00d9319..e520bfc 100644 (file)
@@ -4,14 +4,9 @@
 # Mark Huang <mlhuang@cs.princeton.edu>
 # Copyright (C) 2006 The Trustees of Princeton University
 #
-# $Id$
-#
-
-### $Id$
-### $URL$
 
-from types import *
-from sfa.util.faults import *
+from types import NoneType, IntType, LongType, FloatType, StringTypes, DictType, TupleType, ListType
+from sfa.util.faults import SfaAPIError
 
 class Parameter:
     """
index dacdd51..f292823 100644 (file)
@@ -26,7 +26,7 @@ class PlXrn (Xrn):
     def site_hrn (auth, login_base):
         return '.'.join([auth,login_base])
 
-    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, **kwargs):
+    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
         #def hostname_to_hrn(auth_hrn, login_base, hostname):
         if hostname is not None:
             self.type='node'
@@ -48,6 +48,10 @@ class PlXrn (Xrn):
             # keep only the part before '@' and replace special chars into _
             self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
             self.hrn_to_urn()
+        elif interface is not None:
+            self.type = 'interface'
+            self.hrn = auth + '.' + interface
+            self.hrn_to_urn()
         else:
             Xrn.__init__ (self,**kwargs)
 
@@ -63,6 +67,10 @@ class PlXrn (Xrn):
         self._normalize()
         return self.authority[-1]
 
+    def interface_name(self):
+        self._normalize()
+        return self.leaf
+
     #def hrn_to_pl_login_base(hrn):
     def pl_login_base (self):
         self._normalize()
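
With the interface branch added above, a PlXrn can now be built from an authority plus an interface name. A small sketch follows; the module path and the example names are assumptions.

from sfa.util.plxrn import PlXrn      # assumed location of PlXrn in this tree

xrn = PlXrn(auth='plc.princeton', interface='eth0')
print xrn.get_type()          # 'interface'
print xrn.get_hrn()           # 'plc.princeton.eth0'
print xrn.interface_name()    # 'eth0', the leaf of the hrn
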
index aa68f43..5e43be5 100644 (file)
@@ -1,9 +1,6 @@
-### $Id$
-### $URL$
-
 import os
 
-from sfa.util.storage import *
+from sfa.util.storage import SimpleStorage
 
 class Policy(SimpleStorage):
 
index 8a50a65..7ebf379 100644 (file)
@@ -4,17 +4,14 @@
 # TODO: Use existing PLC database methods? or keep this separate?
 ##
 
-### $Id$
-### $URL$
-
 from types import StringTypes
 
-from sfa.trust.gid import *
+from sfa.trust.gid import GID
 
-from sfa.util.rspec import *
-from sfa.util.parameter import *
+from sfa.util.parameter import Parameter
 from sfa.util.xrn import get_authority
 from sfa.util.row import Row
+from sfa.util.xml import XML 
 
 class SfaRecord(Row):
     """ 
@@ -282,6 +279,7 @@ class SfaRecord(Row):
         """
         Load the record from a dictionary 
         """
+
         self.set_name(dict['hrn'])
         gidstr = dict.get("gid", None)
         if gidstr:
@@ -304,10 +302,9 @@ class SfaRecord(Row):
         """
         recorddict = self.as_dict()
         filteredDict = dict([(key, val) for (key, val) in recorddict.iteritems() if key in self.fields.keys()])
-        record = RecordSpec()
-        record.parseDict(filteredDict)
+        record = XML('<record/>')
+        record.parse_dict(filteredDict)
         str = record.toxml()
-        #str = xmlrpclib.dumps((dict,), allow_none=True)
         return str
 
     ##
@@ -320,12 +317,9 @@ class SfaRecord(Row):
         representation of the record.
         """
         #dict = xmlrpclib.loads(str)[0][0]
-        
-        record = RecordSpec()
-        record.parseString(str)
-        record_dict = record.toDict()
-        sfa_dict = record_dict['record']
-        self.load_from_dict(sfa_dict)
+
+        record = XML(str)
+        self.load_from_dict(record.todict())
 
     ##
     # Dump the record to stdout
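
Record serialization now goes through sfa.util.xml.XML instead of the removed RecordSpec. The round-trip sketch below uses only the calls visible in this hunk (XML('<record/>'), parse_dict, toxml, todict); the record fields are placeholders.

from sfa.util.xml import XML

record = XML('<record/>')
record.parse_dict({'hrn': 'plc.princeton.alice', 'type': 'user'})
xml_string = record.toxml()        # the serialized form returned by save_to_string()

parsed = XML(xml_string)
print parsed.todict()              # back to a plain dict, as in load_from_string()
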
diff --git a/sfa/util/rspec.py b/sfa/util/rspec.py
deleted file mode 100644 (file)
index 1f6f228..0000000
+++ /dev/null
@@ -1,421 +0,0 @@
-import sys
-import pprint
-import os
-from StringIO import StringIO
-from types import StringTypes, ListType
-import httplib
-from xml.dom import minidom
-from lxml import etree
-import codecs 
-from sfa.util.sfalogging import info_logger
-
-class RSpec:
-
-    def __init__(self, xml = None, xsd = None, NSURL = None):
-        '''
-        Class to manipulate RSpecs.  Reads and parses rspec xml into python dicts
-        and reads python dicts and writes rspec xml
-
-        self.xsd = # Schema.  Can be local or remote file.
-        self.NSURL = # If schema is remote, Name Space URL to query (full path minus filename)
-        self.rootNode = # root of the DOM
-        self.dict = # dict of the RSpec.
-        self.schemaDict = {} # dict of the Schema
-        '''
-        self.xsd = xsd
-        self.rootNode = None
-        self.dict = {}
-        self.schemaDict = {}
-        self.NSURL = NSURL 
-        if xml:
-            if type(xml) == file:
-                self.parseFile(xml)
-            if type(xml) in StringTypes:
-                self.parseString(xml)
-            self.dict = self.toDict() 
-        if xsd:
-            self._parseXSD(self.NSURL + self.xsd)
-
-
-    def _getText(self, nodelist):
-        rc = ""
-        for node in nodelist:
-            if node.nodeType == node.TEXT_NODE:
-                rc = rc + node.data
-        return rc
-  
-    # The rspec is comprised of 2 parts, and 1 reference:
-    # attributes/elements describe individual resources
-    # complexTypes are used to describe a set of attributes/elements
-    # complexTypes can include a reference to other complexTypes.
-  
-  
-    def _getName(self, node):
-        '''Gets name of node. If tag has no name, then return tag's localName'''
-        name = None
-        if not node.nodeName.startswith("#"):
-            if node.localName:
-                name = node.localName
-            elif node.attributes.has_key("name"):
-                name = node.attributes.get("name").value
-        return name     
-    # Attribute.  {name : nameofattribute, {items: values})
-    def _attributeDict(self, attributeDom):
-        '''Traverse single attribute node.  Create a dict {attributename : {name: value,}]}'''
-        node = {} # parsed dict
-        for attr in attributeDom.attributes.keys():
-            node[attr] = attributeDom.attributes.get(attr).value
-        return node
-  
-    def appendToDictOrCreate(self, dict, key, value):
-        if (dict.has_key(key)):
-            dict[key].append(value)
-        else:
-            dict[key]=[value]
-        return dict
-
-    def toGenDict(self, nodeDom=None, parentdict=None, siblingdict={}, parent=None):
-        """
-        convert an XML to a nested dict:
-          * Non-terminal nodes (elements with string children and attributes) are simple dictionaries
-          * Terminal nodes (the rest) are nested dictionaries
-        """
-
-        if (not nodeDom):
-            nodeDom=self.rootNode
-
-        curNodeName = nodeDom.localName
-
-        if (nodeDom.hasChildNodes()):
-            childdict={}
-            for attribute in nodeDom.attributes.keys():
-                childdict = self.appendToDictOrCreate(childdict, attribute, nodeDom.getAttribute(attribute))
-            for child in nodeDom.childNodes[:-1]:
-                if (child.nodeValue):
-                    siblingdict = self.appendToDictOrCreate(siblingdict, curNodeName, child.nodeValue)
-                else:
-                    childdict = self.toGenDict(child, None, childdict, curNodeName)
-
-            child = nodeDom.childNodes[-1]
-            if (child.nodeValue):
-                siblingdict = self.appendToDictOrCreate(siblingdict, curNodeName, child.nodeValue)
-                if (childdict):
-                    siblingdict = self.appendToDictOrCreate(siblingdict, curNodeName, childdict)
-            else:
-                siblingdict = self.toGenDict(child, siblingdict, childdict, curNodeName)
-        else:
-            childdict={}
-            for attribute in nodeDom.attributes.keys():
-                childdict = self.appendToDictOrCreate(childdict, attribute, nodeDom.getAttribute(attribute))
-
-            self.appendToDictOrCreate(siblingdict, curNodeName, childdict)
-            
-        if (parentdict is not None):
-            parentdict = self.appendToDictOrCreate(parentdict, parent, siblingdict)
-            return parentdict
-        else:
-            return siblingdict
-
-
-
-    def toDict(self, nodeDom = None):
-        """
-        convert this rspec to a dict and return it.
-        """
-        node = {}
-        if not nodeDom:
-             nodeDom = self.rootNode
-  
-        elementName = nodeDom.nodeName
-        if elementName and not elementName.startswith("#"):
-            # attributes have tags and values.  get {tag: value}, else {type: value}
-            node[elementName] = self._attributeDict(nodeDom)
-            # resolve the child nodes.
-            if nodeDom.hasChildNodes():
-                for child in nodeDom.childNodes:
-                    childName = self._getName(child)
-                    
-                    # skip null children
-                    if not childName: continue
-
-                    # initialize the possible array of children
-                    if not node[elementName].has_key(childName): node[elementName][childName] = []
-
-                    if isinstance(child, minidom.Text):
-                        # add if data is not empty
-                        if child.data.strip():
-                            node[elementName][childName].append(nextchild.data)
-                    elif child.hasChildNodes() and isinstance(child.childNodes[0], minidom.Text):
-                        for nextchild in child.childNodes:  
-                            node[elementName][childName].append(nextchild.data)
-                    else:
-                        childdict = self.toDict(child)
-                        for value in childdict.values():
-                            node[elementName][childName].append(value)
-
-        return node
-
-  
-    def toxml(self):
-        """
-        convert this rspec to an xml string and return it.
-        """
-        return self.rootNode.toxml()
-
-  
-    def toprettyxml(self):
-        """
-        print this rspec in xml in a pretty format.
-        """
-        return self.rootNode.toprettyxml()
-
-  
-    def __removeWhitespaceNodes(self, parent):
-        for child in list(parent.childNodes):
-            if child.nodeType == minidom.Node.TEXT_NODE and child.data.strip() == '':
-                parent.removeChild(child)
-            else:
-                self.__removeWhitespaceNodes(child)
-
-    def parseFile(self, filename):
-        """
-        read a local xml file and store it as a dom object.
-        """
-        dom = minidom.parse(filename)
-        self.__removeWhitespaceNodes(dom)
-        self.rootNode = dom.childNodes[0]
-
-
-    def parseString(self, xml):
-        """
-        read an xml string and store it as a dom object.
-        """
-       print>>sys.stderr, "\r\n \t RSPEC.PY parseString xml \r\n %s " %(xml)
-       #xmlUnicode = unicode( xml, 'utf-8' )
-       xml = (xml.encode("utf-8"))
-        dom = minidom.parseString(xml)
-       print>>sys.stderr, "\r\n \t RSPEC.PY OKKK parseString dom \r\n %s " %(dom)
-        self.__removeWhitespaceNodes(dom)
-        self.rootNode = dom.childNodes[0]
-
-    def _httpGetXSD(self, xsdURI):
-        # split the URI into relevant parts
-        host = xsdURI.split("/")[2]
-        if xsdURI.startswith("https"):
-            conn = httplib.HTTPSConnection(host,
-                httplib.HTTPSConnection.default_port)
-        elif xsdURI.startswith("http"):
-            conn = httplib.HTTPConnection(host,
-                httplib.HTTPConnection.default_port)
-        conn.request("GET", xsdURI)
-        # If we can't download the schema, raise an exception
-        r1 = conn.getresponse()
-        if r1.status != 200: 
-            raise Exception
-        return r1.read().replace('\n', '').replace('\t', '').strip() 
-
-
-    def _parseXSD(self, xsdURI):
-        """
-        Download XSD from URL, or if file, read local xsd file and set
-        schemaDict.
-        
-        Since the schema definiton is a global namespace shared by and
-        agreed upon by others, this should probably be a URL.  Check
-        for URL, download xsd, parse, or if local file, use that.
-        """
-        schemaDom = None
-        if xsdURI.startswith("http"):
-            try: 
-                schemaDom = minidom.parseString(self._httpGetXSD(xsdURI))
-            except Exception, e:
-                # logging.debug("%s: web file not found" % xsdURI)
-                # logging.debug("Using local file %s" % self.xsd")
-                info_logger.log_exc("rspec.parseXSD: can't find %s on the web. Continuing." % xsdURI)
-        if not schemaDom:
-            if os.path.exists(xsdURI):
-                # logging.debug("using local copy.")
-                info_logger.debug("rspec.parseXSD: Using local %s" % xsdURI)
-                schemaDom = minidom.parse(xsdURI)
-            else:
-                raise Exception("rspec.parseXSD: can't find xsd locally")
-        self.schemaDict = self.toDict(schemaDom.childNodes[0])
-
-
-    def dict2dom(self, rdict, include_doc = False):
-        """
-        convert a dict object into a dom object.
-        """
-     
-        def elementNode(tagname, rd):
-            element = minidom.Element(tagname)
-            for key in rd.keys():
-                if isinstance(rd[key], StringTypes) or isinstance(rd[key], int):
-                    element.setAttribute(key, unicode(rd[key]))
-                elif isinstance(rd[key], dict):
-                    child = elementNode(key, rd[key])
-                    element.appendChild(child)
-                elif isinstance(rd[key], list):
-                    for item in rd[key]:
-                        if isinstance(item, dict):
-                            child = elementNode(key, item)
-                            element.appendChild(child)
-                        elif isinstance(item, StringTypes) or isinstance(item, int):
-                            child = minidom.Element(key)
-                            text = minidom.Text()
-                            text.data = item
-                            child.appendChild(text)
-                            element.appendChild(child) 
-            return element
-        
-        # Minidom does not allow documents to have more then one
-        # child, but elements may have many children. Because of
-        # this, the document's root node will be the first key/value
-        # pair in the dictionary.  
-        node = elementNode(rdict.keys()[0], rdict.values()[0])
-        if include_doc:
-            rootNode = minidom.Document()
-            rootNode.appendChild(node)
-        else:
-            rootNode = node
-        return rootNode
-
-    def parseDict(self, rdict, include_doc = True):
-        """
-        Convert a dictionary into a dom object and store it.
-        """
-        self.rootNode = self.dict2dom(rdict, include_doc).childNodes[0]
-    def getDictsByTagName(self, tagname, dom = None):
-        """
-        Search the dom for all elements with the specified tagname
-        and return them as a list of dicts
-        """
-        if not dom:
-            dom = self.rootNode
-        dicts = []
-        doms = dom.getElementsByTagName(tagname)
-        dictlist = [self.toDict(d) for d in doms]
-        for item in dictlist:
-            for value in item.values():
-                dicts.append(value)
-        return dicts
-
-    def getDictByTagNameValue(self, tagname, value, dom = None):
-        """
-        Search the dom for the first element with the specified tagname
-        and value and return it as a dict.
-        """
-        tempdict = {}
-        if not dom:
-            dom = self.rootNode
-        dicts = self.getDictsByTagName(tagname, dom)
-        
-        for rdict in dicts:
-            if rdict.has_key('name') and rdict['name'] in [value]:
-                return rdict
-              
-        return tempdict
-
-
-    def filter(self, tagname, attribute, blacklist = [], whitelist = [], dom = None):
-        """
-        Removes all elements where:
-        1. tagname matches the element tag
-        2. attribute matches the element attribte
-        3. attribute value is in valuelist  
-        """
-
-        tempdict = {}
-        if not dom:
-            dom = self.rootNode
-       
-        if dom.localName in [tagname] and dom.attributes.has_key(attribute):
-            if whitelist and dom.attributes.get(attribute).value not in whitelist:
-                dom.parentNode.removeChild(dom)
-            if blacklist and dom.attributes.get(attribute).value in blacklist:
-                dom.parentNode.removeChild(dom)
-           
-        if dom.hasChildNodes():
-            for child in dom.childNodes:
-                self.filter(tagname, attribute, blacklist, whitelist, child) 
-
-
-    def merge(self, rspecs, tagname, dom=None):
-        """
-        Merge this rspec with the requested rspec based on the specified 
-        starting tag name. The start tag (and all of its children) will be merged  
-        """
-        tempdict = {}
-        if not dom:
-            dom = self.rootNode
-
-        whitelist = []
-        blacklist = []
-            
-        if dom.localName in [tagname] and dom.attributes.has_key(attribute):
-            if whitelist and dom.attributes.get(attribute).value not in whitelist:
-                dom.parentNode.removeChild(dom)
-            if blacklist and dom.attributes.get(attribute).value in blacklist:
-                dom.parentNode.removeChild(dom)
-
-        if dom.hasChildNodes():
-            for child in dom.childNodes:
-                self.filter(tagname, attribute, blacklist, whitelist, child) 
-
-    def validateDicts(self):
-        types = {
-            'EInt' : int,
-            'EString' : str,
-            'EByteArray' : list,
-            'EBoolean' : bool,
-            'EFloat' : float,
-            'EDate' : date}
-
-
-    def pprint(self, r = None, depth = 0):
-        """
-        Pretty print the dict
-        """
-        line = ""
-        if r == None: r = self.dict
-        # Set the dept
-        for tab in range(0,depth): line += "    "
-        # check if it's nested
-        if type(r) == dict:
-            for i in r.keys():
-                print line + "%s:" % i
-                self.pprint(r[i], depth + 1)
-        elif type(r) in (tuple, list):
-            for j in r: self.pprint(j, depth + 1)
-        # not nested so just print.
-        else:
-            print line + "%s" %  r
-    
-
-
-class RecordSpec(RSpec):
-
-    root_tag = 'record'
-    def parseDict(self, rdict, include_doc = False):
-        """
-        Convert a dictionary into a dom object and store it.
-        """
-        self.rootNode = self.dict2dom(rdict, include_doc)
-
-    def dict2dom(self, rdict, include_doc = False):
-        record_dict = rdict
-        if not len(rdict.keys()) == 1:
-            record_dict = {self.root_tag : rdict}
-        return RSpec.dict2dom(self, record_dict, include_doc)
-
-        
-# vim:ts=4:expandtab
-    
diff --git a/sfa/util/rspecHelper.py b/sfa/util/rspecHelper.py
deleted file mode 100755 (executable)
index 89f15af..0000000
+++ /dev/null
@@ -1,418 +0,0 @@
-#! /usr/bin/env python
-
-import sys
-
-from copy import deepcopy
-from lxml import etree
-from StringIO import StringIO
-from optparse import OptionParser
-
-from sfa.util.faults import *
-from sfa.util.sfalogging import logger
-
-def merge_rspecs(rspecs):
-    """
-    Merge merge a list of RSpecs into 1 RSpec, and return the result.
-    rspecs must be a valid RSpec string or list of RSpec strings.
-    """
-    if not rspecs or not isinstance(rspecs, list):
-        return rspecs
-
-    # ugly hack to avoid sending the same info twice, when the call graph has dags
-    known_networks={}
-    def register_network (network):
-        try:
-            known_networks[network.get('name')]=True
-        except:
-            logger.error("merge_rspecs: cannot register network with no name in rspec")
-            pass
-    def is_registered_network (network):
-        try:
-            return network.get('name') in known_networks
-        except:
-            logger.error("merge_rspecs: cannot retrieve network with no name in rspec")
-            return False
-
-    # the resulting tree
-    rspec = None
-    for input_rspec in rspecs:
-        # ignore empty strings as returned with used call_ids
-        if not input_rspec: continue
-        try:
-            tree = etree.parse(StringIO(input_rspec))
-        except etree.XMLSyntaxError:
-            # consider failing silently here
-            logger.log_exc("merge_rspecs, parse error")
-            message = str(sys.exc_info()[1]) + ' with ' + input_rspec
-            raise InvalidRSpec(message)
-
-        root = tree.getroot()
-        if not root.get("type") in ["SFA"]:
-            logger.error("merge_rspecs: unexpected type for rspec root, %s"%root.get('type'))
-            continue
-        if rspec == None:
-            # we scan the first input, register all networks
-            # in addition we remove duplicates - needed until everyone runs 1.0-10
-            rspec = root
-            for network in root.iterfind("./network"):
-                if not is_registered_network(network):
-                    register_network(network)
-                else:
-                    # duplicate in the first input - trash it
-                    root.remove(network)
-        else:
-            for network in root.iterfind("./network"):
-                if not is_registered_network(network):
-                    rspec.append(deepcopy(network))
-                    register_network(network)
-            for request in root.iterfind("./request"):
-                rspec.append(deepcopy(request))
-    return etree.tostring(rspec, xml_declaration=True, pretty_print=True)
-
-class RSpec:
-    def __init__(self, xml):
-        parser = etree.XMLParser(remove_blank_text=True)
-        tree = etree.parse(StringIO(xml), parser)
-        self.rspec = tree.getroot()
-
-        # If there is only one network in the rspec, make it the default
-        self.network = None
-        networks = self.get_network_list()
-        if len(networks) == 1:
-            self.network = networks[0]
-
-    # Thierry : need this to locate hostname even if several networks
-    def get_node_element(self, hostname, network=None):
-        if network == None and self.network:
-            network = self.network
-        if network != None:
-            names = self.rspec.iterfind("./network[@name='%s']/site/node/hostname" % network)
-        else:
-            names = self.rspec.iterfind("./network/site/node/hostname")
-        for name in names:
-            if name.text == hostname:
-                return name.getparent()
-        return None
-        
-    # Thierry : need this to return all nodes in all networks
-    def get_node_list(self, network=None):
-        if network == None and self.network:
-            network = self.network
-        if network != None:
-            return self.rspec.xpath("./network[@name='%s']/site/node/hostname/text()" % network)
-        else:
-            return self.rspec.xpath("./network/site/node/hostname/text()")
-
-    def get_network_list(self):
-        return self.rspec.xpath("./network[@name]/@name")
-
-    def get_sliver_list(self, network=None):
-        if network == None:
-            network = self.network
-        result = self.rspec.xpath("./network[@name='%s']/site/node[sliver]/hostname/text()" % network)
-        return result
-
-    def get_available_node_list(self, network=None):
-        if network == None:
-            network = self.network
-        result = self.rspec.xpath("./network[@name='%s']/site/node[not(sliver)]/hostname/text()" % network)
-        return result
-
-    def add_sliver(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        etree.SubElement(node, "sliver")
-
-    def remove_sliver(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        node.remove(node.find("sliver"))
-
-    def attributes_list(self, elem):
-        opts = []
-        if elem is not None:
-            for e in elem:
-                opts.append((e.tag, e.text))
-        return opts
-
-    def get_default_sliver_attributes(self, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        return self.attributes_list(defaults)
-
-    def get_sliver_attributes(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        return self.attributes_list(sliver)
-
-    def add_attribute(self, elem, name, value):
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def add_default_sliver_attribute(self, name, value, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        if defaults is None:
-            defaults = etree.Element("sliver_defaults")
-            network = self.rspec.find("./network[@name='%s']" % network)
-            network.insert(0, defaults)
-        self.add_attribute(defaults, name, value)
-
-    def add_sliver_attribute(self, hostname, name, value, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        self.add_attribute(sliver, name, value)
-
-    def remove_attribute(self, elem, name, value):
-        if elem is not None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def remove_default_sliver_attribute(self, name, value, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        self.remove_attribute(defaults, name, value)
-
-    def remove_sliver_attribute(self, hostname, name, value, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        self.remove_attribute(sliver, name, value)
-
-    def get_site_nodes(self, siteid, network=None):
-        if network == None:
-            network = self.network
-        query = './network[@name="%s"]/site[@id="%s"]/node/hostname/text()' % (network, siteid)
-        result = self.rspec.xpath(query)
-        return result
-        
-    def get_link_list(self, network=None):
-        if network == None:
-            network = self.network
-        linklist = []
-        links = self.rspec.iterfind("./network[@name='%s']/link" % network)
-        for link in links:
-            (end1, end2) = link.get("endpoints").split()
-            name = link.find("description")
-            linklist.append((name.text, 
-                             self.get_site_nodes(end1, network), 
-                             self.get_site_nodes(end2, network)))
-        return linklist
-
-    def get_vlink_list(self, network=None):
-        if network == None:
-            network = self.network
-        vlinklist = []
-        vlinks = self.rspec.iterfind("./network[@name='%s']//vlink" % network)
-        for vlink in vlinks:
-            endpoints = vlink.get("endpoints")
-            (end1, end2) = endpoints.split()
-            query = './network[@name="%s"]//node[@id="%s"]/hostname/text()' % network
-            node1 = self.rspec.xpath(query % end1)[0]
-            node2 = self.rspec.xpath(query % end2)[0]
-            desc = "%s <--> %s" % (node1, node2) 
-            kbps = vlink.find("kbps")
-            vlinklist.append((endpoints, desc, kbps.text))
-        return vlinklist
-
-    def query_links(self, fromnode, tonode, network=None):
-        if network == None:
-            network = self.network
-        fromsite = fromnode.getparent()
-        tosite = tonode.getparent()
-        fromid = fromsite.get("id")
-        toid = tosite.get("id")
-
-        query = "./network[@name='%s']/link[@endpoints = '%s %s']" % (network, fromid, toid)
-        results = self.rspec.xpath(query)
-        if results == None:
-            query = "./network[@name='%s']/link[@endpoints = '%s %s']" % (network, toid, fromid)
-            results = self.rspec.xpath(query)
-        return results
-
-    def query_vlinks(self, endpoints, network=None):
-        if network == None:
-            network = self.network
-        query = "./network[@name='%s']//vlink[@endpoints = '%s']" % (network, endpoints)
-        results = self.rspec.xpath(query)
-        return results
-            
-    
-    def add_vlink(self, fromhost, tohost, kbps, network=None):
-        if network == None:
-            network = self.network
-        fromnode = self.get_node_element(fromhost, network)
-        tonode = self.get_node_element(tohost, network)
-        links = self.query_links(fromnode, tonode, network)
-
-        for link in links:
-            vlink = etree.SubElement(link, "vlink")
-            fromid = fromnode.get("id")
-            toid = tonode.get("id")
-            vlink.set("endpoints", "%s %s" % (fromid, toid))
-            self.add_attribute(vlink, "kbps", kbps)
-        
-
-    def remove_vlink(self, endpoints, network=None):
-        if network == None:
-            network = self.network
-        vlinks = self.query_vlinks(endpoints, network)
-        for vlink in vlinks:
-            vlink.getparent().remove(vlink)
-
-    def toxml(self):
-        return etree.tostring(self.rspec, pretty_print=True, 
-                              xml_declaration=True)
-
-    def __str__(self):
-        return self.toxml()
-
-    def save(self, filename):
-        f = open(filename, "w")
-        f.write(self.toxml())
-        f.close()
-
-
-class Commands:
-    def __init__(self, usage, description, epilog=None):
-        self.parser = OptionParser(usage=usage, description=description,
-                                   epilog=epilog)
-        self.parser.add_option("-i", "", dest="infile", metavar="FILE",
-                               help="read RSpec from FILE (default is stdin)")
-        self.parser.add_option("-o", "", dest="outfile", metavar="FILE",
-                               help="write output to FILE (default is stdout)")
-        self.nodefile = False
-        self.attributes = {}
-
-    def add_nodefile_option(self):
-        self.nodefile = True
-        self.parser.add_option("-n", "", dest="nodefile", 
-                               metavar="FILE",
-                               help="read node list from FILE"),
-
-    def add_show_attributes_option(self):
-        self.parser.add_option("-s", "--show-attributes", action="store_true", 
-                               dest="showatt", default=False, 
-                               help="show sliver attributes")
-
-    def add_attribute_options(self):
-        self.parser.add_option("", "--capabilities", action="append",
-                               metavar="<cap1,cap2,...>",
-                               help="Vserver bcapabilities")
-        self.parser.add_option("", "--codemux", action="append",
-                               metavar="<host,local-port>",
-                               help="Demux HTTP between slices using " +
-                               "localhost ports")
-        self.parser.add_option("", "--cpu-pct", action="append",
-                               metavar="<num>", 
-                               help="Reserved CPU percent (e.g., 25)")
-        self.parser.add_option("", "--cpu-share", action="append",
-                               metavar="<num>", 
-                               help="Number of CPU shares (e.g., 5)")
-        self.parser.add_option("", "--delegations", 
-                               metavar="<slice1,slice2,...>", action="append",
-                               help="List of slices with delegation authority")
-        self.parser.add_option("", "--disk-max", 
-                               metavar="<num>", action="append",
-                               help="Disk quota (1k disk blocks)")
-        self.parser.add_option("", "--initscript", 
-                               metavar="<name>", action="append",
-                               help="Slice initialization script (e.g., stork)")
-        self.parser.add_option("", "--ip-addresses", action="append",
-                               metavar="<IP addr>", 
-                               help="Add an IP address to a sliver")
-        self.parser.add_option("", "--net-i2-max-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Maximum daily network Tx limit " +
-                               "to I2 hosts.")
-        self.parser.add_option("", "--net-i2-max-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Maximum bandwidth over I2 routes")
-        self.parser.add_option("", "--net-i2-min-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Minimum bandwidth over I2 routes")
-        self.parser.add_option("", "--net-i2-share", 
-                               metavar="<num>", action="append",
-                               help="Number of bandwidth shares over I2 routes")
-        self.parser.add_option("", "--net-i2-thresh-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Limit sent to I2 hosts before warning, " +
-                               "throttling")
-        self.parser.add_option("", "--net-max-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Maximum daily network Tx limit " +
-                               "to non-I2 hosts.")
-        self.parser.add_option("", "--net-max-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Maximum bandwidth over non-I2 routes")
-        self.parser.add_option("", "--net-min-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Minimum bandwidth over non-I2 routes")
-        self.parser.add_option("", "--net-share", 
-                               metavar="<num>", action="append",
-                               help="Number of bandwidth shares over non-I2 " +
-                               "routes")
-        self.parser.add_option("", "--net-thresh-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Limit sent to non-I2 hosts before " +
-                               "warning, throttling")
-        self.parser.add_option("", "--vsys", 
-                               metavar="<name>", action="append",
-                               help="Vsys script (e.g., fd_fusemount)")
-        self.parser.add_option("", "--vsys-vnet", 
-                               metavar="<IP network>", action="append",
-                               help="Allocate a virtual private network")
-
-    def get_attribute_dict(self):
-        attrlist = ['capabilities','codemux','cpu_pct','cpu_share',
-                    'delegations','disk_max','initscript','ip_addresses',
-                    'net_i2_max_kbyte','net_i2_max_rate','net_i2_min_rate',
-                    'net_i2_share','net_i2_thresh_kbyte',
-                    'net_max_kbyte','net_max_rate','net_min_rate',
-                    'net_share','net_thresh_kbyte',
-                    'vsys','vsys_vnet']
-        attrdict = {}
-        for attr in attrlist:
-            value = getattr(self.opts, attr, None)
-            if value is not None:
-                attrdict[attr] = value
-        return attrdict
-
-    def prep(self):
-        (self.opts, self.args) = self.parser.parse_args()
-
-        if self.opts.infile:
-            sys.stdin = open(self.opts.infile, "r")
-        xml = sys.stdin.read()
-        self.rspec = RSpec(xml)
-            
-        if self.nodefile:
-            if self.opts.nodefile:
-                f = open(self.opts.nodefile, "r")
-                self.nodes = f.read().split()
-                f.close()
-            else:
-                self.nodes = self.args
-
-        if self.opts.outfile:
-            sys.stdout = open(self.opts.outfile, "w")
-
-
-
-
-
-
-
index 75229b3..2e4640e 100644 (file)
@@ -68,8 +68,11 @@ class _SfaLogger:
             self.logger.setLevel(logging.WARNING)
         elif verbose==1:
             self.logger.setLevel(logging.INFO)
-        elif verbose==2:
+        elif verbose>=2:
             self.logger.setLevel(logging.DEBUG)
+    # in case some other code needs a boolean
+    def getBoolVerboseFromOpt(self,verbose):
+        return verbose>=1
 
     ####################
     def info(self, msg):
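
For reference, a minimal sketch of the new verbosity mapping introduced by this hunk (the function name and final fallback are illustrative, not part of the patch):

    import logging

    def level_from_verbose(verbose):
        # 0 -> WARNING, 1 -> INFO, 2 or more -> DEBUG (previously only verbose == 2 hit DEBUG)
        if verbose == 0:
            return logging.WARNING
        elif verbose == 1:
            return logging.INFO
        elif verbose >= 2:
            return logging.DEBUG
        return logging.WARNING

    assert level_from_verbose(3) == logging.DEBUG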
diff --git a/sfa/util/soapprotocol.py b/sfa/util/soapprotocol.py
deleted file mode 100644 (file)
index de3ee96..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# SOAP-specific code for SFA Client
-
-from httplib import HTTPSConnection
-from ZSI.client import Binding
-
-def xmlrpc_like_callable (soap_callable, *x):
-    soap_result = soap_callable(*x)
-    xmlrpc_result = soap_result['Result']
-    return xmlrpc_result
-        
-class SFACallable:
-     def __init__(self, soap_callable):
-        self.soap_callable = soap_callable
-
-     def __call__(self, *args):
-         outer_result = self.soap_callable(*args)
-         return outer_result['Result']
-
-
-class SFASoapBinding(Binding):
-    def __getattr__(self, attr):
-        soap_callable = Binding.__getattr__(self, attr)
-        return SFACallable(soap_callable)
-
-
-def get_server(url, key_file, cert_file):
-    auth = {
-        'transport' : HTTPSConnection,
-        'transdict' : {'cert_file' : cert_file, 
-                       'key_file' : key_file
-                      },
-     }
-
-    return SFASoapBinding(url=url, **auth)
-
diff --git a/sfa/util/specdict.py b/sfa/util/specdict.py
deleted file mode 100644 (file)
index 7a6ff97..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-##
-# SpecDict
-#
-# SpecDict defines a means for converting a dictionary with plc specific keys
-# to a dict with rspec specific keys. 
-#
-# SpecDict.fields dict defines all the rspec specific attribute and their 
-# expected type. 
-# 
-# SpecDict.plc_fields defines a one to one mapping of plc attribute to rspec 
-# attribute
-
-### $Id$
-### $URL$
-
-from types import StringTypes, ListType
-
-class SpecDict(dict):
-    """
-    Base class of SpecDict objects. 
-    """
-    fields = {}
-    plc_fields = {}
-    type = None
-        
-    def __init__(self, spec_dict):
-        # convert plc dict and initialize self
-        sdict = self.plcToSpec(spec_dict)
-        dict.__init__(self, sdict)
-
-
-    def plcToSpec(self, spec_dict):
-        """
-        Defines how to convert a plc dict to rspec dict
-        """
-        spec = {}
-        for field in self.fields:
-            value = ""
-            expected = self.fields[field]
-            if isinstance(expected, StringTypes):
-                if self.plc_fields.has_key(field):
-                    plc_field = self.plc_fields[field]
-                    if spec_dict.has_key(plc_field):
-                        value = spec_dict[plc_field]
-            elif isinstance(expected, ListType):
-                expected = expected[0]
-                if self.plc_fields.has_key(field):
-                    plc_field = self.plc_fields[field]
-                    if spec_dict.has_key(plc_field):
-                        value = [expected(value) for value in spec_dict[plc_field]]
-            spec[field] = value
-        return {self.type: spec}
-
-#
-# fields = { geni_field:  type.  Could be class for nested classes, otherwise prob str}
-# plc_fields = {geni_field : plc_field}
-#
-class IfSpecDict(SpecDict):
-    type = 'IfSpec'
-    fields = {'name': '',
-              'addr': '',
-              'type': '',
-              'init_params': '',
-              'min_rate': '',
-              'max_rate': '',
-              'max_kbyte': '',
-              'ip_spoof': ''}
-    plc_fields = {'name': 'is_primary', # XXX needs munging to return name instead of True or False
-                 'addr': 'ip',
-                 'type': 'type'}
-class LinkSpecDict(SpecDict):
-    type = 'LinkSpec'
-    fields = {'min_alloc': '', 
-              'max_alloc': '', 
-              'type': '', 
-              'start_time': '', 
-              'bw': '', 
-              'duration': '', 
-              'init_params': '',
-              'endpoints': [IfSpecDict]}
-    plc_fields = {'min_alloc': 'min_alloc',
-              'max_alloc': 'max_alloc', 
-              'type': 'type', 
-              'start_time': 'start_time', 
-              'bw': 'bw', 
-              'duration': 'duration', 
-              'init_params': 'init_params',
-              'endpoints': 'endpoints'}
-  
-            
-class NodeSpecDict(SpecDict):
-    type = 'NodeSpec'
-    fields = {'name': '',
-              'type': '',
-              'init_params': '',
-              'cpu_min': '',
-              'cpu_share': '',
-              'cpu_pct': '',
-              'disk_max': '',
-              'start_time': '',
-              'duration': '',
-              'net_if': [IfSpecDict]}
-
-    plc_fields = {'name': 'hostname',
-                  'net_if': 'interfaces'}  
-
-class NetSpecDict(SpecDict):
-    type = 'NetSpec'
-    fields = {'name': '',
-              'start_time': '',
-              'duration': '',
-              'nodes': [NodeSpecDict],
-              'links': [LinkSpecDict],
-             }
-    plc_fields = {'name': 'name',
-                  'start_time': 'start_time',
-                  'duration': 'duration',
-                  'nodes': 'nodes',
-                  'links': 'links'}
-
-class RSpecDict(SpecDict):
-    type = 'RSpec'
-    fields = {'start_time': '',
-              'duration': '',
-              'networks': [NetSpecDict]
-             }
-    plc_fields = {'networks': 'networks',
-                  'start_time': 'start_tim',
-                  'duration': 'duration'
-                 }
-
-# vim:ts=4:expandtab
diff --git a/sfa/util/ssl_socket.py b/sfa/util/ssl_socket.py
deleted file mode 100644 (file)
index d221da3..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-from ssl import SSLSocket
-
-import textwrap
-
-import _ssl             # if we can't import it, let the error propagate
-
-from _ssl import SSLError
-from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
-from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
-from _ssl import RAND_status, RAND_egd, RAND_add
-from _ssl import \
-     SSL_ERROR_ZERO_RETURN, \
-     SSL_ERROR_WANT_READ, \
-     SSL_ERROR_WANT_WRITE, \
-     SSL_ERROR_WANT_X509_LOOKUP, \
-     SSL_ERROR_SYSCALL, \
-     SSL_ERROR_SSL, \
-     SSL_ERROR_WANT_CONNECT, \
-     SSL_ERROR_EOF, \
-     SSL_ERROR_INVALID_ERROR_CODE
-
-from socket import socket, _fileobject
-from socket import getnameinfo as _getnameinfo
-import base64        # for DER-to-PEM translation
-
-class SSLSocket(SSLSocket, socket):
-
-    """This class implements a subtype of socket.socket that wraps
-    the underlying OS socket in an SSL context when necessary, and
-    provides read and write methods over that channel."""
-
-    def __init__(self, sock, keyfile=None, certfile=None,
-                 server_side=False, cert_reqs=CERT_NONE,
-                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
-                 do_handshake_on_connect=True,
-                 suppress_ragged_eofs=True):
-        socket.__init__(self, _sock=sock._sock)
-        # the initializer for socket trashes the methods (tsk, tsk), so...
-        self.send = lambda data, flags=0: SSLSocket.send(self, data, flags)
-        self.sendto = lambda data, addr, flags=0: SSLSocket.sendto(self, data, addr, flags)
-        self.recv = lambda buflen=1024, flags=0: SSLSocket.recv(self, buflen, flags)
-        self.recvfrom = lambda addr, buflen=1024, flags=0: SSLSocket.recvfrom(self, addr, buflen, flags)
-        self.recv_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recv_into(self, buffer, nbytes, flags)
-        self.recvfrom_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recvfrom_into(self, buffer, nbytes, flags)
-
-        if certfile and not keyfile:
-            keyfile = certfile
-        # see if it's connected
-        try:
-            socket.getpeername(self)
-        except:
-            # no, no connection yet
-            self._sslobj = None
-        else:
-            # yes, create the SSL object
-            self._sslobj = _ssl.sslwrap(self._sock, server_side,
-                                        keyfile, certfile,
-                                        cert_reqs, ssl_version, ca_certs)
-            if do_handshake_on_connect:
-                timeout = self.gettimeout()
-                try:
-                    if timeout == 0:
-                        self.settimeout(None)
-                    self.do_handshake()
-                finally:
-                    self.settimeout(timeout)
-        self.keyfile = keyfile
-        self.certfile = certfile
-        self.cert_reqs = cert_reqs
-        self.ssl_version = ssl_version
-        self.ca_certs = ca_certs
-        self.do_handshake_on_connect = do_handshake_on_connect
-        self.suppress_ragged_eofs = suppress_ragged_eofs
-        self._makefile_refs = 0
-
-
index 5d91539..ee2e41b 100644 (file)
@@ -1,7 +1,5 @@
 import os
-
-from sfa.util.rspec import RecordSpec
-
+from sfa.util.xml import XML
 class SimpleStorage(dict):
     """
     Handles storing and loading python dictionaries. The storage file created
@@ -46,10 +44,9 @@ class XmlStorage(SimpleStorage):
         """
         Parse an xml file and store it as a dict
         """ 
-        data = RecordSpec()
         if os.path.exists(self.db_filename) and os.path.isfile(self.db_filename):
-            data.parseFile(self.db_filename)
-            dict.__init__(self, data.toDict())
+            xml = XML(self.db_filename)
+            dict.__init__(self, xml.todict())
         elif os.path.exists(self.db_filename) and not os.path.isfile(self.db_filename):
             raise IOError, '%s exists but is not a file. please remove it and try again' \
                            % self.db_filename
@@ -58,8 +55,8 @@ class XmlStorage(SimpleStorage):
             self.load()
 
     def write(self):
-        data = RecordSpec()
-        data.parseDict(self)
+        xml = XML()
+        xml.parseDict(self)
         db_file = open(self.db_filename, 'w')
         db_file.write(data.toprettyxml())
         db_file.close()
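
Note: after this hunk write() still serializes via data.toprettyxml() although data is no longer defined, and the new helper spells the method parse_dict() rather than parseDict(). A consistent version would presumably read as follows (a sketch only, assuming XML.toxml() is the intended serializer):

    def write(self):
        xml = XML()
        xml.parse_dict(self)
        db_file = open(self.db_filename, 'w')
        db_file.write(xml.toxml())
        db_file.close()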
index 0e16289..065e8ab 100644 (file)
@@ -3,11 +3,13 @@
 #
 # TODO: Use existing PLC database methods? or keep this separate?
 
-from sfa.util.PostgreSQL import *
-from sfa.trust.gid import *
-from sfa.util.record import *
-from sfa.util.config import *
-from sfa.util.filter import *
+from types import StringTypes
+
+from sfa.util.config import Config
+from sfa.util.parameter import Parameter
+from sfa.util.filter import Filter
+from sfa.util.PostgreSQL import PostgreSQL
+from sfa.util.record import SfaRecord, AuthorityRecord, NodeRecord, SliceRecord, UserRecord
 
 class SfaTable(list):
 
diff --git a/sfa/util/topology.py b/sfa/util/topology.py
new file mode 100644 (file)
index 0000000..bd7eb18
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# SFA Topology Info
+#
+# This module holds topology configuration for SFA. It is implemented as a
+# set of (site_id, site_id) tuples
+
+import os.path
+import traceback
+from sfa.util.sfalogging import logger
+
+class Topology(set):
+    """
+    Parse the topology configuration file. 
+    """
+
+    #def __init__(self, config_file = "/etc/sfa/topology"):
+    def __init__(self, config_file = "/tmp/topology"):
+        set.__init__(self) 
+        self.config_file = None
+        self.config_path = None
+        self.load(config_file)
+
+    def load(self, config_file):
+        try:
+            
+            self.config_file = config_file
+            # path to configuration data
+            self.config_path = os.path.dirname(config_file)
+            # load the links
+            f = open(config_file, 'r')
+            for line in f:
+                ignore = line.find('#')
+                if ignore > -1:
+                    line = line[0:ignore]
+                tup = line.split()
+                if len(tup) > 1:
+                    self.add((tup[0], tup[1]))    
+        except Exception, e:
+            logger.log_exc("Could not find or load the configuration file: %s" % config_file)
+            raise
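A quick usage sketch of the new Topology class (file contents and site ids below are invented): the configuration is one link per line, two site identifiers separated by whitespace, with '#' starting a comment.

    # hypothetical /tmp/topology:
    #   11 286    # link between site 11 and site 286
    #   11 138
    from sfa.util.topology import Topology
    links = Topology('/tmp/topology')
    print ('11', '286') in links    # True; entries are stored as (string, string) tuples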
diff --git a/sfa/util/xml.py b/sfa/util/xml.py
new file mode 100755 (executable)
index 0000000..b2aea13
--- /dev/null
@@ -0,0 +1,289 @@
+#!/usr/bin/python 
+from types import StringTypes
+from lxml import etree
+from StringIO import StringIO
+from sfa.util.faults import InvalidXML
+
+class XpathFilter:
+
+    @staticmethod
+    def filter_value(key, value):
+        xpath = ""    
+        if isinstance(value, str):
+            if '*' in value:
+                value = value.replace('*', '')
+                xpath = 'contains(%s, "%s")' % (key, value)
+            else:
+                xpath = '%s="%s"' % (key, value)                
+        return xpath
+
+    @staticmethod
+    def xpath(filter={}):
+        xpath = ""
+        if filter:
+            filter_list = []
+            for (key, value) in filter.items():
+                if key == 'text':
+                    key = 'text()'
+                else:
+                    key = '@'+key
+                if isinstance(value, str):
+                    filter_list.append(XpathFilter.filter_value(key, value))
+                elif isinstance(value, list):
+                    stmt = ' or '.join([XpathFilter.filter_value(key, str(val)) for val in value])
+                    filter_list.append(stmt)   
+            if filter_list:
+                xpath = ' and '.join(filter_list)
+                xpath = '[' + xpath + ']'
+        return xpath
+
+class XmlNode:
+    def __init__(self, node, namespaces):
+        self.node = node
+        self.namespaces = namespaces
+        self.attrib = node.attrib
+
+    def xpath(self, xpath, namespaces=None):
+        if not namespaces:
+            namespaces = self.namespaces 
+        return self.node.xpath(xpath, namespaces=namespaces)
+    
+    def add_element(self, name, *args, **kwds):
+        element = etree.SubElement(self.node, name, *args, **kwds)
+        return XmlNode(element, self.namespaces)
+
+    def remove_elements(self, name):
+        """
+        Removes all occurrences of an element from the tree, starting
+        at this node.
+        """
+
+        if not name.startswith('//'):
+            name = '//' + name
+        elements = self.node.xpath('%s' % name, namespaces=self.namespaces)
+        for element in elements:
+            parent = element.getparent()
+            parent.remove(element)
+
+    def set(self, key, value):
+        self.node.set(key, value)
+    
+    def set_text(self, text):
+        self.node.text = text
+    
+    def unset(self, key):
+        del self.node.attrib[key]
+   
+    def toxml(self):
+        return etree.tostring(self.node, encoding='UTF-8', pretty_print=True)                    
+
+    def __str__(self):
+        return self.toxml()
+
+class XML:
+    def __init__(self, xml=None, namespaces=None):
+        self.root = None
+        self.namespaces = namespaces
+        self.default_namespace = None
+        self.schema = None
+        if isinstance(xml, basestring):
+            self.parse_xml(xml)
+        elif isinstance(xml, XmlNode):
+            self.root = xml
+            self.namespaces = xml.namespaces
+        elif isinstance(xml, etree._ElementTree) or isinstance(xml, etree._Element):
+            self.parse_xml(etree.tostring(xml))
+
+    def parse_xml(self, xml):
+        """
+        parse rspec into etree
+        """
+        parser = etree.XMLParser(remove_blank_text=True)
+        try:
+            tree = etree.parse(xml, parser)
+        except IOError:
+            # 'rspec' file doesn't exist. 'rspec' is probably an xml string
+            try:
+                tree = etree.parse(StringIO(xml), parser)
+            except Exception, e:
+                raise InvalidXML(str(e))
+        root = tree.getroot()
+        self.namespaces = dict(root.nsmap)
+        # set namespaces map
+        if 'default' not in self.namespaces and None in self.namespaces: 
+            # If the None key exists, it points to the default namespace. This makes
+            # it hard for us to write xpath queries for the default namespace because lxml
+            # won't understand a None prefix. We will just associate the default namespace
+            # with a key named 'default'.
+            self.namespaces['default'] = self.namespaces.pop(None)
+            
+        else:
+            self.namespaces['default'] = 'default' 
+
+        self.root = XmlNode(root, self.namespaces)
+        # set schema 
+        for key in self.root.attrib.keys():
+            if key.endswith('schemaLocation'):
+                # schema location should be at the end of the list
+                schema_parts  = self.root.attrib[key].split(' ')
+                self.schema = schema_parts[1]    
+                namespace, schema  = schema_parts[0], schema_parts[1]
+                break
+
+    def parse_dict(self, d, root_tag_name='xml', element = None):
+        if element is None: 
+            if self.root is None:
+                self.parse_xml('<%s/>' % root_tag_name)
+            element = self.root
+
+        if 'text' in d:
+            text = d.pop('text')
+            element.text = text
+
+        # handle repeating fields
+        for (key, value) in d.items():
+            if isinstance(value, list):
+                value = d.pop(key)
+                for val in value:
+                    if isinstance(val, dict):
+                        child_element = etree.SubElement(element, key)
+                        self.parse_dict(val, key, child_element)
+                    elif isinstance(val, basestring):
+                        child_element = etree.SubElement(element, key).text = val
+                        
+            elif isinstance(value, int):
+                d[key] = unicode(d[key])  
+            elif value is None:
+                d.pop(key)
+
+        # element.attrib.update will explode if DateTimes are in the
+        # dictionary.
+        d=d.copy()
+        # looks like iteritems won't stand side-effects
+        for k in d.keys():
+            if not isinstance(d[k],StringTypes):
+                del d[k]
+
+        element.attrib.update(d)
+
+    def validate(self, schema):
+        """
+        Validate against rng schema
+        """
+        relaxng_doc = etree.parse(schema)
+        relaxng = etree.RelaxNG(relaxng_doc)
+        if not relaxng(self.root):
+            error = relaxng.error_log.last_error
+            message = "%s (line %s)" % (error.message, error.line)
+            raise InvalidXML(message)
+        return True
+
+    def xpath(self, xpath, namespaces=None):
+        if not namespaces:
+            namespaces = self.namespaces
+        return self.root.xpath(xpath, namespaces=namespaces)
+
+    def set(self, key, value, node=None):
+        if not node:
+            node = self.root 
+        return node.set(key, value)
+
+    def remove_attribute(self, name, node=None):
+        if not node:
+            node = self.root
+        node.remove_attribute(name) 
+        
+
+    def add_element(self, name, attrs={}, parent=None, text=""):
+        """
+        Wrapper around etree.SubElement(). Adds an element to the
+        specified parent node, or to the root node if parent is
+        not specified.
+        """
+        if parent is None:
+            parent = self.root.node
+        element = etree.SubElement(parent, name)
+        if text:
+            element.text = text
+        if isinstance(attrs, dict):
+            for attr in attrs:
+                element.set(attr, attrs[attr])  
+        return XmlNode(element, self.namespaces)
+
+    def remove_elements(self, name, node = None):
+        """
+        Removes all occurrences of an element from the tree. Starts at
+        the specified node if given, otherwise at the tree's root.
+        """
+        if not node:
+            node = self.root
+
+        node.remove_elements(name)
+
+    def attributes_list(self, elem):
+        # convert a list of attribute tags into list of tuples
+        # (tagname, text_value)
+        opts = []
+        if elem is not None:
+            for e in elem:
+                opts.append((e.tag, str(e.text).strip()))
+        return opts
+
+    def get_element_attributes(self, elem=None, depth=0):
+        if elem == None:
+            elem = self.root_node
+        if not hasattr(elem, 'attrib'):
+            # this is probably not an element node with attributes; it could be just an
+            # attribute, so return it
+            return elem
+        attrs = dict(elem.attrib)
+        attrs['text'] = str(elem.text).strip()
+        attrs['parent'] = elem.getparent()
+        if isinstance(depth, int) and depth > 0:
+            for child_elem in list(elem):
+                key = str(child_elem.tag)
+                if key not in attrs:
+                    attrs[key] = [self.get_element_attributes(child_elem, depth-1)]
+                else:
+                    attrs[key].append(self.get_element_attributes(child_elem, depth-1))
+        else:
+            attrs['child_nodes'] = list(elem)
+        return attrs
+
+    def merge(self, in_xml):
+        pass
+
+    def __str__(self):
+        return self.toxml()
+
+    def toxml(self):
+        return self.root.toxml()
+    
+    # XXX smbaker, for record.load_from_string
+    def todict(self, elem=None):
+        if elem is None:
+            elem = self.root
+        d = {}
+        d.update(elem.attrib)
+        d['text'] = elem.text
+        for child in elem.iterchildren():
+            if child.tag not in d:
+                d[child.tag] = []
+            d[child.tag].append(self.todict(child))
+
+        if len(d)==1 and ("text" in d):
+            d = d["text"]
+
+        return d
+        
+    def save(self, filename):
+        f = open(filename, 'w')
+        f.write(self.toxml())
+        f.close()
+
+# no RSpec in scope 
+#if __name__ == '__main__':
+#    rspec = RSpec('/tmp/resources.rspec')
+#    print rspec
+
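A short usage sketch of the new sfa.util.xml helpers (the XML literal and attribute names below are made up):

    from sfa.util.xml import XML, XpathFilter

    doc = XML('<network name="plc"><node component_name="n1"/></network>')
    print doc.todict()['name']                            # 'plc'
    query = '//node' + XpathFilter.xpath({'component_name': 'n1'})
    print doc.xpath(query)                                # [<Element node ...>]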
index 3dc87b6..1f50628 100644 (file)
@@ -23,7 +23,7 @@
 
 import re
 
-from sfa.util.faults import *
+from sfa.util.faults import SfaAPIError
 
 # for convenience and smoother translation - we should get rid of these functions eventually 
 def get_leaf(hrn): return Xrn(hrn).get_leaf()
@@ -98,7 +98,7 @@ class Xrn:
     @staticmethod
     def urn_full (urn):
         if urn.startswith(Xrn.URN_PREFIX): return urn
-        else: return Xrn.URN_PREFIX+URN
+        else: return Xrn.URN_PREFIX+urn
     @staticmethod
     def urn_meaningful (urn):
         if urn.startswith(Xrn.URN_PREFIX): return urn[len(Xrn.URN_PREFIX):]
@@ -173,13 +173,18 @@ class Xrn:
             # or completely change how record types are generated/stored   
             if name != 'sa':
                 type = type + "+" + name
-
+            name =""
+        else:
+            name = parts.pop(len(parts)-1)
         # convert parts (list) into hrn (str) by doing the following
         # 1. remove blank parts
         # 2. escape dots inside parts
         # 3. replace ':' with '.' inside parts
-        # 3. join parts using '.' 
-        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part]) 
+        # 3. join parts using '.'
+        hrn = '.'.join([Xrn.escape(part).replace(':','.') for part in parts if part])
+        # dont replace ':' in the name section
+        if name:
+            hrn += '.%s' % Xrn.escape(name) 
 
         self.hrn=str(hrn)
         self.type=str(type)
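As an illustration of this hunk's intent (the URN below is invented): the last URN component is now treated as the record name and keeps its ':', while ':' in authority parts is still mapped to '.'.

    # hypothetical example
    # urn:publicid:IDN+plc:princeton+node+pc1:eth0
    #   before this change -> hrn 'plc.princeton.pc1.eth0'
    #   after this change  -> hrn 'plc.princeton.pc1:eth0'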
index e7657ff..987cff5 100644 (file)
@@ -1,7 +1,7 @@
-import os, time
+import os
 import libxml2
 from sfatables.command import Command
-from sfatables.globals import *
+from sfatables.globals import sfatables_config, target_dir, match_dir
 
 class Add(Command):
     def __init__(self):
index 50b1d62..3646990 100644 (file)
@@ -1,5 +1,5 @@
 import os, time
-from sfatables.globals import *
+from sfatables.globals import sfatables_config
 from sfatables.command import Command
 
 class Delete(Command):
index d401092..852985e 100644 (file)
@@ -1,7 +1,7 @@
 import os, time
 import libxml2
 from sfatables.command import Command
-from sfatables.globals import *
+from sfatables.globals import sfatables_config, target_dir, match_dir
 
 class Insert(Command):
     def __init__(self):
index 70d7206..cea40bb 100644 (file)
@@ -1,7 +1,7 @@
 import os, time
 import libxml2
 
-from sfatables.globals import *
+from sfatables.globals import sfatables_config
 from sfatables.pretty import Pretty
 from sfatables.command import Command
 
index 99226f4..e22967c 100644 (file)
@@ -2,15 +2,13 @@
 
 import sys
 import os
-import pdb
-from optparse import OptionParser
 
 import libxml2
+import libxslt
 
-from sfatables import commands
-from sfatables.globals import *
-from sfatables.commands.List import *
-from sfatables.xmlrule import *
+from sfatables.globals import sfatables_config
+from sfatables.commands.List import List
+from sfatables.xmlrule import XMLRule
 
 class SFATablesRules:
     def __init__(self, chain_name):
index b413ef1..a06680b 100755 (executable)
@@ -8,14 +8,12 @@
 
 import sys
 import os
-import pdb
 import glob
-import libxml2
 
 from optparse import OptionParser
 from sfatables import commands
 from sfatables.xmlextension import Xmlextension
-from sfatables.globals import *
+from sfatables.globals import target_dir, match_dir
 
 def load_commands(module, list):
     command_dict={}
index 5e298db..f90e0fb 100644 (file)
@@ -5,7 +5,6 @@
 #   - The parameters that the processor needs to evaluate the context
 
 import libxml2
-from sfatables.globals import *
 
 class Xmlextension:
     def __init__(self, file_path):
index e21f9d8..46f3601 100644 (file)
@@ -1,10 +1,11 @@
+import sys,os
+
 import libxml2
 # allow to run sfa2wsdl if this is missing (for mac)
-import sys
 try:import libxslt
 except: print >>sys.stderr, "WARNING, could not import libxslt"
 
-from sfatables.globals import *
+from sfatables.globals import sfatables_config
 
 class XMLRule:
     def apply_processor(self, type, doc, output_xpath_filter=None):
@@ -88,14 +89,13 @@ class XMLRule:
         #       then target(target_args, rspec)
         #       else rspec
         
-        import pdb
         if (self.match(rspec)):
             return (True,self.wrap_up(self.target(rspec)))
         else:
             return (False,self.wrap_up(rspec))
 
 
-    def apply_compiled(rspec):
+    def apply_compiled(self, rspec):
         # Not supported yet
         return None
 
diff --git a/tests/client/README b/tests/client/README
deleted file mode 100644 (file)
index 6d4ae3d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-these files used to be in geniwrapper/cmdline
index 4d49d08..ce72936 100755 (executable)
@@ -4,16 +4,14 @@ import os
 import random
 import string
 import unittest
-import sfa.util.xmlrpcprotocol as xmlrpc
+import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 from unittest import TestCase
 from optparse import OptionParser
-from sfa.util.xmlrpcprotocol import ServerException
 from sfa.util.xrn import get_authority
 from sfa.util.config import *
 from sfa.trust.certificate import *
 from sfa.trust.credential import *
-from sfa.util.sfaticket import *
-from sfa.util.rspec import *
+from sfa.trust.sfaticket import SfaTicket
 from sfa.client import sfi
 
 def random_string(size):
@@ -45,10 +43,10 @@ class Client:
         self.cert.save_to_file(cert_file)        
         SFI_AGGREGATE = config.SFI_SM.replace('12347', '12346')
         SFI_CM = 'http://' + options.cm_host + ':12346'
-        self.registry = xmlrpc.get_server(config.SFI_REGISTRY, key_file, cert_file)
-        self.aggregate = xmlrpc.get_server(SFI_AGGREGATE, key_file, cert_file)
-        self.sm = xmlrpc.get_server(config.SFI_SM, key_file, cert_file)
-        self.cm = xmlrpc.get_server(SFI_CM, key_file, cert_file)
+        self.registry = xmlrpcprotocol.server_proxy(config.SFI_REGISTRY, key_file, cert_file)
+        self.aggregate = xmlrpcprotocol.server_proxy(SFI_AGGREGATE, key_file, cert_file)
+        self.sm = xmlrpcprotocol.server_proxy(config.SFI_SM, key_file, cert_file)
+        self.cm = xmlrpcprotocol.server_proxy(SFI_CM, key_file, cert_file)
         self.hrn = config.SFI_USER
         # XX defaulting to user, but this should be configurable so we can
         # test from components persepctive
@@ -172,7 +170,7 @@ class RegistryTest(BasicTestCase):
         server_exception = False 
         try:
             callable(self.credential)
-        except ServerException:
+        except xmlrpcprotocol.ServerException:
             server_exception = True
         finally:
             if self.type in ['user'] and not server_exception:
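
For reference, a minimal sketch of the renamed client entry point used in this hunk (URL and key/cert paths are placeholders):

    import sfa.util.xmlrpcprotocol as xmlrpcprotocol

    # server_proxy(url, key_file, cert_file) replaces the old get_server() helper
    registry = xmlrpcprotocol.server_proxy('https://localhost:12345/', 'user.pkey', 'user.cert')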
diff --git a/tools/Makefile b/tools/Makefile
new file mode 100644 (file)
index 0000000..010f019
--- /dev/null
@@ -0,0 +1,18 @@
+########## compute dependency graphs
+DEPTOOLS=py2depgraph.py depgraph2dot.py
+
+all:deps
+
+deps: server.png client.png
+
+server.dg: $(DEPTOOLS)
+       py2depgraph.py ../sfa/server/sfa-start.py > $@
+
+client.dg: $(DEPTOOLS)
+       py2depgraph.py ../sfa/client/sfi.py > $@
+
+%.png: %.dg
+       depgraph2dot.py < $*.dg | dot -T png -o $*.png
+
+clean: 
+       rm -f *png *dg
diff --git a/tools/depgraph2dot.py b/tools/depgraph2dot.py
new file mode 100755 (executable)
index 0000000..b8ecbce
--- /dev/null
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# Copyright 2004 Toby Dickenson
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject
+# to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+
+import sys, getopt, colorsys, imp, hashlib
+
+class pydepgraphdot:
+
+    def main(self,argv):    
+        opts,args = getopt.getopt(argv,'',['mono'])
+        self.colored = 1
+        for o,v in opts:
+            if o=='--mono':
+                self.colored = 0
+        self.render()
+
+    def fix(self,s):
+        # Convert a module name to a syntactically correct node name
+        return s.replace('.','_')
+    
+    def render(self):
+        p,t = self.get_data()
+
+        # normalise our input data
+        for k,d in p.items():
+            for v in d.keys():
+                if not p.has_key(v):
+                    p[v] = {}
+                    
+        f = self.get_output_file()                    
+                    
+        f.write('digraph G {\n')
+        #f.write('concentrate = true;\n')
+        #f.write('ordering = out;\n')
+        f.write('ranksep=1.0;\n')
+        f.write('node [style=filled,fontname=Helvetica,fontsize=10];\n')
+        allkd = p.items()
+        allkd.sort()
+        for k,d in allkd:
+            tk = t.get(k)
+            if self.use(k,tk):
+                allv = d.keys()
+                allv.sort()
+                for v in allv:
+                    tv = t.get(v)
+                    if self.use(v,tv) and not self.toocommon(v,tv):
+                        f.write('%s -> %s' % ( self.fix(k),self.fix(v) ) )
+                        self.write_attributes(f,self.edge_attributes(k,v))
+                        f.write(';\n')
+                f.write(self.fix(k))
+                self.write_attributes(f,self.node_attributes(k,tk))
+                f.write(';\n')
+        f.write('}\n')
+
+    def write_attributes(self,f,a):
+        if a:
+            f.write(' [')
+            f.write(','.join(a))
+            f.write(']')
+
+    def node_attributes(self,k,type):
+        a = []
+        a.append('label="%s"' % self.label(k))
+        if self.colored:
+            a.append('fillcolor="%s"' % self.color(k,type))
+        else:
+            a.append('fillcolor=white')
+        if self.toocommon(k,type):
+            a.append('peripheries=2')
+        return a
+                
+    def edge_attributes(self,k,v):
+        a = []
+        weight = self.weight(k,v)
+        if weight!=1:
+            a.append('weight=%d' % weight)
+        length = self.alien(k,v)
+        if length:
+            a.append('minlen=%d' % length)
+        return a
+            
+    def get_data(self):
+        t = eval(sys.stdin.read())
+        return t['depgraph'],t['types']
+    
+    def get_output_file(self):
+        return sys.stdout
+
+    def use(self,s,type):
+        # Return true if this module is interesting and should be drawn. Return false
+        # if it should be completely omitted. This is a default policy - please override.
+        if s in ('os','sys','qt','time','__future__','types','re','string'):
+            # nearly all modules use all of these... more or less. They add nothing to
+            # our diagram.
+            return 0
+        if s.startswith('encodings.'):
+            return 0
+        if s=='__main__':
+            return 1
+        if self.toocommon(s,type):
+            # A module where we don't want to draw references _to_. Dot doesn't handle these
+            # well, so it is probably best to not draw them at all.
+            return 0
+        return 1
+
+    def toocommon(self,s,type):
+        # Return true if references to this module are uninteresting. Such references
+        # do not get drawn. This is a default policy - please override.
+        #
+        if s=='__main__':
+            # references *to* __main__ are never interesting. omitting them means
+            # that main floats to the top of the page
+            return 1
+        if type==imp.PKG_DIRECTORY:
+            # don't draw references to packages.
+            return 1
+        return 0
+        
+    def weight(self,a,b):
+        # Return the weight of the dependency from a to b. Higher weights
+        # usually have shorter straighter edges. Return 1 if it has normal weight.
+        # A value of 4 is usually good for ensuring that a related pair of modules 
+        # are drawn next to each other. This is a default policy - please override.
+        #
+        if b.split('.')[-1].startswith('_'):
+            # A module that starts with an underscore. You need a special reason to
+            # import these (for example random imports _random), so draw them close
+            # together
+            return 4
+        return 1
+    
+    def alien(self,a,b):
+        # Return non-zero if references to this module are strange, and should be drawn
+        # extra-long. the value defines the length, in rank. This is also good for putting some
+        # vertical space between separate subsystems. This is a default policy - please override.
+        #
+        return 0
+
+    def label(self,s):
+        # Convert a module name to a formatted node label. This is a default policy - please override.
+        #
+        return '\\.\\n'.join(s.split('.'))
+
+    def color(self,s,type):
+        # Return the node color for this module name. This is a default policy - please override.
+        #
+        # Calculate a color systematically based on the hash of the module name. Modules in the
+        # same package have the same color. Unpackaged modules are grey
+        t = self.normalise_module_name_for_hash_coloring(s,type)
+        return self.color_from_name(t)
+        
+    def normalise_module_name_for_hash_coloring(self,s,type):
+        if type==imp.PKG_DIRECTORY:
+            return s
+        else:
+            i = s.rfind('.')
+            if i<0:
+                return ''
+            else:
+                return s[:i]
+        
+    def color_from_name(self,name):
+        n = hashlib.md5(name).digest()
+        hf = float(ord(n[0])+ord(n[1])*0xff)/0xffff
+        sf = float(ord(n[2]))/0xff
+        vf = float(ord(n[3]))/0xff
+        r,g,b = colorsys.hsv_to_rgb(hf, 0.3+0.6*sf, 0.8+0.2*vf)
+        return '#%02x%02x%02x' % (r*256,g*256,b*256)
+
+
+def main():
+    pydepgraphdot().main(sys.argv[1:])
+
+if __name__=='__main__':
+    main()
+
+
+
diff --git a/tools/py2depgraph.py b/tools/py2depgraph.py
new file mode 100755 (executable)
index 0000000..022add3
--- /dev/null
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+# Copyright 2004,2009 Toby Dickenson
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject
+# to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import sys, pprint
+import modulefinder
+
+focus=[ 'sfa' , 'OpenSSL', 'M2Crypto', 'xmlrpclib', 'threading' ]
+
+class mymf(modulefinder.ModuleFinder):
+    def __init__(self,*args,**kwargs):
+        self._depgraph = {}
+        self._types = {}
+        self._last_caller = None
+        modulefinder.ModuleFinder.__init__(self,*args,**kwargs)
+        
+    def import_hook(self, name, caller=None, fromlist=None, level=None):
+        old_last_caller = self._last_caller
+        try:
+            self._last_caller = caller
+            return modulefinder.ModuleFinder.import_hook(self,name,caller,fromlist)
+        finally:
+            self._last_caller = old_last_caller
+            
+    def import_module(self,partnam,fqname,parent):
+        keep=False
+        for start in focus:
+            if fqname.startswith(start): keep=True
+        if not keep:
+            print >> sys.stderr, "Trimmed fqname",fqname
+            return
+        r = modulefinder.ModuleFinder.import_module(self,partnam,fqname,parent)
+        if r is not None:
+            self._depgraph.setdefault(self._last_caller.__name__,{})[r.__name__] = 1
+        return r
+    
+    def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
+        r = modulefinder.ModuleFinder.load_module(self, fqname, fp, pathname, (suffix, mode, type))
+        if r is not None:
+            self._types[r.__name__] = type
+        return r
+        
+        
+def main(argv):    
+    path = sys.path[:]
+    debug = 0
+    exclude = []
+    mf = mymf(path,debug,exclude)
+    mf.run_script(argv[0])
+    pprint.pprint({'depgraph':mf._depgraph,'types':mf._types})
+    
+if __name__=='__main__':
+    main(sys.argv[1:])
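For reference, a sketch of the intermediate data exchanged between the two tools (module names and type codes below are illustrative): py2depgraph.py pprint()s a single dict on stdout, and depgraph2dot.py reads it back from stdin via eval() before emitting Graphviz text.

    # illustrative .dg content
    {'depgraph': {'sfa.client.sfi': {'sfa.util.xrn': 1, 'sfa.trust.credential': 1},
                  'sfa.util.xrn': {}},
     'types': {'sfa.client.sfi': 1, 'sfa.util.xrn': 1}}    # values are imp module-type codes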
diff --git a/tools/readme b/tools/readme
new file mode 100644 (file)
index 0000000..bb82894
--- /dev/null
@@ -0,0 +1,5 @@
+initial version from
+ http://www.tarind.com/py2depgraph.py
+ http://www.tarind.com/depgraph2dot.py
+
+customized for trimming all non-project dependencies