Merge branch 'master' into senslab2
authorSandrine Avakian <sandrine.avakian@inria.fr>
Wed, 29 Feb 2012 12:33:11 +0000 (13:33 +0100)
committerSandrine Avakian <sandrine.avakian@inria.fr>
Wed, 29 Feb 2012 12:33:11 +0000 (13:33 +0100)
Conflicts:
sfa/server/sfa-start.py

79 files changed:
Makefile
config/default_config.xml
config/sfa-config-tty
cron.d/sfa.cron
init.d/sfa
setup.py
sfa.spec
sfa/client/sfaclientlib.py
sfa/client/sfi.py
sfa/clientbin/sfaadmin.py [new file with mode: 0755]
sfa/federica/__init__.py [new file with mode: 0644]
sfa/federica/fddriver.py [new file with mode: 0644]
sfa/federica/fdshell.py [new file with mode: 0644]
sfa/generic/__init__.py
sfa/generic/fd.py [new file with mode: 0644]
sfa/generic/max.py
sfa/generic/openstack.py [new file with mode: 0644]
sfa/generic/pl.py
sfa/importer/openstackimporter.py [new file with mode: 0644]
sfa/importer/plimporter.py [new file with mode: 0644]
sfa/importer/sfa-import-openstack.py [deleted file]
sfa/importer/sfa-import-plc.py [deleted file]
sfa/importer/sfa-import.py [new file with mode: 0755]
sfa/importer/sfa-nuke.py [moved from sfa/importer/sfa-nuke-plc.py with 65% similarity]
sfa/importer/sfaImport.py [deleted file]
sfa/importer/sfaimporter.py [new file with mode: 0644]
sfa/managers/aggregate_manager.py
sfa/managers/aggregate_manager_openstack.py [deleted file]
sfa/managers/registry_manager.py
sfa/managers/registry_manager_openstack.py
sfa/managers/slice_manager.py
sfa/methods/CreateSliver.py
sfa/methods/GetCredential.py
sfa/methods/GetSelfCredential.py
sfa/methods/List.py
sfa/methods/Resolve.py
sfa/openstack/euca_shell.py [new file with mode: 0644]
sfa/openstack/nova_driver.py [moved from sfa/openstack/openstack_driver.py with 62% similarity]
sfa/openstack/nova_shell.py [new file with mode: 0644]
sfa/openstack/openstack_shell.py [deleted file]
sfa/openstack/osaggregate.py [new file with mode: 0644]
sfa/plc/plaggregate.py
sfa/plc/pldriver.py
sfa/rspecs/elements/disk_image.py
sfa/rspecs/elements/element.py
sfa/rspecs/elements/sliver.py
sfa/rspecs/elements/versions/pgv2DiskImage.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2SliverType.py
sfa/rspecs/elements/versions/sfav1Node.py
sfa/rspecs/versions/federica.py [new file with mode: 0644]
sfa/rspecs/versions/sfav1.py
sfa/server/registry.py
sfa/server/sfa-ca.py
sfa/server/sfa-clean-peer-records.py [deleted file]
sfa/server/sfa-start.py
sfa/server/sfaapi.py
sfa/storage/PostgreSQL.py [deleted file]
sfa/storage/alchemy.py [new file with mode: 0644]
sfa/storage/dbschema.py [new file with mode: 0644]
sfa/storage/filter.py [deleted file]
sfa/storage/migrations/__init__.py [new file with mode: 0644]
sfa/storage/migrations/migrate.cfg [new file with mode: 0644]
sfa/storage/migrations/versions/001_slice_researchers.py [new file with mode: 0644]
sfa/storage/migrations/versions/__init__.py [new file with mode: 0644]
sfa/storage/model.py [new file with mode: 0644]
sfa/storage/record.py [deleted file]
sfa/storage/row.py [deleted file]
sfa/storage/sfa.sql [deleted file]
sfa/storage/table.py [deleted file]
sfa/trust/credential.py
sfa/trust/gid.py
sfa/trust/hierarchy.py
sfa/trust/rights.py
sfa/util/config.py
sfa/util/osxrn.py [new file with mode: 0644]
sfa/util/sfatablesRuntime.py
sfa/util/xrn.py
tests/testAll.py
tests/testStorage.py [moved from tests/testRecord.py with 58% similarity]

index a21ce8f..0fa80c8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -37,7 +37,7 @@ python-install:
        python setup.py install --root=$(DESTDIR)       
        chmod 444 $(DESTDIR)/etc/sfa/default_config.xml
        rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
-       rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/sfa.sql
+       rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/migrations
        (cd $(DESTDIR)/usr/bin ; ln -s sfi.py sfi; ln -s sfascan.py sfascan)
 
 python-clean: version-clean
@@ -135,7 +135,8 @@ RSYNC                       := rsync -a -v $(RSYNC_COND_DRY_RUN) --no-owner $(RSYNC_EXCLUDES)
 CLIENTS = $(shell ls sfa/clientbin/*.py)
 
 BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
-       ./sfa/importer/sfa-import-plc.py ./sfa/importer/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
+       ./sfa/server/sfa-start.py \
+       ./sfa/importer/sfa-import.py ./sfa/importer/sfa-nuke.py \
        $(CLIENTS)
 
 synccheck: 
@@ -146,28 +147,30 @@ ifeq (,$(SSHURL))
        @exit 1
 endif
 
-sync: synccheck
-       +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
-       +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
+
+synclib: synccheck
+       +$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/lib\*/python2.\*/site-packages/
+syncbin: synccheck
        +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin/
+syncinit: synccheck
        +$(RSYNC) ./init.d/sfa  $(SSHURL)/etc/init.d/
+syncconfig: synccheck
        +$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/
-       +$(RSYNC) ./sfa/storage/sfa.sql $(SSHURL)/usr/share/sfa/
+synctest: synccheck
+       +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
+syncrestart: synccheck
        $(SSHCOMMAND) exec service sfa restart
 
-# 99% of the time this is enough
-fastsync: synccheck
-       +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
-       $(SSHCOMMAND) exec service sfa restart
+syncmig: synccheck
+       +$(RSYNC) ./sfa/storage/migrations $(SSHURL)/usr/share/sfa/
 
-clientsync: synccheck
-       +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin/
 
-ricasync: synccheck
-       +$(RSYNC) --relative ./sfa/fd ./sfa/generic/fd.py ./sfa/rspecs/versions/federica.py $(SSHURL)/usr/lib\*/python2.\*/site-packages/
-       $(SSHCOMMAND) exec service sfa restart
+# full-fledged
+sync: synclib syncbin syncinit syncconfig syncrestart
+# 99% of the time this is enough
+syncfast: synclib syncrestart
 
-.PHONY: synccheck sync fastsync clientsync ricasync
+.PHONY: synccheck synclib syncbin syncinit syncconfig synctest syncrestart syncmig sync syncfast
 
 ##########
 CLIENTLIBFILES= \
index 6158905..b08f738 100644 (file)
@@ -223,6 +223,32 @@ Thierry Parmentelat
       </variablelist>
     </category>
 
+    <!-- ======================================== -->
+    <category id="sfa_flashpolicy">
+      <name>SFA Flash Policy</name>
+      <description>The settings that affect the flash policy server that will run
+      as part of this SFA instance.</description>
+
+      <variablelist>
+        <variable id="enabled" type="boolean">
+          <name>Enable Flash Policy Server</name>
+          <value>false</value>
+          <description>Allows this local SFA instance to run a
+          flash policy server.</description>
+        </variable>
+        <variable id="config_file" type="string">
+          <name>Flash policy config file</name>
+          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+          <description>The path to where the flash policy config file can be reached.</description>
+        </variable>
+        <variable id="port" type="int">
+          <name>Flash policy port</name>
+          <value>843</value>
+          <description>The flash policy server port.</description>
+        </variable>
+      </variablelist>
+    </category>
+
     <!-- ======================================== -->
     <category id="sfa_plc">
       <name></name>
@@ -250,29 +276,41 @@ Thierry Parmentelat
       </variablelist>
     </category>
 
+    <!-- ======================================== -->
+    <category id="sfa_federica">
+      <name></name>
+      <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
+
+      <variablelist>
+       <variable id="url" type="string">
+         <name>XMLRPC URL</name>
+         <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
+         <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
+       </variable>
+      </variablelist>
+    </category>
 
     <!-- ======================================== -->
-    <category id="sfa_flashpolicy">
+    <category id="sfa_nova">
       <name>SFA Flash Policy</name>
-      <description>The settings that affect the flash policy server that will run
-      as part of this SFA instance.</description>
-
+      <description>The settings that affect how SFA connects to 
+                   the Nova/EC2 API</description>
       <variablelist>
-        <variable id="enabled" type="boolean">
-          <name>Enable Flash Policy Server</name>
-          <value>false</value>
-          <description>Allows this local SFA instance to run a
-          flash policy server.</description>
+        <variable id="user" type="string">
+          <name>Sfa nova user</name>
+          <value>novaadmin</value>
+          <description>Account/context to use when performing 
+                       administrative nova operations</description>
         </variable>
-        <variable id="config_file" type="string">
-          <name>Flash policy config file</name>
-          <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
-          <description>The path to where the flash policy config file can be reached.</description>
+        <variable id="api_url" type="string">
+          <name>Nova API url</name>
+          <value>127.0.0.1</value>
+          <description>The Nova/EC2 API url </description>
         </variable>
-        <variable id="port" type="int">
-          <name>Flash policy port</name>
-          <value>843</value>
-          <description>The flash policy server port.</description>
+        <variable id="api_port" type="int">
+          <name>Nova API Port</name>
+          <value>8773</value>
+          <description>The Nova/EC2 API port.</description>
         </variable>
       </variablelist>
     </category>
index 9fef0f9..4008cd1 100755 (executable)
@@ -13,15 +13,16 @@ def validator(validated_variables):
 #        raise plc_config.ConfigurationException(errStr)
 
 usual_variables = [
+    "SFA_GENERIC_FLAVOUR",
     "SFA_INTERFACE_HRN",
     "SFA_REGISTRY_ROOT_AUTH",
     "SFA_REGISTRY_HOST", 
     "SFA_AGGREGATE_HOST",
     "SFA_SM_HOST",
+    "SFA_DB_HOST",
     "SFA_PLC_URL",
     "SFA_PLC_USER",
     "SFA_PLC_PASSWORD",
-    "SFA_DB_HOST",
     ]
 
 configuration={ \
index 00d27b6..c541334 100644 (file)
@@ -9,6 +9,6 @@ HOME=/
 #
 # minute hour day-of-month month day-of-week user command
 # once or twice an hour makes sense
-0 * * * * root /usr/bin/sfa-import-plc.py         >> /var/log/sfa_import.log 2>&1
+0 * * * * root /usr/bin/sfa-import.py >> /var/log/sfa_import.log 2>&1
 # this is needed only if you run RefreshPeer
 #0 0 * * * root /usr/bin/sfa-clean-peer-records.py >> /var/log/sfa_import.log 2>&1
index 32cc1a0..7c51b48 100755 (executable)
@@ -59,6 +59,18 @@ function postgresql_check () {
     return 1
 }
 
+# use a single date of this script invocation for the dump_*_db functions.
+DATE=$(date +"%Y-%m-%d-%H-%M-%S")
+
+# Dumps the database - optional argument to specify filename suffix
+function dump_sfa_db() {
+    if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi
+    mkdir -p /usr/share/sfa/backups
+    dumpfile=/usr/share/sfa/backups/${SFA_DB_NAME}.${DATE}${suffix}.sql
+    pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile
+    echo "Saved sfa database in $dumpfile"
+    check
+}
 
 # Regenerate configuration files - almost verbatim from plc.init
 function reload () {
@@ -217,9 +229,6 @@ function db_start () {
     if ! psql -U $SFA_DB_USER -c "" $SFA_DB_NAME >/dev/null 2>&1 ; then
        createdb -U postgres --template=template0 --encoding=UNICODE --owner=$SFA_DB_USER $SFA_DB_NAME
        check
-        # install db schema
-        psql -U $SFA_DB_USER -f /usr/share/sfa/sfa.sql $SFA_DB_NAME
-       check
     fi
     check
 
@@ -249,6 +258,7 @@ function start() {
     reload
 
     db_start
+    # migrations are now handled in the code by sfa.storage.dbschema
 
     # install peer certs
     action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS 
@@ -290,8 +300,11 @@ case "$1" in
        status sfa-start.py
        RETVAL=$?
        ;;
+    dbdump)
+       dump_sfa_db
+       ;;
     *)
-       echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+       echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}"
        exit 1
        ;;
 esac
index 41331de..97e0758 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -13,11 +13,10 @@ scripts = glob("sfa/clientbin/*.py") + \
     [ 
     'config/sfa-config-tty',
     'config/gen-sfa-cm-config.py',
-    'sfa/importer/sfa-import-plc.py', 
-    'sfa/importer/sfa-nuke-plc.py', 
+    'sfa/importer/sfa-import.py', 
+    'sfa/importer/sfa-nuke.py', 
     'sfa/server/sfa-ca.py', 
     'sfa/server/sfa-start.py', 
-    'sfa/server/sfa-clean-peer-records.py', 
     'sfa/server/sfa_component_setup.py', 
     'sfatables/sfatables',
     'keyconvert/keyconvert.py',
@@ -65,7 +64,8 @@ data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml',
                ('/etc/sfatables/matches/', glob('sfatables/matches/*.xml')),
                ('/etc/sfatables/targets/', glob('sfatables/targets/*.xml')),
                ('/etc/init.d/', [ "init.d/%s"%x for x in initscripts ]),
-               ('/usr/share/sfa/', [ 'sfa/storage/sfa.sql' ] ),
+               ('/usr/share/sfa/migrations', glob('sfa/storage/migrations/*.*') ),
+               ('/usr/share/sfa/migrations/versions', glob('sfa/storage/migrations/versions/*') ),
                ('/usr/share/sfa/examples/', glob('sfa/examples/*' ) + [ 'cron.d/sfa.cron' ] ),
               ]
 
index 90f7677..c97cdf2 100644 (file)
--- a/sfa.spec
+++ b/sfa.spec
@@ -1,6 +1,6 @@
 %define name sfa
-%define version 2.0
-%define taglevel 9
+%define version 2.1
+%define taglevel 3
 
 %define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
 %global python_sitearch        %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
@@ -24,7 +24,10 @@ URL: %{SCMURL}
 Summary: the SFA python libraries
 Group: Applications/System
 BuildRequires: make
+
+Requires: myplc-config
 Requires: python >= 2.5
+Requires: pyOpenSSL >= 0.7
 Requires: m2crypto
 Requires: xmlsec1-openssl-devel
 Requires: libxslt-python
@@ -40,8 +43,10 @@ Requires: python-dateutil
 Requires: postgresql >= 8.2, postgresql-server >= 8.2
 Requires: postgresql-python
 Requires: python-psycopg2
-Requires: pyOpenSSL >= 0.7
-Requires: myplc-config
+# f8=0.4 - f12=0.5 f14=0.6 f16=0.7
+Requires: python-sqlalchemy
+Requires: python-migrate
+# the eucalyptus aggregate uses this module
 Requires: python-xmlbuilder
  
 # python 2.5 has uuid module added, for python 2.4 we still need it.
@@ -148,7 +153,7 @@ rm -rf $RPM_BUILD_ROOT
 %config /etc/sfa/default_config.xml
 %config (noreplace) /etc/sfa/aggregates.xml
 %config (noreplace) /etc/sfa/registries.xml
-/usr/share/sfa/sfa.sql
+/usr/share/sfa/migrations
 /usr/share/sfa/examples
 /var/www/html/wsdl/*.wsdl
 
@@ -161,9 +166,8 @@ rm -rf $RPM_BUILD_ROOT
 /etc/sfa/xml.xsd
 /etc/sfa/protogeni-rspec-common.xsd
 /etc/sfa/topology
-%{_bindir}/sfa-import-plc.py*
-%{_bindir}/sfa-nuke-plc.py*
-%{_bindir}/sfa-clean-peer-records.py*
+%{_bindir}/sfa-import.py*
+%{_bindir}/sfa-nuke.py*
 %{_bindir}/gen-sfa-cm-config.py*
 %{_bindir}/sfa-ca.py*
 
@@ -197,18 +201,18 @@ rm -rf $RPM_BUILD_ROOT
 %files tests
 %{_datadir}/sfa/tests
 
-### sfa-plc installs the 'sfa' service
-%post plc
+### sfa installs the 'sfa' service
+%post 
 chkconfig --add sfa
 
-%preun plc
+%preun 
 if [ "$1" = 0 ] ; then
   /sbin/service sfa stop || :
   /sbin/chkconfig --del sfa || :
 fi
 
-%postun plc
-[ "$1" -ge "1" ] && service sfa restart
+%postun
+[ "$1" -ge "1" ] && { service sfa dbdump ; service sfa restart ; } || :
 
 ### sfa-cm installs the 'sfa-cm' service
 %post cm
@@ -224,6 +228,41 @@ fi
 [ "$1" -ge "1" ] && service sfa-cm restart || :
 
 %changelog
+* Fri Feb 24 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-3
+- slice x researcher rel. in database,
+- plimporter to maintain that, as well as user.email, and more robust
+- ongoing draft for sfaadmin tool
+- support for a federica driver
+- support for a nova/euca driver
+- no more sfa-clean-peer-records script
+
+* Wed Feb 08 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-2
+- registry database has user's keys and mail (known as v0 for migrate)
+- pl importer properly maintains user's keys and mail
+- pl driver now to handle 'role' when adding person record (exp.)
+- first draft of federica driver with config section
+- SFA_GENERIC_FLAVOUR in usual variables for sfa-config-tty
+- plus, from master as of tag merged-in-sfa-2.1-2:
+- disk_image revisited
+- new nova_shell nova_driver & various tweaks for openstack
+
+* Fri Jan 27 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-1
+- uses sqlalchemy and related migrate
+- thorough migration and upgrade scheme
+- sfa-import.py and sfa-nuke.py (no more -plc), uses FLAVOUR
+- trashed dbinfo stuff in auth hierarchy
+- data model still has little more than plain records
+- checkpoint tag, not yet intended for release
+
+* Wed Jan 25 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-10
+- client: added -R --raw sfi cmdline option that displays raw server response.
+- client: request GENI RSpec by default. 
+- server: remove database dependencies from sfa.server.sfaapi.
+- server: increased default credential lifetime to 31 days.
+- bugfix: fixed bug in sfa.storage.record.SfaRecord.delete().
+- bugfix: fixed server key path in sfa.server.sfa-clean-peer-records.
+- bugfix: fixed bug in sfa.server.sfa-start.install_peer_certs(). 
 * Sat Jan 7 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-9
 - bugfix: 'geni_api' should be in the top level struct, not the code struct
 - bugfix: Display the correct host and port in 'geni_api_versions' field of the GetVersion
index 3f6f6bc..f4b9a7e 100644 (file)
@@ -238,7 +238,7 @@ class SfaClientBootstrap:
     def authority_credential_filename (self, hrn): 
         return self.credential_filename(hrn,'authority')
     def my_gid_filename (self):
-        return self.gid_filename ("user", self.hrn)
+        return self.gid_filename (self.hrn, "user")
     def gid_filename (self, hrn, type): 
         return self.fullpath ("%s.%s.gid"%(hrn,type))
     
index ca54c93..c2dc9ba 100644 (file)
@@ -1,8 +1,8 @@
-# 
+#
 # sfi.py - basic SFA command-line client
 # the actual binary in sfa/clientbin essentially runs main()
 # this module is used in sfascan
-# 
+#
 
 import sys
 sys.path.append('.')
@@ -12,6 +12,7 @@ import socket
 import datetime
 import codecs
 import pickle
+import json
 from lxml import etree
 from StringIO import StringIO
 from optparse import OptionParser
@@ -28,7 +29,8 @@ from sfa.util.config import Config
 from sfa.util.version import version_core
 from sfa.util.cache import Cache
 
-from sfa.storage.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, AuthorityRecord
+from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
+from sfa.storage.model import make_record
 
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.rspec_converter import RSpecConverter
@@ -87,16 +89,28 @@ def filter_records(type, records):
 
 
 # save methods
-def save_variable_to_file(var, filename, format="text"):
-    f = open(filename, "w")
+def save_raw_to_file(var, filename, format="text", banner=None):
+    if filename == "-":
+        # if filename is "-", send it to stdout
+        f = sys.stdout
+    else:
+        f = open(filename, "w")
+    if banner:
+        f.write(banner+"\n")
     if format == "text":
         f.write(str(var))
     elif format == "pickled":
         f.write(pickle.dumps(var))
+    elif format == "json":
+        if hasattr(json, "dumps"):
+            f.write(json.dumps(var))   # python 2.6
+        else:
+            f.write(json.write(var))   # python 2.5
     else:
         # this should never happen
         print "unknown output format", format
-
+    if banner:
+        f.write('\n'+banner+"\n")
 
 def save_rspec_to_file(rspec, filename):
     if not filename.endswith(".rspec"):
@@ -106,44 +120,35 @@ def save_rspec_to_file(rspec, filename):
     f.close()
     return
 
-def save_records_to_file(filename, recordList, format="xml"):
+def save_records_to_file(filename, record_dicts, format="xml"):
     if format == "xml":
         index = 0
-        for record in recordList:
+        for record_dict in record_dicts:
             if index > 0:
-                save_record_to_file(filename + "." + str(index), record)
+                save_record_to_file(filename + "." + str(index), record_dict)
             else:
-                save_record_to_file(filename, record)
+                save_record_to_file(filename, record_dict)
             index = index + 1
     elif format == "xmllist":
         f = open(filename, "w")
         f.write("<recordlist>\n")
-        for record in recordList:
-            record = SfaRecord(dict=record)
-            f.write('<record hrn="' + record.get_name() + '" type="' + record.get_type() + '" />\n')
+        for record_dict in record_dicts:
+            record_obj=make_record (dict=record_dict)
+            f.write('<record hrn="' + record_obj.hrn + '" type="' + record_obj.type + '" />\n')
         f.write("</recordlist>\n")
         f.close()
     elif format == "hrnlist":
         f = open(filename, "w")
-        for record in recordList:
-            record = SfaRecord(dict=record)
-            f.write(record.get_name() + "\n")
+        for record_dict in record_dicts:
+            record_obj=make_record (dict=record_dict)
+            f.write(record_obj.hrn + "\n")
         f.close()
     else:
         # this should never happen
         print "unknown output format", format
 
-def save_record_to_file(filename, record):
-    if record['type'] in ['user']:
-        record = UserRecord(dict=record)
-    elif record['type'] in ['slice']:
-        record = SliceRecord(dict=record)
-    elif record['type'] in ['node']:
-        record = NodeRecord(dict=record)
-    elif record['type'] in ['authority', 'ma', 'sa']:
-        record = AuthorityRecord(dict=record)
-    else:
-        record = SfaRecord(dict=record)
+def save_record_to_file(filename, record_dict):
+    rec_record = make_record (dict=record_dict)
-    str = record.save_to_string()
+    str = rec_record.save_as_xml()
     f=codecs.open(filename, encoding='utf-8',mode="w")
     f.write(str)
@@ -154,10 +159,9 @@ def save_record_to_file(filename, record):
 # load methods
 def load_record_from_file(filename):
     f=codecs.open(filename, encoding="utf-8", mode="r")
-    str = f.read()
+    xml_string = f.read()
     f.close()
-    record = SfaRecord(string=str)
-    return record
+    return make_record (xml=xml_string)
 
 
 import uuid
@@ -298,13 +302,6 @@ class Sfi:
                              help="output file format ([xml]|xmllist|hrnlist)", default="xml",
                              choices=("xml", "xmllist", "hrnlist"))
 
-        if command in ("status", "version"):
-           parser.add_option("-o", "--output", dest="file",
-                            help="output dictionary to file", metavar="FILE", default=None)
-           parser.add_option("-F", "--fileformat", dest="fileformat", type="choice",
-                             help="output file format ([text]|pickled)", default="text",
-                             choices=("text","pickled"))
-
         if command in ("delegate"):
            parser.add_option("-u", "--user",
                             action="store_true", dest="delegate_user", default=False,
@@ -332,6 +329,13 @@ class Sfi:
                          help="root registry", metavar="URL", default=None)
         parser.add_option("-s", "--sliceapi", dest="sm", default=None, metavar="URL",
                          help="slice API - in general a SM URL, but can be used to talk to an aggregate")
+        parser.add_option("-R", "--raw", dest="raw", default=None,
+                          help="Save raw, unparsed server response to a file")
+        parser.add_option("", "--rawformat", dest="rawformat", type="choice",
+                          help="raw file format ([text]|pickled|json)", default="text",
+                          choices=("text","pickled","json"))
+        parser.add_option("", "--rawbanner", dest="rawbanner", default=None,
+                          help="text string to write before and after raw output")
         parser.add_option("-d", "--dir", dest="sfi_dir",
                          help="config & working directory - default is %default",
                          metavar="PATH", default=Sfi.default_sfi_dir())
@@ -651,17 +655,17 @@ class Sfi:
        else:
           self.logger.critical("No such registry record file %s"%record)
           sys.exit(1)
-    
+
 
     #==========================================================================
     # Following functions implement the commands
     #
     # Registry-related commands
     #==========================================================================
-  
+
     def version(self, options, args):
         """
-        display an SFA server version (GetVersion) 
+        display an SFA server version (GetVersion)
 or version information about sfi itself
         """
         if options.version_local:
@@ -673,10 +677,11 @@ or version information about sfi itself
                 server = self.sliceapi()
             result = server.GetVersion()
             version = ReturnValue.get_value(result)
-        pprinter = PrettyPrinter(indent=4)
-        pprinter.pprint(version)
-        if options.file:
-            save_variable_to_file(version, options.file, options.fileformat)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            pprinter = PrettyPrinter(indent=4)
+            pprinter.pprint(version)
 
     def list(self, options, args):
         """
@@ -708,27 +713,16 @@ or version information about sfi itself
             self.print_help()
             sys.exit(1)
         hrn = args[0]
-        records = self.registry().Resolve(hrn, self.my_credential_string)
-        records = filter_records(options.type, records)
-        if not records:
+        record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+        record_dicts = filter_records(options.type, record_dicts)
+        if not record_dicts:
             self.logger.error("No record of type %s"% options.type)
+        records = [ make_record (dict=record_dict) for record_dict in record_dicts ]
         for record in records:
-            if record['type'] in ['user']:
-                record = UserRecord(dict=record)
-            elif record['type'] in ['slice']:
-                record = SliceRecord(dict=record)
-            elif record['type'] in ['node']:
-                record = NodeRecord(dict=record)
-            elif record['type'].startswith('authority'):
-                record = AuthorityRecord(dict=record)
-            else:
-                record = SfaRecord(dict=record)
-            if (options.format == "text"): 
-                record.dump()  
-            else:
-                print record.save_to_string() 
+            if (options.format == "text"):      record.dump()  
+            else:                               print record.save_as_xml() 
         if options.file:
-            save_records_to_file(options.file, records, options.fileformat)
+            save_records_to_file(options.file, record_dicts, options.fileformat)
         return
     
     def add(self, options, args):
@@ -739,7 +733,7 @@ or version information about sfi itself
             sys.exit(1)
         record_filepath = args[0]
         rec_file = self.get_record_file(record_filepath)
-        record = load_record_from_file(rec_file).as_dict()
+        record = load_record_from_file(rec_file).todict()
         return self.registry().Register(record, auth_cred)
     
     def update(self, options, args):
@@ -749,14 +743,14 @@ or version information about sfi itself
             sys.exit(1)
         rec_file = self.get_record_file(args[0])
         record = load_record_from_file(rec_file)
-        if record['type'] == "user":
-            if record.get_name() == self.user:
+        if record.type == "user":
+            if record.hrn == self.user:
                 cred = self.my_credential_string
             else:
                 cred = self.my_authority_credential_string()
-        elif record['type'] in ["slice"]:
+        elif record.type in ["slice"]:
             try:
-                cred = self.slice_credential_string(record.get_name())
+                cred = self.slice_credential_string(record.hrn)
             except ServerException, e:
                # XXX smbaker -- once we have better error return codes, update this
                # to do something better than a string compare
@@ -764,14 +758,14 @@ or version information about sfi itself
                    cred = self.my_authority_credential_string()
                else:
                    raise
-        elif record.get_type() in ["authority"]:
+        elif record.type in ["authority"]:
             cred = self.my_authority_credential_string()
-        elif record.get_type() == 'node':
+        elif record.type == 'node':
             cred = self.my_authority_credential_string()
         else:
-            raise "unknown record type" + record.get_type()
-        record = record.as_dict()
-        return self.registry().Update(record, cred)
+            raise "unknown record type" + record.type
+        record_dict = record.todict()
+        return self.registry().Update(record_dict, cred)
   
     def remove(self, options, args):
         "remove registry record by name (Remove)"
@@ -802,9 +796,12 @@ or version information about sfi itself
        api_options['call_id']=unique_call_id()
         result = server.ListSlices(creds, *self.ois(server,api_options))
         value = ReturnValue.get_value(result)
-        display_list(value)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            display_list(value)
         return
-    
+
     # show rspec for named slice
     def resources(self, options, args):
         """
@@ -821,9 +818,9 @@ or with an slice hrn, shows currently provisioned resources
             creds.append(self.my_credential_string)
         if options.delegate:
             creds.append(self.delegate_cred(cred, get_authority(self.authority)))
-       
+
         # no need to check if server accepts the options argument since the options has
-        # been a required argument since v1 API   
+        # been a required argument since v1 API
         api_options = {}
         # always send call_id to v2 servers
         api_options ['call_id'] = unique_call_id()
@@ -846,15 +843,18 @@ or with an slice hrn, shows currently provisioned resources
                 # just request the version the client wants
                 api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
             else:
-                api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}    
+                api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
         else:
-            api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}    
+            api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
         result = server.ListResources (creds, api_options)
         value = ReturnValue.get_value(result)
-        if options.file is None:
-            display_rspec(value, options.format)
-        else:
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        if options.file is not None:
             save_rspec_to_file(value, options.file)
+        if (self.options.raw is None) and (options.file is None):
+            display_rspec(value, options.format)
+
         return
 
     def create(self, options, args):
@@ -880,8 +880,8 @@ or with an slice hrn, shows currently provisioned resources
             #    delegated_cred = self.delegate_cred(slice_cred, server_version['hrn'])
             #elif server_version.get('urn'):
             #    delegated_cred = self.delegate_cred(slice_cred, urn_to_hrn(server_version['urn']))
-                
-        # rspec 
+
+        # rspec
         rspec_file = self.get_rspec_file(args[1])
         rspec = open(rspec_file).read()
 
@@ -907,8 +907,8 @@ or with an slice hrn, shows currently provisioned resources
             else:
                 print >>sys.stderr, "\r\n \r\n \r\n WOOOOOO"
                 users = sfa_users_arg(user_records, slice_record)
-        
-        # do not append users, keys, or slice tags. Anything 
+
+        # do not append users, keys, or slice tags. Anything
         # not contained in this request will be removed from the slice
 
         # CreateSliver has supported the options argument for a while now so it should
@@ -919,10 +919,13 @@ or with an slice hrn, shows currently provisioned resources
 
         result = server.CreateSliver(slice_urn, creds, rspec, users, *self.ois(server, api_options))
         value = ReturnValue.get_value(result)
-        if options.file is None:
-            print value
-        else:
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        if options.file is not None:
             save_rspec_to_file (value, options.file)
+        if (self.options.raw is None) and (options.file is None):
+            print value
+
         return value
 
     def delete(self, options, args):
@@ -946,8 +949,12 @@ or with an slice hrn, shows currently provisioned resources
         api_options = {}
         api_options ['call_id'] = unique_call_id()
         result = server.DeleteSliver(slice_urn, creds, *self.ois(server, api_options ) )
-        # xxx no ReturnValue ??
-        return result
+        value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
+        return value 
   
     def status(self, options, args):
         """
@@ -968,12 +975,13 @@ or with an slice hrn, shows currently provisioned resources
 
         # options and call_id when supported
         api_options = {}
-       api_options['call_id']=unique_call_id()
+        api_options['call_id']=unique_call_id()
         result = server.SliverStatus(slice_urn, creds, *self.ois(server,api_options))
         value = ReturnValue.get_value(result)
-        print value
-        if options.file:
-            save_variable_to_file(value, options.file, options.fileformat)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
 
     def start(self, options, args):
         """
@@ -992,7 +1000,13 @@ or with an slice hrn, shows currently provisioned resources
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
         # xxx Thierry - does this not need an api_options as well ?
-        return server.Start(slice_urn, creds)
+        result = server.Start(slice_urn, creds)
+        value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
+        return value
     
     def stop(self, options, args):
         """
@@ -1008,7 +1022,13 @@ or with an slice hrn, shows currently provisioned resources
         if options.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        return server.Stop(slice_urn, creds)
+        result =  server.Stop(slice_urn, creds)
+        value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
+        return value
     
     # reset named slice
     def reset(self, options, args):
@@ -1025,7 +1045,13 @@ or with an slice hrn, shows currently provisioned resources
         if options.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        return server.reset_slice(creds, slice_urn)
+        result = server.reset_slice(creds, slice_urn)
+        value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
+        return value
 
     def renew(self, options, args):
         """
@@ -1048,6 +1074,10 @@ or with an slice hrn, shows currently provisioned resources
        api_options['call_id']=unique_call_id()
         result =  server.RenewSliver(slice_urn, creds, time, *self.ois(server,api_options))
         value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
         return value
 
 
@@ -1065,7 +1095,13 @@ or with an slice hrn, shows currently provisioned resources
         if options.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        return server.Shutdown(slice_urn, creds)         
+        result = server.Shutdown(slice_urn, creds)
+        value = ReturnValue.get_value(result)
+        if self.options.raw:
+            save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+        else:
+            print value
+        return value         
     
 
     def get_ticket(self, options, args):
diff --git a/sfa/clientbin/sfaadmin.py b/sfa/clientbin/sfaadmin.py
new file mode 100755 (executable)
index 0000000..1531886
--- /dev/null
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+import sys
+import copy
+from pprint import pformat 
+from sfa.generic import Generic
+from optparse import OptionParser
+from pprint import PrettyPrinter
+from sfa.util.xrn import Xrn
+from sfa.storage.record import SfaRecord 
+from sfa.client.sfi import save_records_to_file
+pprinter = PrettyPrinter(indent=4)
+
+
+def args(*args, **kwargs):
+    def _decorator(func):
+        func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
+        return func
+    return _decorator
+
+class Commands(object):
+
+    def _get_commands(self):
+        available_methods = []
+        for attrib in dir(self):
+            if callable(getattr(self, attrib)) and not attrib.startswith('_'):
+                available_methods.append(attrib)
+        return available_methods         
+
+class RegistryCommands(Commands):
+
+    def __init__(self, *args, **kwds):
+        self.api= Generic.the_flavour().make_api(interface='registry')
+    def version(self):
+        version = self.api.manager.GetVersion(self.api, {})
+        pprinter.pprint(version)
+
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn') 
+    @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) 
+    def list(self, xrn, type=None):
+        xrn = Xrn(xrn, type) 
+        records = self.api.manager.List(self.api, xrn.get_hrn())
+        for record in records:
+            if not type or record['type'] == type:
+                print "%s (%s)" % (record['hrn'], record['type'])
+
+
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn') 
+    @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) 
+    @args('-o', '--outfile', dest='outfile', metavar='<outfile>', help='save record to file') 
+    @args('-f', '--format', dest='format', metavar='<display>', type='choice', 
+          choices=('text', 'xml', 'simple'), help='display record in different formats') 
+    def show(self, xrn, type=None, format=None, outfile=None):
+        records = self.api.manager.Resolve(self.api, xrn, type, True)
+        for record in records:
+            sfa_record = SfaRecord(dict=record)
+            sfa_record.dump(format) 
+        if outfile:
+            save_records_to_file(outfile, records)                
+
+    def register(self, record):
+        pass
+
+    def update(self, record):
+        pass
+        
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn') 
+    @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) 
+    def remove(self, xrn, type=None):
+        xrn = Xrn(xrn, type)
+        self.api.manager.Remove(self.api, xrn)            
+
+
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn') 
+    @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) 
+    def credential(self, xrn, type=None):
+        cred = self.api.manager.GetCredential(self.api, xrn, type, self.api.hrn)
+        print cred
+    
+    
+class CertificateCommands(Commands):
+    
+    def import_records(self, xrn):
+        pass
+
+    def export(self, xrn):
+        pass
+
+    def display(self, xrn):
+        pass
+
+    def nuke(self):
+        pass  
+
+class AggregateCommands(Commands):
+
+    def __init__(self, *args, **kwds):
+        self.api= Generic.the_flavour().make_api(interface='aggregate')
+   
+    def version(self):
+        version = self.api.manager.GetVersion(self.api, {})
+        pprinter.pprint(version)
+
+    def slices(self):
+        print self.api.manager.ListSlices(self.api, [], {})
+
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn') 
+    def status(self, xrn):
+        urn = Xrn(xrn, 'slice').get_urn()
+        status = self.api.manager.SliverStatus(self.api, urn, [], {})
+        pprinter.pprint(status)
+    @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
+    @args('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>', 
+          default='GENI', help='version/format of the resulting rspec response')  
+    def resources(self, xrn=None, rspec_version='GENI'):
+        options = {'geni_rspec_version': rspec_version}
+        if xrn:
+            options['geni_slice_urn'] = xrn
+        resources = self.api.manager.ListResources(self.api, [], options)
+        pprinter.pprint(resources)
+        
+    def create(self, xrn, rspec):
+        pass
+
+    def delete(self, xrn):
+        pass 
+    
+    def start(self, xrn):
+        pass
+
+    def stop(self, xrn):
+        pass      
+
+    def reset(self, xrn):
+        pass
+
+    def ticket(self):
+        pass
+
+
+class SliceManagerCommands(AggregateCommands):
+    
+    def __init__(self, *args, **kwds):
+        self.api= Generic.the_flavour().make_api(interface='slicemgr')
+
+
+CATEGORIES = {'registry': RegistryCommands,
+              'aggregate': AggregateCommands,
+              'slicemgr': SliceManagerCommands}
+
+def main():
+    argv = copy.deepcopy(sys.argv)
+    script_name = argv.pop(0)
+    if len(argv) < 1:
+        print script_name + " category action [<args>]"
+        print "Available categories:"
+        for k in CATEGORIES:
+            print "\t%s" % k
+        sys.exit(2)
+
+    category = argv.pop(0)
+    usage = "%%prog %s action <args> [options]" % (category)
+    parser = OptionParser(usage=usage)
+    command_class =  CATEGORIES[category]
+    command_instance = command_class()
+    actions = command_instance._get_commands()
+    if len(argv) < 1:
+        if hasattr(command_instance, '__call__'):
+            action = ''
+            command = command_instance.__call__
+        else:
+            print script_name + " category action [<args>]"
+            print "Available actions for %s category:" % category
+            for k in actions:
+                print "\t%s" % k 
+            sys.exit(2)
+    else:
+        action = argv.pop(0)
+        command = getattr(command_instance, action)
+
+    options = getattr(command, 'options', [])
+    usage = "%%prog %s %s <args> [options]" % (category, action)
+    parser = OptionParser(usage=usage)
+    for arg, kwd in options:
+        parser.add_option(*arg, **kwd)
+    (opts, cmd_args) = parser.parse_args(argv)
+    cmd_kwds = vars(opts)
+
+    # dont overrride meth
+    for k, v in cmd_kwds.items():
+        if v is None:
+            del cmd_kwds[k]
+
+    try:
+        command(*cmd_args, **cmd_kwds)
+        sys.exit(0)
+    except TypeError:
+        print "Possible wrong number of arguments supplied"
+        print command.__doc__
+        parser.print_help()
+        #raise
+        raise
+    except Exception:
+        print "Command failed, please check log for more info"
+        raise
+
+
+if __name__ == '__main__':
+    main()
+    
+     
+        
+     
diff --git a/sfa/federica/__init__.py b/sfa/federica/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/federica/fddriver.py b/sfa/federica/fddriver.py
new file mode 100644 (file)
index 0000000..1e16d7f
--- /dev/null
@@ -0,0 +1,112 @@
+from sfa.util.sfalogging import logger
+from sfa.util.faults import SfaFault
+
+# this is probably too big to swallow but for a starting point..
+from sfa.plc.pldriver import PlDriver
+
+from sfa.federica.fdshell import FdShell
+
+# hardwired for now
+# this could/should be obtained by issuing getRSpecVersion
+federica_version_string="RSpecV2"
+
+#### avail. methods on the federica side as of 2012/02/13
+# listAvailableResources(String credentials, String rspecVersion) 
+# listSliceResources(String credentials, String rspecVersion, String sliceUrn) 
+# createSlice(String credentials, String sliceUrn, String rspecVersion, String rspecString) 
+# deleteSlice(String credentials, String sliceUrn) 
+# listSlices() 
+# getRSpecVersion()
+##### all return
+# Result: {'code': 0, 'value': RSpec} if success
+#      {'code': code_id, 'output': Error message} if error
+
+class FdDriver (PlDriver):
+
+    def __init__ (self,config): 
+        PlDriver.__init__ (self, config)
+        self.shell=FdShell(config)
+
+    # the agreement with the federica driver is for them to expose results in a way
+    # compliant with the avpi v2 return code, i.e. a dict with 'code' 'value' 'output'
+    # essentially, either 'code'==0, then 'value' is set to the actual result
+    # otherwise, 'code' is set to an error code and 'output' holds an error message
+    def response (self, from_xmlrpc):
+        if isinstance (from_xmlrpc, dict) and 'code' in from_xmlrpc:
+            if from_xmlrpc['code']==0:
+                return from_xmlrpc['value']
+            else:
+                raise SfaFault(from_xmlrpc['code'],from_xmlrpc['output'])
+        else:
+            logger.warning("unexpected result from federica xmlrpc api")
+            return from_xmlrpc
+
+    def aggregate_version (self):
+        result={}
+        federica_version_string_api = self.response(self.shell.getRSpecVersion())
+        result ['federica_version_string_api']=federica_version_string_api
+        if federica_version_string_api != federica_version_string:
+            result['WARNING']="hard-wired rspec version %s differs from what the API currently exposes"%\
+                        federica_version_string
+        return result
+
+    def testbed_name (self):
+        return "federica"
+
+    def list_slices (self, creds, options):
+        return self.response(self.shell.listSlices())
+
+    def sliver_status (self, slice_urn, slice_hrn):
+        return "fddriver.sliver_status: undefined/todo for slice %s"%slice_hrn
+
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        # right now rspec_version is ignored on the federica side
+        # we normally derive it from options
+        # look in cache if client has requested so
+        cached_requested = options.get('cached', True) 
+        # global advertisement
+        if not slice_hrn:
+            # self.cache is initialized unless the global config has it turned off
+            if cached_requested and self.cache:
+                # using federica_version_string as the key into the cache
+                rspec = self.cache.get(federica_version_string)
+                if rspec:
+                    logger.debug("FdDriver.ListResources: returning cached advertisement")
+                    return self.response(rspec)
+            # otherwise, need to get it
+                # java code expects creds as a String
+#            rspec = self.shell.listAvailableResources (creds, federica_version_string)
+            rspec = self.shell.listAvailableResources ("", federica_version_string)
+#            rspec = self.shell.listAvailableResources (federica_version_string)
+            # cache it for future use
+            if self.cache:
+                logger.debug("FdDriver.ListResources: stores advertisement in cache")
+                self.cache.add(federica_version_string, rspec)
+            return self.response(rspec)
+        # about a given slice : don't cache
+        else:
+                # java code expects creds as a String
+#            return self.response(self.shell.listSliceResources(creds, federica_version_string, slice_urn))
+            return self.response(self.shell.listSliceResources("", federica_version_string, slice_urn))
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        # right now version_string is ignored on the federica side
+        # we normally derive it from options
+                # java code expects creds as a String
+#        return self.response(self.shell.createSlice(creds, slice_urn, federica_version_string, rspec_string))
+        return self.response(self.shell.createSlice("", slice_urn, federica_version_string, rspec_string))
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        # right now version_string is ignored on the federica side
+        # we normally derive it from options
+        # xxx not sure if that's currentl supported at all
+                # java code expects creds as a String
+#        return self.response(self.shell.deleteSlice(creds, slice_urn))
+        return self.response(self.shell.deleteSlice("", slice_urn))
+
+    # for the the following methods we use what is provided by the default driver class
+    #def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+    #def start_slice (self, slice_urn, slice_xrn, creds):
+    #def stop_slice (self, slice_urn, slice_xrn, creds):
+    #def reset_slice (self, slice_urn, slice_xrn, creds):
+    #def get_ticket (self, slice_urn, slice_xrn, creds, rspec, options):
diff --git a/sfa/federica/fdshell.py b/sfa/federica/fdshell.py
new file mode 100644 (file)
index 0000000..42ec030
--- /dev/null
@@ -0,0 +1,41 @@
+import xmlrpclib
+
+from sfa.util.sfalogging import logger
+
+class FdShell:
+    """
+    A simple xmlrpc shell to a federica API server
+    This class can receive the XMLRPC calls to the federica testbed
+    For safety this is limited to a set of hard-coded calls
+    """
+    
+    direct_calls = [ 'listAvailableResources',
+                     'listSliceResources',
+                     'createSlice',
+                     'deleteSlice',
+                     'getRSpecVersion',
+                     'listSlices',
+                    ]
+
+    def __init__ ( self, config ) :
+        url=config.SFA_FEDERICA_URL
+        # xxx not sure if java xmlrpc has support for None
+        # self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
+        # xxx turn on verbosity
+        self.proxy = xmlrpclib.Server(url, verbose = True)
+
+    # xxx get credentials from the config ?
+    # right now basic auth data goes into the URL
+    # so do *not* add any credential at that point
+    def __getattr__(self, name):
+        def func(*args, **kwds):
+            if name not in FdShell.direct_calls:
+                raise Exception, "Illegal method call %s for FEDERICA driver"%(name)
+            logger.info("Issuing %s args=%s kwds=%s to federica"%\
+                            (name,args,kwds))
+#            result=getattr(self.proxy, "AggregateManager.%s"%name)(credential, *args, **kwds)
+            result=getattr(self.proxy, "AggregateManager.%s"%name)(*args, **kwds)
+            logger.debug('FdShell %s (%s) returned ... '%(name,name))
+            return result
+        return func
+
index de1a9e1..99d15bc 100644 (file)
@@ -42,6 +42,10 @@ class Generic:
         except:
             logger.log_exc("Cannot locate generic instance with flavour=%s"%flavour)
 
+    # provide default for importer_class
+    def importer_class (self): 
+        return None
+
     # in the simplest case these can be redefined to the class/module objects to be used
     # see pl.py for an example
     # some descendant of SfaApi
@@ -111,4 +115,3 @@ class Generic:
         except:
             logger.log_exc_critical(message)
         
-        
diff --git a/sfa/generic/fd.py b/sfa/generic/fd.py
new file mode 100644 (file)
index 0000000..2ba52fd
--- /dev/null
@@ -0,0 +1,11 @@
+# 
+from sfa.generic.pl import pl
+
+import sfa.federica.fddriver
+
+class fd (pl):
+
+# the fd flavour behaves like pl, except for 
+# the aggregate
+    def driver_class (self) :
+        return sfa.federica.fddriver.FdDriver
index 8920ae7..d54afc5 100644 (file)
@@ -3,13 +3,12 @@
 # 
 from sfa.generic.pl import pl
 
-import sfa.managers.aggregate_manager_max
-
 class max (pl):
 
 # the max flavour behaves like pl, except for 
 # the aggregate
     def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager_max
         return sfa.managers.aggregate_manager_max.AggregateManagerMax
 
 # I believe the component stuff is not implemented
diff --git a/sfa/generic/openstack.py b/sfa/generic/openstack.py
new file mode 100644 (file)
index 0000000..bac57dc
--- /dev/null
@@ -0,0 +1,30 @@
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+import sfa.openstack.nova_driver
+import sfa.managers.registry_manager_openstack
+import sfa.managers.aggregate_manager
+import sfa.managers.slice_manager
+
+# use pl as a model so we only redefine what's different
+from sfa.generic.pl import pl
+
+class openstack (pl):
+    
+    # the importer class
+    def importer_class (self): 
+        import sfa.importer.openstackimporter
+        return sfa.importer.openstackimporter.OpenstackImporter
+        
+    # the manager classes for the server-side services
+    def registry_manager_class (self) : 
+        return sfa.managers.registry_manager_openstack.RegistryManager
+    def aggregate_manager_class (self) :
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        return sfa.openstack.nova_driver.NovaDriver
+
+
+
index 92b7266..c8b1bc6 100644 (file)
@@ -1,8 +1,12 @@
 from sfa.generic import Generic
 
-
 class pl (Generic):
     
+    # the importer class
+    def importer_class (self): 
+        import sfa.importer.plimporter
+        return sfa.importer.plimporter.PlImporter
+        
     # use the standard api class
     def api_class (self):
         import sfa.server.sfaapi
@@ -27,9 +31,10 @@ class pl (Generic):
     # for the component mode, to be run on board planetlab nodes
     # manager class
     def component_manager_class (self):
+        import sfa.managers
         return sfa.managers.component_manager_pl
     # driver_class
     def component_driver_class (self):
+        import sfa.plc.plcomponentdriver
         return sfa.plc.plcomponentdriver.PlComponentDriver
 
-
diff --git a/sfa/importer/openstackimporter.py b/sfa/importer/openstackimporter.py
new file mode 100644 (file)
index 0000000..2bf1da3
--- /dev/null
@@ -0,0 +1,143 @@
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.trust.gid import create_uuid    
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
+
+from sfa.openstack.nova_shell import NovaShell    
+
+def load_keys(filename):
+    keys = {}
+    tmp_dict = {}
+    try:
+        execfile(filename, tmp_dict)
+        if 'keys' in tmp_dict:
+            keys = tmp_dict['keys']
+        return keys
+    except:
+        return keys
+
+def save_keys(filename, keys):
+    f = open(filename, 'w')
+    f.write("keys = %s" % str(keys))
+    f.close()
+
+class OpenstackImporter:
+
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+    def add_options (self, parser):
+        self.logger.debug ("OpenstackImporter: no options yet")
+        pass
+
+    def run (self, options):
+        # we don't have any options for now
+        self.logger.info ("OpenstackImporter.run : to do")
+
+        config = Config ()
+        interface_hrn = config.SFA_INTERFACE_HRN
+        root_auth = config.SFA_REGISTRY_ROOT_AUTH
+        shell = NovaShell (config)
+
+        # create dict of all existing sfa records
+        existing_records = {}
+        existing_hrns = []
+        key_ids = []
+        for record in dbsession.query(RegRecord):
+            existing_records[ (record.hrn, record.type,) ] = record
+            existing_hrns.append(record.hrn) 
+            
+        # Get all users
+        persons = shell.user_get_all()
+        persons_dict = {}
+        keys_filename = config.config_path + os.sep + 'person_keys.py' 
+        old_person_keys = load_keys(keys_filename)
+        person_keys = {} 
+        for person in persons:
+            hrn = config.SFA_INTERFACE_HRN + "." + person.id
+            persons_dict[hrn] = person
+            old_keys = old_person_keys.get(person.id, [])
+            keys = [k.public_key for k in shell.key_pair_get_all_by_user(person.id)]
+            person_keys[person.id] = keys
+            update_record = False
+            if old_keys != keys:
+                update_record = True
+            if hrn not in existing_hrns or \
+                   (hrn, 'user') not in existing_records or update_record:    
+                urn = hrn_to_urn(hrn, 'user')
+            
+                if keys:
+                    try:
+                        pkey = convert_public_key(keys[0])
+                    except:
+                        self.logger.log_exc('unable to convert public key for %s' % hrn)
+                        pkey = Keypair(create=True)
+                else:
+                    self.logger.warn("OpenstackImporter: person %s does not have a PL public key"%hrn)
+                    pkey = Keypair(create=True) 
+                person_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                person_record = RegUser ()
+                person_record.type='user'
+                person_record.hrn=hrn
+                person_record.gid=person_gid
+                person_record.authority=get_authority(hrn)
+                dbsession.add(person_record)
+                dbsession.commit()
+                self.logger.info("OpenstackImporter: imported person %s" % person_record)
+
+        # Get all projects
+        projects = shell.project_get_all()
+        projects_dict = {}
+        for project in projects:
+            hrn = config.SFA_INTERFACE_HRN + '.' + project.id
+            projects_dict[hrn] = project
+            if hrn not in existing_hrns or \
+            (hrn, 'slice') not in existing_records:
+                pkey = Keypair(create=True)
+                urn = hrn_to_urn(hrn, 'slice')
+                project_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                project_record = RegSlice ()
+                project_record.type='slice'
+                project_record.hrn=hrn
+                project_record.gid=project_gid
+                project_record.authority=get_authority(hrn)
+                dbsession.add(project_record)
+                dbsession.commit()
+                self.logger.info("OpenstackImporter: imported slice: %s" % project_record)  
+    
+        # remove stale records    
+        system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+        for (record_hrn, type) in existing_records.keys():
+            if record_hrn in system_records:
+                continue
+        
+            record = existing_records[(record_hrn, type)]
+            if record.peer_authority:
+                continue
+
+            if type == 'user':
+                if record_hrn in persons_dict:
+                    continue  
+            elif type == 'slice':
+                if record_hrn in projects_dict:
+                    continue
+            else:
+                continue 
+        
+            record_object = existing_records[ (record_hrn, type) ]
+            self.logger.info("OpenstackImporter: removing %s " % record)
+            dbsession.delete(record_object)
+            dbsession.commit()
+                                   
+        # save pub keys
+        self.logger.info('OpenstackImporter: saving current pub keys')
+        save_keys(keys_filename, person_keys)                
+        
diff --git a/sfa/importer/plimporter.py b/sfa/importer/plimporter.py
new file mode 100644 (file)
index 0000000..153104b
--- /dev/null
@@ -0,0 +1,374 @@
+#
+# PlanetLab importer
+# 
+# requirements
+# 
+# read the planetlab database and update the local registry database accordingly
+# (in other words, with this testbed, the SFA registry is *not* authoritative)
+# so we update the following collections
+# . authorities                 (from pl sites)
+# . node                        (from pl nodes)
+# . users+keys                  (from pl persons and attached keys)
+#                       known limitation : *one* of the ssh keys is chosen at random here
+#                       xxx todo/check xxx at the very least, when a key is known to the registry 
+#                       and is still current in plc
+#                       then we should definitely make sure to keep that one in sfa...
+# . slice+researchers           (from pl slices and attached users)
+# 
+
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.trust.gid import create_uuid    
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.plc.plshell import PlShell    
+
+def _get_site_hrn(interface_hrn, site):
+    # Hardcode 'internet2' into the hrn for sites hosting
+    # internet2 nodes. This is a special operation for some vini
+    # sites only
+    hrn = ".".join([interface_hrn, site['login_base']]) 
+    if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
+        if site['login_base'].startswith("i2") or site['login_base'].startswith("nlr"):
+            hrn = ".".join([interface_hrn, "internet2", site['login_base']])
+    return hrn
+
+
+class PlImporter:
+
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+    def add_options (self, parser):
+        # we don't have any options for now
+        pass
+
+    # hrn hash is initialized from current db
+    # remember just-created records as we go
+    # xxx might make sense to add a UNIQUE constraint in the db itself
+    def remember_record_by_hrn (self, record):
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("PlImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+
+    # ditto for pointer hash
+    def remember_record_by_pointer (self, record):
+        if record.pointer == -1:
+            self.logger.warning ("PlImporter.remember_record_by_pointer: pointer is void")
+            return
+        tuple = (record.type, record.pointer)
+        if tuple in self.records_by_type_pointer:
+            self.logger.warning ("PlImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+    def remember_record (self, record):
+        self.remember_record_by_hrn (record)
+        self.remember_record_by_pointer (record)
+
+    def locate_by_type_hrn (self, type, hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+
+    def locate_by_type_pointer (self, type, pointer):
+        return self.records_by_type_pointer.get ( (type, pointer), None)
+
+    # convenience : try to locate first based on type+pointer
+    # if so, the record was created already even if e.g. its hrn has changed meanwhile
+    # otherwise we try by type+hrn (is this truly useful ?)
+    def locate (self, type, hrn=None, pointer=-1):
+        if pointer!=-1:
+            attempt = self.locate_by_type_pointer (type, pointer)
+            if attempt : return attempt
+        if hrn is not None:
+            attempt = self.locate_by_type_hrn (type, hrn,)
+            if attempt : return attempt
+        return None
+
+    # this makes the run method a bit abstruse - out of the way
+    def create_special_vini_record (self, interface_hrn):
+        # special case for vini
+        if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
+            # create a fake internet2 site first
+            i2site = {'name': 'Internet2', 'login_base': 'internet2', 'site_id': -1}
+            site_hrn = _get_site_hrn(interface_hrn, i2site)
+            # import if hrn is not in list of existing hrns or if the hrn exists
+            # but its not a site record
+            if ( 'authority', site_hrn, ) not in self.records_by_type_hrn:
+                urn = hrn_to_urn(site_hrn, 'authority')
+                if not self.auth_hierarchy.auth_exists(urn):
+                    self.auth_hierarchy.create_auth(urn)
+                auth_info = self.auth_hierarchy.get_auth_info(urn)
+                auth_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                           pointer=i2site['site_id'],
+                                           authority=get_authority(site_hrn))
+                auth_record.just_created()
+                dbsession.add(auth_record)
+                dbsession.commit()
+                self.logger.info("PlImporter: Imported authority (vini site) %s"%auth_record)
+                self.remember_record ( auth_record )
+
+    def run (self, options):
+        config = Config ()
+        interface_hrn = config.SFA_INTERFACE_HRN
+        root_auth = config.SFA_REGISTRY_ROOT_AUTH
+        shell = PlShell (config)
+
+        ######## retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+
+        # create hash by (type,hrn) 
+        # we essentially use this to know if a given record is already known to SFA 
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+        # create hash by (type,pointer) 
+        self.records_by_type_pointer = \
+            dict ( [ ( (record.type, record.pointer) , record ) for record in all_records 
+                     if record.pointer != -1] )
+
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records: record.stale=True
+
+        ######## retrieve PLC data
+        # Get all plc sites
+        # retrieve only required stuff
+        sites = shell.GetSites({'peer_id': None, 'enabled' : True},
+                               ['site_id','login_base','node_ids','slice_ids','person_ids',])
+        # create a hash of sites by login_base
+#        sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
+        # Get all plc users
+        persons = shell.GetPersons({'peer_id': None, 'enabled': True}, 
+                                   ['person_id', 'email', 'key_ids', 'site_ids'])
+        # create a hash of persons by person_id
+        persons_by_id = dict ( [ ( person['person_id'], person) for person in persons ] )
+        # Get all plc public keys
+        # accumulate key ids for keys retrieval
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids} )
+        # create a hash of keys by key_id
+        keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] ) 
+        # create a dict person_id -> [ (plc)keys ]
+        keys_by_person_id = {} 
+        for person in persons:
+            pubkeys = []
+            for key_id in person['key_ids']:
+                pubkeys.append(keys_by_id[key_id])
+            keys_by_person_id[person['person_id']] = pubkeys
+        # Get all plc nodes  
+        nodes = shell.GetNodes( {'peer_id': None}, ['node_id', 'hostname', 'site_id'])
+        # create hash by node_id
+        nodes_by_id = dict ( [ ( node['node_id'], node, ) for node in nodes ] )
+        # Get all plc slices
+        slices = shell.GetSlices( {'peer_id': None}, ['slice_id', 'name', 'person_ids'])
+        # create hash by slice_id
+        slices_by_id = dict ( [ (slice['slice_id'], slice ) for slice in slices ] )
+
+        # isolate special vini case in separate method
+        self.create_special_vini_record (interface_hrn)
+
+        # start importing 
+        for site in sites:
+            site_hrn = _get_site_hrn(interface_hrn, site)
+            # import if hrn is not in list of existing hrns or if the hrn exists
+            # but its not a site record
+            site_record=self.locate ('authority', site_hrn, site['site_id'])
+            if not site_record:
+                try:
+                    urn = hrn_to_urn(site_hrn, 'authority')
+                    if not self.auth_hierarchy.auth_exists(urn):
+                        self.auth_hierarchy.create_auth(urn)
+                    auth_info = self.auth_hierarchy.get_auth_info(urn)
+                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                               pointer=site['site_id'],
+                                               authority=get_authority(site_hrn))
+                    site_record.just_created()
+                    dbsession.add(site_record)
+                    dbsession.commit()
+                    self.logger.info("PlImporter: imported authority (site) : %s" % site_record) 
+                    self.remember_record (site_record)
+                except:
+                    # if the site import fails then there is no point in trying to import the
+                    # site's child records (node, slices, persons), so skip them.
+                    self.logger.log_exc("PlImporter: failed to import site. Skipping child records") 
+                    continue 
+            else:
+                # xxx update the record ...
+                pass
+            site_record.stale=False
+             
+            # import node records
+            for node_id in site['node_ids']:
+                try:
+                    node = nodes_by_id[node_id]
+                except:
+                    self.logger.warning ("PlImporter: cannot find node_id %s - ignored"%node_id)
+                    continue 
+                site_auth = get_authority(site_hrn)
+                site_name = site['login_base']
+                hrn =  hostname_to_hrn(site_auth, site_name, node['hostname'])
+                # xxx this sounds suspicious
+                if len(hrn) > 64: hrn = hrn[:64]
+                node_record = self.locate ( 'node', hrn , node['node_id'] )
+                if not node_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(hrn, 'node')
+                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        node_record = RegNode (hrn=hrn, gid=node_gid, 
+                                               pointer =node['node_id'],
+                                               authority=get_authority(hrn))
+                        node_record.just_created()
+                        dbsession.add(node_record)
+                        dbsession.commit()
+                        self.logger.info("PlImporter: imported node: %s" % node_record)  
+                        self.remember_record (node_record)
+                    except:
+                        self.logger.log_exc("PlImporter: failed to import node") 
+                else:
+                    # xxx update the record ...
+                    pass
+                node_record.stale=False
+
+            # import persons
+            for person_id in site['person_ids']:
+                try:
+                    person = persons_by_id[person_id]
+                except:
+                    self.logger.warning ("PlImporter: cannot locate person_id %s - ignored"%person_id); continue
+                person_hrn = email_to_hrn(site_hrn, person['email'])
+                # xxx suspicious again
+                if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+                person_urn = hrn_to_urn(person_hrn, 'user')
+
+                user_record = self.locate ( 'user', person_hrn, person['person_id'])
+
+                # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+                def init_person_key (person, plc_keys):
+                    pubkey=None
+                    if  person['key_ids']:
+                        # randomly pick first key in set
+                        pubkey = plc_keys[0]
+                        try:
+                            pkey = convert_public_key(pubkey['key'])
+                        except:
+                            self.logger.warn('PlImporter: unable to convert public key for %s' % person_hrn)
+                            pkey = Keypair(create=True)
+                    else:
+                        # the user has no keys. Creating a random keypair for the user's gid
+                        self.logger.warn("PlImporter: person %s does not have a PL public key"%person_hrn)
+                        pkey = Keypair(create=True)
+                    return (pubkey, pkey)
+
+                # new person
+                try:
+                    plc_keys = keys_by_person_id.get(person['person_id'],[])
+                    if not user_record:
+                        (pubkey,pkey) = init_person_key (person, plc_keys )
+                        person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                        person_gid.set_email(person['email'])
+                        user_record = RegUser (hrn=person_hrn, gid=person_gid, 
+                                                 pointer=person['person_id'], 
+                                                 authority=get_authority(person_hrn),
+                                                 email=person['email'])
+                        if pubkey: 
+                            user_record.reg_keys=[RegKey (pubkey['key'], pubkey['key_id'])]
+                        else:
+                            self.logger.warning("No key found for user %s"%user_record)
+                        user_record.just_created()
+                        dbsession.add (user_record)
+                        dbsession.commit()
+                        self.logger.info("PlImporter: imported person: %s" % user_record)
+                        self.remember_record ( user_record )
+                    else:
+                        # update the record ?
+                        # if user's primary key has changed then we need to update the 
+                        # users gid by forcing an update here
+                        sfa_keys = user_record.reg_keys
+                        def key_in_list (key,sfa_keys):
+                            for reg_key in sfa_keys:
+                                if reg_key.key==key['key']: return True
+                            return False
+                        # is there a new key in myplc ?
+                        new_keys=False
+                        for key in plc_keys:
+                            if not key_in_list (key,sfa_keys):
+                                new_keys = True
+                        if new_keys:
+                            (pubkey,pkey) = init_person_key (person, plc_keys)
+                            person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+                            if not pubkey:
+                                user_record.reg_keys=[]
+                            else:
+                                user_record.reg_keys=[ RegKey (pubkey['key'], pubkey['key_id'])]
+                            self.logger.info("PlImporter: updated person: %s" % user_record)
+                    user_record.email = person['email']
+                    dbsession.commit()
+                    user_record.stale=False
+                except:
+                    self.logger.log_exc("PlImporter: failed to import person %d %s"%(person['person_id'],person['email']))
+    
+            # import slices
+            for slice_id in site['slice_ids']:
+                try:
+                    slice = slices_by_id[slice_id]
+                except:
+                    self.logger.warning ("PlImporter: cannot locate slice_id %s - ignored"%slice_id); continue
+                slice_hrn = slicename_to_hrn(interface_hrn, slice['name'])
+                slice_record = self.locate ('slice', slice_hrn, slice['slice_id'])
+                if not slice_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(slice_hrn, 'slice')
+                        slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid, 
+                                                 pointer=slice['slice_id'],
+                                                 authority=get_authority(slice_hrn))
+                        slice_record.just_created()
+                        dbsession.add(slice_record)
+                        dbsession.commit()
+                        self.logger.info("PlImporter: imported slice: %s" % slice_record)  
+                        self.remember_record ( slice_record )
+                    except:
+                        self.logger.log_exc("PlImporter: failed to import slice")
+                else:
+                    # xxx update the record ...
+                    self.logger.warning ("Slice update not yet implemented")
+                    pass
+                # record current users affiliated with the slice
+                slice_record.reg_researchers = \
+                    [ self.locate_by_type_pointer ('user',user_id) for user_id in slice['person_ids'] ]
+                dbsession.commit()
+                slice_record.stale=False
+
+        ### remove stale records
+        # special records must be preserved
+        system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+        for record in all_records: 
+            if record.hrn in system_hrns: 
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+            if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
+                record.hrn.endswith("internet2"):
+                record.stale=False
+
+        for record in all_records:
+            try:        stale=record.stale
+            except:     
+                stale=True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("PlImporter: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()
diff --git a/sfa/importer/sfa-import-openstack.py b/sfa/importer/sfa-import-openstack.py
deleted file mode 100755 (executable)
index ec785e8..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/usr/bin/python
-#
-##
-# Import PLC records into the SFA database. It is indended that this tool be
-# run once to create SFA records that reflect the current state of the
-# planetlab database.
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-import os
-import getopt
-import sys
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.trust.gid import create_uuid
-from sfa.importer.sfaImport import sfaImport, _cleanup_string
-from sfa.util.sfalogging import logger
-from sfa.openstack.openstack_shell import OpenstackShell    
-
-def process_options():
-
-   (options, args) = getopt.getopt(sys.argv[1:], '', [])
-   for opt in options:
-       name = opt[0]
-       val = opt[1]
-
-
-def load_keys(filename):
-    keys = {}
-    tmp_dict = {}
-    try:
-        execfile(filename, tmp_dict)
-        if 'keys' in tmp_dict:
-            keys = tmp_dict['keys']
-        return keys
-    except:
-        return keys
-
-def save_keys(filename, keys):
-    f = open(filename, 'w')
-    f.write("keys = %s" % str(keys))
-    f.close()
-
-def main():
-
-    process_options()
-    config = Config()
-    sfaImporter = sfaImport()
-    logger=sfaImporter.logger
-    logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
-    if not config.SFA_REGISTRY_ENABLED:
-        sys.exit(0)
-    root_auth = config.SFA_REGISTRY_ROOT_AUTH
-    interface_hrn = config.SFA_INTERFACE_HRN
-    shell = OpenstackShell(config)
-    sfaImporter.create_top_level_records()
-    
-    # create dict of all existing sfa records
-    existing_records = {}
-    existing_hrns = []
-    key_ids = []
-    table = SfaTable()
-    results = table.find()
-    for result in results:
-        existing_records[(result['hrn'], result['type'])] = result
-        existing_hrns.append(result['hrn']) 
-            
-        
-    # Get all users
-    persons = shell.user_get_all()
-    persons_dict = {}
-    keys_filename = config.config_path + os.sep + 'person_keys.py' 
-    old_person_keys = load_keys(keys_filename)    
-    person_keys = {} 
-    for person in persons:
-        hrn = config.SFA_INTERFACE_HRN + "." + person.id
-        persons_dict[hrn] = person
-        old_keys = old_person_keys.get(person.id, [])
-        keys = [k.public_key for k in shell.key_pair_get_all_by_user(person.id)]
-        person_keys[person.id] = keys
-        update_record = False
-        if old_keys != keys:
-            update_record = True
-        if hrn not in existing_hrns or \
-               (hrn, 'user') not in existing_records or update_record:    
-            urn = hrn_to_urn(hrn, 'user')
-            
-            if keys:
-                try:
-                    pkey = convert_public_key(keys[0])
-                except:
-                    logger.log_exc('unable to convert public key for %s' % hrn)
-                    pkey = Keypair(create=True)
-            else:
-                logger.warn("Import: person %s does not have a PL public key"%hrn)
-                pkey = Keypair(create=True) 
-            person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-            person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
-                                          authority=get_authority(hrn))
-            logger.info("Import: importing %s " % person_record.summary_string())
-            person_record.sync()
-
-    # Get all projects
-    projects = shell.project_get_all()
-    projects_dict = {}
-    for project in projects:
-        hrn = config.SFA_INTERFACE_HRN + '.' + project.id
-        projects_dict[hrn] = project
-        if hrn not in existing_hrns or \
-        (hrn, 'slice') not in existing_records:
-            pkey = Keypair(create=True)
-            urn = hrn_to_urn(hrn, 'slice')
-            project_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-            project_record = SfaRecord(hrn=hrn, gid=project_gid, type="slice",
-                                       authority=get_authority(hrn))
-            projects_dict[project_record['hrn']] = project_record
-            logger.info("Import: importing %s " % project_record.summary_string())
-            project_record.sync() 
-    
-    # remove stale records    
-    system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
-    for (record_hrn, type) in existing_records.keys():
-        if record_hrn in system_records:
-            continue
-        
-        record = existing_records[(record_hrn, type)]
-        if record['peer_authority']:
-            continue
-
-        if type == 'user':
-            if record_hrn in persons_dict:
-                continue  
-        elif type == 'slice':
-            if record_hrn in projects_dict:
-                continue
-        else:
-            continue 
-        
-        record_object = existing_records[(record_hrn, type)]
-        record = SfaRecord(dict=record_object)
-        logger.info("Import: removing %s " % record.summary_string())
-        record.delete()
-                                   
-    # save pub keys
-    logger.info('Import: saving current pub keys')
-    save_keys(keys_filename, person_keys)                
-        
-if __name__ == "__main__":
-    main()
diff --git a/sfa/importer/sfa-import-plc.py b/sfa/importer/sfa-import-plc.py
deleted file mode 100755 (executable)
index 723f473..0000000
+++ /dev/null
@@ -1,342 +0,0 @@
-#!/usr/bin/python
-#
-##
-# Import PLC records into the SFA database. It is indended that this tool be
-# run once to create SFA records that reflect the current state of the
-# planetlab database.
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-import os
-import getopt
-import sys
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-from sfa.trust.gid import create_uuid    
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.importer.sfaImport import sfaImport, _cleanup_string
-from sfa.plc.plshell import PlShell    
-
-def process_options():
-
-   (options, args) = getopt.getopt(sys.argv[1:], '', [])
-   for opt in options:
-       name = opt[0]
-       val = opt[1]
-
-
-def load_keys(filename):
-    keys = {}
-    tmp_dict = {}
-    try:
-        execfile(filename, tmp_dict)
-        if 'keys' in tmp_dict:
-            keys = tmp_dict['keys']
-        return keys
-    except:
-        return keys
-
-def save_keys(filename, keys):
-    f = open(filename, 'w')
-    f.write("keys = %s" % str(keys))
-    f.close()
-
-def _get_site_hrn(interface_hrn, site):
-    # Hardcode 'internet2' into the hrn for sites hosting
-    # internet2 nodes. This is a special operation for some vini
-    # sites only
-    hrn = ".".join([interface_hrn, site['login_base']]) 
-    if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
-        if site['login_base'].startswith("i2") or site['login_base'].startswith("nlr"):
-            hrn = ".".join([interface_hrn, "internet2", site['login_base']])
-    return hrn
-
-def main():
-
-    process_options()
-    config = Config()
-    if not config.SFA_REGISTRY_ENABLED:
-        sys.exit(0)
-    root_auth = config.SFA_REGISTRY_ROOT_AUTH
-    interface_hrn = config.SFA_INTERFACE_HRN
-    keys_filename = config.config_path + os.sep + 'person_keys.py' 
-    sfaImporter = sfaImport()
-    sfaImporter.create_top_level_records()
-    logger=sfaImporter.logger
-    logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
-    shell = PlShell (config)
-    
-    # create dict of all existing sfa records
-    existing_records = {}
-    existing_hrns = []
-    key_ids = []
-    person_keys = {} 
-    table = SfaTable()
-    results = table.find()
-    for result in results:
-        existing_records[(result['hrn'], result['type'])] = result
-        existing_hrns.append(result['hrn']) 
-            
-    # Get all plc sites
-    sites = shell.GetSites({'peer_id': None})
-    sites_dict = {}
-    for site in sites:
-        sites_dict[site['login_base']] = site 
-    
-    # Get all plc users
-    persons = shell.GetPersons({'peer_id': None, 'enabled': True}, 
-                               ['person_id', 'email', 'key_ids', 'site_ids'])
-    persons_dict = {}
-    for person in persons:
-        persons_dict[person['person_id']] = person
-        key_ids.extend(person['key_ids'])
-
-    # Get all public keys
-    keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids})
-    keys_dict = {}
-    for key in keys:
-        keys_dict[key['key_id']] = key['key']
-
-    # create a dict of person keys keyed on key_id 
-    old_person_keys = load_keys(keys_filename)
-    for person in persons:
-        pubkeys = []
-        for key_id in person['key_ids']:
-            pubkeys.append(keys_dict[key_id])
-        person_keys[person['person_id']] = pubkeys
-
-    # Get all plc nodes  
-    nodes = shell.GetNodes( {'peer_id': None}, ['node_id', 'hostname', 'site_id'])
-    nodes_dict = {}
-    for node in nodes:
-        nodes_dict[node['node_id']] = node
-
-    # Get all plc slices
-    slices = shell.GetSlices( {'peer_id': None}, ['slice_id', 'name'])
-    slices_dict = {}
-    for slice in slices:
-        slices_dict[slice['slice_id']] = slice
-
-    # special case for vini
-    if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
-        # create a fake internet2 site first
-        i2site = {'name': 'Internet2', 'abbreviated_name': 'I2',
-                    'login_base': 'internet2', 'site_id': -1}
-        site_hrn = _get_site_hrn(interface_hrn, i2site)
-        logger.info("Importing site: %s" % site_hrn)
-        # import if hrn is not in list of existing hrns or if the hrn exists
-        # but its not a site record
-        if site_hrn not in existing_hrns or \
-           (site_hrn, 'authority') not in existing_records:
-            logger.info("Import: site %s " % site_hrn)
-            urn = hrn_to_urn(site_hrn, 'authority')
-            if not sfaImporter.AuthHierarchy.auth_exists(urn):
-                sfaImporter.AuthHierarchy.create_auth(urn)
-            auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
-            auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
-                                    type="authority", pointer=site['site_id'], 
-                                    authority=get_authority(site_hrn))
-            auth_record.sync(verbose=True)
-
-    # start importing 
-    for site in sites:
-        site_hrn = _get_site_hrn(interface_hrn, site)
-        logger.info("Importing site: %s" % site_hrn)
-
-        # import if hrn is not in list of existing hrns or if the hrn exists
-        # but its not a site record
-        if site_hrn not in existing_hrns or \
-           (site_hrn, 'authority') not in existing_records:
-            try:
-                logger.info("Import: site %s " % site_hrn)
-                urn = hrn_to_urn(site_hrn, 'authority')
-                if not sfaImporter.AuthHierarchy.auth_exists(urn):
-                    sfaImporter.AuthHierarchy.create_auth(urn)
-                auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
-                auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
-                                        type="authority", pointer=site['site_id'], 
-                                        authority=get_authority(site_hrn))
-                logger.info("Import: importing site: %s" % auth_record.summary_string())  
-                auth_record.sync()
-            except:
-                # if the site import fails then there is no point in trying to import the
-                # site's child records (node, slices, persons), so skip them.
-                logger.log_exc("Import: failed to import site. Skipping child records") 
-                continue 
-             
-        # import node records
-        for node_id in site['node_ids']:
-            if node_id not in nodes_dict:
-                continue 
-            node = nodes_dict[node_id]
-            site_auth = get_authority(site_hrn)
-            site_name = get_leaf(site_hrn)
-            hrn =  hostname_to_hrn(site_auth, site_name, node['hostname'])
-            if len(hrn) > 64:
-                hrn = hrn[:64]
-            if hrn not in existing_hrns or \
-               (hrn, 'node') not in existing_records:
-                try:
-                    pkey = Keypair(create=True)
-                    urn = hrn_to_urn(hrn, 'node')
-                    node_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-                    node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'], authority=get_authority(hrn))    
-                    logger.info("Import: importing node: %s" % node_record.summary_string())  
-                    node_record.sync()
-                except:
-                    logger.log_exc("Import: failed to import node") 
-                    
-
-        # import slices
-        for slice_id in site['slice_ids']:
-            if slice_id not in slices_dict:
-                continue 
-            slice = slices_dict[slice_id]
-            hrn = slicename_to_hrn(interface_hrn, slice['name'])
-            #slicename = slice['name'].split("_",1)[-1]
-            #slicename = _cleanup_string(slicename)
-            if hrn not in existing_hrns or \
-               (hrn, 'slice') not in existing_records:
-                try:
-                    pkey = Keypair(create=True)
-                    urn = hrn_to_urn(hrn, 'slice')
-                    slice_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-                    slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'],
-                                             authority=get_authority(hrn))
-                    logger.info("Import: importing slice: %s" % slice_record.summary_string())  
-                    slice_record.sync()
-                except:
-                    logger.log_exc("Import: failed to  import slice")
-
-        # import persons
-        for person_id in site['person_ids']:
-            if person_id not in persons_dict:
-                continue 
-            person = persons_dict[person_id]
-            hrn = email_to_hrn(site_hrn, person['email'])
-            if len(hrn) > 64:
-                hrn = hrn[:64]
-
-            # if user's primary key has chnaged then we need to update the 
-            # users gid by forcing a update here
-            old_keys = []
-            new_keys = []
-            if person_id in old_person_keys:
-                old_keys = old_person_keys[person_id]
-            if person_id in person_keys:
-                new_keys = person_keys[person_id]
-            update_record = False
-            for key in new_keys:
-                if key not in old_keys:
-                    update_record = True 
-
-            if hrn not in existing_hrns or \
-               (hrn, 'user') not in existing_records or update_record:
-                try:
-                    if 'key_ids' in person and person['key_ids']:
-                        key = new_keys[0]
-                        try:
-                            pkey = convert_public_key(key)
-                        except:
-                            logger.warn('unable to convert public key for %s' % hrn)
-                            pkey = Keypair(create=True)
-                    else:
-                        # the user has no keys. Creating a random keypair for the user's gid
-                        logger.warn("Import: person %s does not have a PL public key"%hrn)
-                        pkey = Keypair(create=True) 
-                    urn = hrn_to_urn(hrn, 'user')
-                    person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-                    person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
-                                              pointer=person['person_id'], authority=get_authority(hrn))
-                    logger.info("Import: importing person: %s" % person_record.summary_string())  
-                    person_record.sync()
-                except:
-                    logger.log_exc("Import: failed to import person.") 
-    
-    # remove stale records    
-    system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
-    for (record_hrn, type) in existing_records.keys():
-        if record_hrn in system_records:
-            continue
-        
-        record = existing_records[(record_hrn, type)]
-        if record['peer_authority']:
-            continue
-
-        # dont delete vini's internet2 placeholdder record
-        # normally this would be deleted becuase it does not have a plc record 
-        if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
-           record_hrn.endswith("internet2"):     
-            continue
-
-        found = False
-        
-        if type == 'authority':    
-            for site in sites:
-                site_hrn = interface_hrn + "." + site['login_base']
-                if site_hrn == record_hrn and site['site_id'] == record['pointer']:
-                    found = True
-                    break
-
-        elif type == 'user':
-            login_base = get_leaf(get_authority(record_hrn))
-            username = get_leaf(record_hrn)
-            if login_base in sites_dict:
-                site = sites_dict[login_base]
-                for person in persons:
-                    tmp_username = person['email'].split("@")[0]
-                    alt_username = person['email'].split("@")[0].replace(".", "_").replace("+", "_")
-                    if username in [tmp_username, alt_username] and \
-                       site['site_id'] in person['site_ids'] and \
-                       person['person_id'] == record['pointer']:
-                        found = True
-                        break
-        
-        elif type == 'slice':
-            slicename = hrn_to_pl_slicename(record_hrn)
-            for slice in slices:
-                if slicename == slice['name'] and \
-                   slice['slice_id'] == record['pointer']:
-                    found = True
-                    break    
-        elif type == 'node':
-            login_base = get_leaf(get_authority(record_hrn))
-            nodename = Xrn.unescape(get_leaf(record_hrn))
-            if login_base in sites_dict:
-                site = sites_dict[login_base]
-                for node in nodes:
-                    tmp_nodename = node['hostname']
-                    if tmp_nodename == nodename and \
-                       node['site_id'] == site['site_id'] and \
-                       node['node_id'] == record['pointer']:
-                        found = True
-                        break  
-        else:
-            continue 
-        
-        if not found:
-            try:
-                record_object = existing_records[(record_hrn, type)]
-                record = SfaRecord(dict=record_object)
-                logger.info("Import: deleting record: %s" % record.summary_string())
-                record.delete()
-            except:
-                logger.log_exc("Import: failded to delete record")                    
-    # save pub keys
-    logger.info('Import: saving current pub keys')
-    save_keys(keys_filename, person_keys)                
-        
-if __name__ == "__main__":
-    main()
diff --git a/sfa/importer/sfa-import.py b/sfa/importer/sfa-import.py
new file mode 100755 (executable)
index 0000000..2788282
--- /dev/null
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+
+import sys
+
+from optparse import OptionParser
+
+from sfa.generic import Generic
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import _SfaLogger
+
+from sfa.trust.hierarchy import Hierarchy
+
+from sfa.importer.sfaimporter import SfaImporter
+
+COMMAND=sys.argv[0]
+
+def main ():
+
+    config = Config()
+    logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
+    logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
+    if not config.SFA_REGISTRY_ENABLED:
+        logger.critical("COMMAND: need SFA_REGISTRY_ENABLED to run import")
+
+    # testbed-neutral : create local certificates and the like
+    auth_hierarchy = Hierarchy ()
+    sfa_importer = SfaImporter(auth_hierarchy, logger)
+    # testbed-specific
+    testbed_importer = None
+    generic=Generic.the_flavour()
+    importer_class = generic.importer_class()
+    if importer_class:
+        logger.info ("Using flavour %s for importing (class %s)"%\
+                         (generic.flavour,importer_class.__name__))
+        testbed_importer = importer_class (auth_hierarchy, logger)
+
+    parser = OptionParser ()
+    sfa_importer.add_options (parser)
+    if testbed_importer:
+        testbed_importer.add_options (parser)
+
+    (options, args) = parser.parse_args ()
+    # no args supported ?
+    if args:
+        parser.print_help()
+        sys.exit(1)
+
+    sfa_importer.run (options)
+    if testbed_importer:
+        testbed_importer.run (parser)
+    
+
+if __name__ == '__main__':
+    main()
+
similarity index 65%
rename from sfa/importer/sfa-nuke-plc.py
rename to sfa/importer/sfa-nuke.py
index bd116c8..a4967c3 100755 (executable)
@@ -13,22 +13,31 @@ from optparse import OptionParser
 
 from sfa.util.sfalogging import logger
 
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import engine
+from sfa.storage.dbschema import DBSchema
 
 def main():
-   usage="%prog: trash the registry DB (the 'sfa' table in the 'planetlab5' database)"
+   usage="%prog: trash the registry DB"
    parser = OptionParser(usage=usage)
-   parser.add_option('-f','--file-system',dest='clean_fs',action='store_true',default=False,
-                     help='Clean up the /var/lib/sfa/authorities area as well')
-   parser.add_option('-c','--certs',dest='clean_certs',action='store_true',default=False,
-                     help='Remove all cached certs/gids found in /var/lib/sfa/authorities area as well')
+   parser.add_option("-f","--file-system",dest='clean_fs',action='store_true',default=False,
+                     help="Clean up the /var/lib/sfa/authorities area as well")
+   parser.add_option("-c","--certs",dest='clean_certs',action='store_true',default=False,
+                     help="Remove all cached certs/gids found in /var/lib/sfa/authorities area as well")
+   parser.add_option("-0","--no-reinit",dest='reinit',action='store_false',default=True,
+                     help="By default a new DB schema is installed after the cleanup; this option prevents that")
    (options,args)=parser.parse_args()
    if args:
       parser.print_help()
       sys.exit(1)
+   dbschema=DBSchema()
    logger.info("Purging SFA records from database")
-   table = SfaTable()
-   table.nuke()
+   dbschema.nuke()
+   # for convenience we re-create the schema here, so there's no need for an explicit
+   # service sfa restart
+   # however in some (upgrade) scenarios this might be wrong
+   if options.reinit:
+      logger.info("re-creating empty schema")
+      dbschema.init_or_upgrade()
 
    if options.clean_certs:
       # remove the server certificate and all gids found in /var/lib/sfa/authorities
diff --git a/sfa/importer/sfaImport.py b/sfa/importer/sfaImport.py
deleted file mode 100644 (file)
index ca0bff2..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-from sfa.util.sfalogging import _SfaLogger
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.plxrn import email_to_hrn
-from sfa.util.config import Config
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.trust.trustedroots import TrustedRoots
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-
-
-def _un_unicode(str):
-   if isinstance(str, unicode):
-       return str.encode("ascii", "ignore")
-   else:
-       return str
-
-def _cleanup_string(str):
-    # pgsql has a fit with strings that have high ascii in them, so filter it
-    # out when generating the hrns.
-    tmp = ""
-    for c in str:
-        if ord(c) < 128:
-            tmp = tmp + c
-    str = tmp
-
-    str = _un_unicode(str)
-    str = str.replace(" ", "_")
-    str = str.replace(".", "_")
-    str = str.replace("(", "_")
-    str = str.replace("'", "_")
-    str = str.replace(")", "_")
-    str = str.replace('"', "_")
-    return str
-
-class sfaImport:
-
-    def __init__(self):
-       self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
-       self.AuthHierarchy = Hierarchy()
-#       self.table = SfaTable()     
-       self.config = Config()
-       self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config))
-       self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
-
-    def create_top_level_records(self):
-        """
-        Create top level and interface records
-        """
-        # create root authority
-        interface_hrn = self.config.SFA_INTERFACE_HRN
-        self.create_top_level_auth_records(interface_hrn)
-
-        # create s user record for the slice manager
-        self.create_sm_client_record()
-
-        # create interface records
-        self.logger.info("Import: creating interface records")
-        self.create_interface_records()
-
-        # add local root authority's cert  to trusted list
-        self.logger.info("Import: adding " + interface_hrn + " to trusted list")
-        authority = self.AuthHierarchy.get_auth_info(interface_hrn)
-        self.TrustedRoots.add_gid(authority.get_gid_object())
-
-    def create_top_level_auth_records(self, hrn):
-        """
-        Create top level db records (includes root and sub authorities (local/remote)
-        """
-        # make sure parent exists
-        parent_hrn = get_authority(hrn)
-        if not parent_hrn:
-            parent_hrn = hrn
-        if not parent_hrn == hrn:
-            self.create_top_level_auth_records(parent_hrn)
-
-        # enxure key and cert exists:
-        self.AuthHierarchy.create_top_level_auth(hrn)    
-        # create the db record if it doesnt already exist    
-        auth_info = self.AuthHierarchy.get_auth_info(hrn)
-        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1, authority=get_authority(hrn))
-        self.logger.info("Import: importing %s " % auth_record.summary_string())
-        auth_record.sync()
-
-    def create_sm_client_record(self):
-        """
-        Create a user record for the Slicemanager service.
-        """
-        hrn = self.config.SFA_INTERFACE_HRN + '.slicemanager'
-        urn = hrn_to_urn(hrn, 'user')
-        if not self.AuthHierarchy.auth_exists(urn):
-            self.logger.info("Import: creating Slice Manager user")
-            self.AuthHierarchy.create_auth(urn)
-
-        auth_info = self.AuthHierarchy.get_auth_info(hrn)
-        record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), \
-                           type="user", pointer=-1, authority=get_authority(hrn))
-        self.logger.info("Import: importing %s " % record.summary_string())
-        record.sync()
-
-    def create_interface_records(self):
-        """
-        Create a record for each SFA interface
-        """
-        # just create certs for all sfa interfaces even if they
-        # arent enabled
-        hrn = self.config.SFA_INTERFACE_HRN
-        interfaces = ['authority+sa', 'authority+am', 'authority+sm']
-        table = SfaTable()
-        auth_info = self.AuthHierarchy.get_auth_info(hrn)
-        pkey = auth_info.get_pkey_object()
-        for interface in interfaces:
-            urn = hrn_to_urn(hrn, interface)
-            gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-            interface_record = SfaRecord(hrn=hrn, type=interface, pointer=-1,
-                                         gid = gid, authority=get_authority(hrn))
-            self.logger.info("Import: importing %s " % interface_record.summary_string())
-            interface_record.sync()
-             
-    def delete_record(self, hrn, type):
-        # delete the record
-        table = SfaTable()
-        record_list = table.find({'type': type, 'hrn': hrn})
-        for record in record_list:
-            self.logger.info("Import: removing record %s %s" % (type, hrn))
-            table.remove(record)        
diff --git a/sfa/importer/sfaimporter.py b/sfa/importer/sfaimporter.py
new file mode 100644 (file)
index 0000000..4f17c2d
--- /dev/null
@@ -0,0 +1,148 @@
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroots import TrustedRoots
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegUser
+
+def _un_unicode(str):
+   if isinstance(str, unicode):
+       return str.encode("ascii", "ignore")
+   else:
+       return str
+
+def _cleanup_string(str):
+    # pgsql has a fit with strings that have high ascii in them, so filter it
+    # out when generating the hrns.
+    tmp = ""
+    for c in str:
+        if ord(c) < 128:
+            tmp = tmp + c
+    str = tmp
+
+    str = _un_unicode(str)
+    str = str.replace(" ", "_")
+    str = str.replace(".", "_")
+    str = str.replace("(", "_")
+    str = str.replace("'", "_")
+    str = str.replace(")", "_")
+    str = str.replace('"', "_")
+    return str
+
+class SfaImporter:
+
+    def __init__(self, auth_hierarchy, logger):
+       self.logger=logger
+       self.auth_hierarchy = auth_hierarchy
+       config = Config()
+       self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(config))
+       self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+       self.interface_hrn = config.SFA_INTERFACE_HRN
+
+    # check before creating a RegRecord entry as we run this over and over
+    def record_exists (self, type, hrn):
+       return dbsession.query(RegRecord).filter_by(hrn=hrn,type=type).count()!=0
+
+    # record options into an OptionParser
+    def add_options (self, parser):
+       # no generic option
+       pass
+
+    def run (self, options):
+       self.logger.info ("SfaImporter.run : no options used")
+       self.create_top_level_records()
+
+    def create_top_level_records(self):
+        """
+        Create top level and interface records
+        """
+        # create root authority
+        self.create_top_level_auth_records(self.interface_hrn)
+
+        # create s user record for the slice manager
+        self.create_sm_client_record()
+
+        # create interface records
+        # xxx turning off the creation of authority+*
+        # in fact his is required - used in SfaApi._getCredentialRaw
+        # that tries to locate 'authority+sa'
+        self.create_interface_records()
+
+        # add local root authority's cert  to trusted list
+        self.logger.info("SfaImporter: adding " + self.interface_hrn + " to trusted list")
+        authority = self.auth_hierarchy.get_auth_info(self.interface_hrn)
+        self.TrustedRoots.add_gid(authority.get_gid_object())
+
+    def create_top_level_auth_records(self, hrn):
+        """
+        Create top level db records (includes root and sub authorities (local/remote)
+        """
+        # make sure parent exists
+        parent_hrn = get_authority(hrn)
+        if not parent_hrn:
+            parent_hrn = hrn
+        if not parent_hrn == hrn:
+            self.create_top_level_auth_records(parent_hrn)
+
+        # ensure key and cert exists:
+        self.auth_hierarchy.create_top_level_auth(hrn)    
+        # create the db record if it doesnt already exist    
+        if self.record_exists ('authority',hrn): return
+        auth_info = self.auth_hierarchy.get_auth_info(hrn)
+        auth_record = RegAuthority(hrn=hrn, gid=auth_info.get_gid_object(),
+                                   authority=get_authority(hrn))
+        auth_record.just_created()
+        dbsession.add (auth_record)
+        dbsession.commit()
+        self.logger.info("SfaImporter: imported authority (parent) %s " % auth_record)
+
+    def create_sm_client_record(self):
+        """
+        Create a user record for the Slicemanager service.
+        """
+        hrn = self.interface_hrn + '.slicemanager'
+        urn = hrn_to_urn(hrn, 'user')
+        if not self.auth_hierarchy.auth_exists(urn):
+            self.logger.info("SfaImporter: creating Slice Manager user")
+            self.auth_hierarchy.create_auth(urn)
+
+        if self.record_exists ('user',hrn): return
+        auth_info = self.auth_hierarchy.get_auth_info(hrn)
+        user_record = RegUser(hrn=hrn, gid=auth_info.get_gid_object(),
+                              authority=get_authority(hrn))
+        user_record.just_created()
+        dbsession.add (user_record)
+        dbsession.commit()
+        self.logger.info("SfaImporter: importing user (slicemanager) %s " % user_record)
+
+    def create_interface_records(self):
+        """
+        Create a record for each SFA interface
+        """
+        # just create certs for all sfa interfaces even if they
+        # aren't enabled
+        auth_info = self.auth_hierarchy.get_auth_info(self.interface_hrn)
+        pkey = auth_info.get_pkey_object()
+        hrn=self.interface_hrn
+        for type in  [ 'authority+sa', 'authority+am', 'authority+sm', ]:
+            urn = hrn_to_urn(hrn, type)
+            gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+            # for now we have to preserve the authority+<> stuff
+            if self.record_exists (type,hrn): continue
+            interface_record = RegAuthority(type=type, hrn=hrn, gid=gid,
+                                            authority=get_authority(hrn))
+            interface_record.just_created()
+            dbsession.add (interface_record)
+            dbsession.commit()
+            self.logger.info("SfaImporter: imported authority (%s) %s " % (type,interface_record))
+             
index fab2af8..90344ae 100644 (file)
@@ -9,7 +9,6 @@ class AggregateManager:
     # essentially a union of the core version, the generic version (this code) and
     # whatever the driver needs to expose
     def GetVersion(self, api, options):
-    
         xrn=Xrn(api.hrn)
         version = version_core()
         version_generic = {
diff --git a/sfa/managers/aggregate_manager_openstack.py b/sfa/managers/aggregate_manager_openstack.py
deleted file mode 100644 (file)
index 3ed0bba..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-from sfa.util.version import version_core
-from sfa.util.xrn import Xrn
-from sfa.util.callids import Callids
-from sfa.managers import aggregate_manager 
-
-class AggregateManager(aggregate_manager.AggregateManager):
-
-    def __init__ (self, config): pass
-    
-    # essentially a union of the core version, the generic version (this code) and
-    # whatever the driver needs to expose
-    def GetVersion(self, api, options):
-    
-        xrn=Xrn(api.hrn)
-        version = version_core()
-        version_generic = {
-            'interface':'aggregate',
-            'sfa': 2,
-            'geni_api': 2,
-            'geni_api_versions': {'2': 'http://%s:%s' % (api.config.SFA_AGGREGATE_HOST, api.config.SFA_AGGREGATE_PORT)}, 
-            'hrn':xrn.get_hrn(),
-            'urn':xrn.get_urn(),
-            }
-        version.update(version_generic)
-        testbed_version = self.driver.aggregate_version()
-        version.update(testbed_version)
-        return version
-    
-    def ListSlices(self, api, creds, options):
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return []
-        return self.driver.list_slices (creds, options)
-
-    def ListResources(self, api, creds, options):
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return ""
-
-        # get slice's hrn from options
-        slice_xrn = options.get('geni_slice_urn', None)
-        # pass None if no slice is specified
-        if not slice_xrn:
-            slice_hrn, slice_urn = None, None
-        else:
-            xrn = Xrn(slice_xrn)
-            slice_urn=xrn.get_urn()
-            slice_hrn=xrn.get_hrn()
-
-        return self.driver.list_resources (slice_urn, slice_hrn, creds, options)
-    
-    def SliverStatus (self, api, xrn, creds, options):
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return {}
-    
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.sliver_status (slice_urn, slice_hrn)
-    
-    def CreateSliver(self, api, xrn, creds, rspec_string, users, options):
-        """
-        Create the sliver[s] (slice) at this aggregate.    
-        Verify HRN and initialize the slice record in PLC if necessary.
-        """
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return ""
-    
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-
-        return self.driver.create_sliver (slice_urn, slice_hrn, creds, rspec_string, users, options)
-    
-    def DeleteSliver(self, api, xrn, creds, options):
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return True
-
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.delete_sliver (slice_urn, slice_hrn, creds, options)
-
-    def RenewSliver(self, api, xrn, creds, expiration_time, options):
-        call_id = options.get('call_id')
-        if Callids().already_handled(call_id): return True
-        
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.renew_sliver (slice_urn, slice_hrn, creds, expiration_time, options)
-    
-    ### these methods could use an options extension for at least call_id
-    def start_slice(self, api, xrn, creds):
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.start_slice (slice_urn, slice_hrn, creds)
-     
-    def stop_slice(self, api, xrn, creds):
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.stop_slice (slice_urn, slice_hrn, creds)
-
-    def reset_slice(self, api, xrn):
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-        return self.driver.reset_slice (slice_urn, slice_hrn)
-
-    def GetTicket(self, api, xrn, creds, rspec, users, options):
-    
-        xrn = Xrn(xrn)
-        slice_urn=xrn.get_urn()
-        slice_hrn=xrn.get_hrn()
-
-        return self.driver.get_ticket (slice_urn, slice_hrn, creds, rspec, options)
-
index b5be45f..db4348d 100644 (file)
@@ -1,5 +1,4 @@
 import types
-import time 
 # for get_key_from_incoming_ip
 import tempfile
 import os
@@ -19,8 +18,8 @@ from sfa.trust.credential import Credential
 from sfa.trust.certificate import Certificate, Keypair, convert_public_key
 from sfa.trust.gid import create_uuid
 
-from sfa.storage.record import SfaRecord
-from sfa.storage.table import SfaTable
+from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+from sfa.storage.alchemy import dbsession
 
 class RegistryManager:
 
@@ -38,7 +37,7 @@ class RegistryManager:
                              'urn':xrn.get_urn(),
                              'peers':peers})
     
-    def GetCredential(self, api, xrn, type, is_self=False):
+    def GetCredential(self, api, xrn, type, caller_xrn=None):
         # convert xrn to hrn     
         if type:
             hrn = urn_to_hrn(xrn)[0]
@@ -49,36 +48,42 @@ class RegistryManager:
         auth_hrn = api.auth.get_authority(hrn)
         if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
             auth_hrn = hrn
-        # get record info
         auth_info = api.auth.get_auth_info(auth_hrn)
-        table = SfaTable()
-        records = table.findObjects({'type': type, 'hrn': hrn})
-        if not records:
-            raise RecordNotFound(hrn)
-        record = records[0]
+        # get record info
+        record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+        if not record:
+            raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
     
         # verify_cancreate_credential requires that the member lists
         # (researchers, pis, etc) be filled in
-        self.driver.augment_records_with_testbed_info (record)
-        if not self.driver.is_enabled (record):
-              raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+        logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
+        self.driver.augment_records_with_testbed_info (record.__dict__)
+        logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
+        if not self.driver.is_enabled (record.__dict__):
+              raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
     
         # get the callers gid
-        # if this is a self cred the record's gid is the caller's gid
-        if is_self:
+        # if caller_xrn is not specified assume the caller is the record
+        # object itself.
+        if not caller_xrn:
             caller_hrn = hrn
             caller_gid = record.get_gid_object()
         else:
-            caller_gid = api.auth.client_cred.get_gid_caller() 
-            caller_hrn = caller_gid.get_hrn()
-        
+            caller_hrn, caller_type = urn_to_hrn(caller_xrn)
+            caller_record = dbsession.query(RegRecord).filter_by(hrn=caller_hrn).first()
+            if caller_type:
+                caller_record = caller_record.filter_by(type=caller_type)
+            if not caller_record:
+                raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
+            caller_gid = GID(string=caller_record.gid)
         object_hrn = record.get_gid_object().get_hrn()
-        rights = api.auth.determine_user_rights(caller_hrn, record)
+        rights = api.auth.determine_user_rights(caller_hrn, record.__dict__)
         # make sure caller has rights to this object
         if rights.is_empty():
-            raise PermissionError(caller_hrn + " has no rights to " + record['name'])
-    
-        object_gid = GID(string=record['gid'])
+            raise PermissionError("%s has no rights to %s (%s)" % \
+                                  (caller_hrn, object_hrn, xrn))    
+        object_gid = GID(string=record.gid)
         new_cred = Credential(subject = object_gid.get_subject())
         new_cred.set_gid_caller(caller_gid)
         new_cred.set_gid_object(object_gid)
@@ -86,8 +91,8 @@ class RegistryManager:
         #new_cred.set_pubkey(object_gid.get_pubkey())
         new_cred.set_privileges(rights)
         new_cred.get_privileges().delegate_all_privileges(True)
-        if 'expires' in record:
-            date = utcparse(record['expires'])
+        if hasattr(record,'expires'):
+            date = utcparse(record.expires)
             expires = datetime_to_epoch(date)
             new_cred.set_expiration(int(expires))
         auth_kind = "authority,ma,sa"
@@ -102,15 +107,16 @@ class RegistryManager:
     def Resolve(self, api, xrns, type=None, full=True):
     
         if not isinstance(xrns, types.ListType):
-            xrns = [xrns]
             # try to infer type if not set and we get a single input
             if not type:
                 type = Xrn(xrns).get_type()
+            xrns = [xrns]
         hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] 
+
         # load all known registry names into a prefix tree and attempt to find
         # the longest matching prefix
-        # create a dict where key is a registry hrn and its value is a
-        # hrns at that registry (determined by the known prefix tree).  
+        # create a dict where key is a registry hrn and its value is a list
+        # of hrns at that registry (determined by the known prefix tree).  
         xrn_dict = {}
         registries = api.registries
         tree = prefixTree()
@@ -136,47 +142,52 @@ class RegistryManager:
                 credential = api.getCredential()
                 interface = api.registries[registry_hrn]
                 server_proxy = api.server_proxy(interface, credential)
-                peer_records = server_proxy.Resolve(xrns, credential)
-                records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+                peer_records = server_proxy.Resolve(xrns, credential,type)
+                # pass foreign records as-is
+                # previous code used to read
+                # records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+                # not sure why the records coming through xmlrpc had to be processed at all
+                records.extend(peer_records)
     
         # try resolving the remaining unfound records at the local registry
         local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) )
         # 
-        table = SfaTable()
-        local_records = table.findObjects({'hrn': local_hrns})
+        local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
+        if type:
+            local_records = local_records.filter_by(type=type)
+        local_records=local_records.all()
+        logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
+        local_dicts = [ record.__dict__ for record in local_records ]
         
         if full:
             # in full mode we get as much info as we can, which involves contacting the 
             # testbed for getting implementation details about the record
-            self.driver.augment_records_with_testbed_info(local_records)
+            self.driver.augment_records_with_testbed_info(local_dicts)
             # also we fill the 'url' field for known authorities
             # used to be in the driver code, sounds like a poorman thing though
             def solve_neighbour_url (record):
-                if not record['type'].startswith('authority'): return 
-                hrn=record['hrn']
+                if not record.type.startswith('authority'): return 
+                hrn=record.hrn
                 for neighbour_dict in [ api.aggregates, api.registries ]:
                     if hrn in neighbour_dict:
-                        record['url']=neighbour_dict[hrn].get_url()
+                        record.url=neighbour_dict[hrn].get_url()
                         return 
-            [ solve_neighbour_url (record) for record in local_records ]
-                    
-        
+            for record in local_records: solve_neighbour_url (record)
         
-        # convert local record objects to dicts
-        records.extend([dict(record) for record in local_records])
-        if type:
-            records = filter(lambda rec: rec['type'] in [type], records)
-    
+        # convert local record objects to dicts for xmlrpc
+        # xxx somehow here calling dict(record) issues a weird error
+        # however record.todict() seems to work fine
+        # records.extend( [ dict(record) for record in local_records ] )
+        records.extend( [ record.todict() for record in local_records ] )    
         if not records:
             raise RecordNotFound(str(hrns))
     
         return records
     
-    def List(self, api, xrn, origin_hrn=None):
+    def List (self, api, xrn, origin_hrn=None):
         hrn, type = urn_to_hrn(xrn)
         # load all know registry names into a prefix tree and attempt to find
         # the longest matching prefix
-        records = []
         registries = api.registries
         registry_hrns = registries.keys()
         tree = prefixTree()
@@ -188,23 +199,24 @@ class RegistryManager:
             raise MissingAuthority(xrn)
         # if the best match (longest matching hrn) is not the local registry,
         # forward the request
-        records = []    
+        record_dicts = []    
         if registry_hrn != api.hrn:
             credential = api.getCredential()
             interface = api.registries[registry_hrn]
             server_proxy = api.server_proxy(interface, credential)
             record_list = server_proxy.List(xrn, credential)
-            records = [SfaRecord(dict=record).as_dict() for record in record_list]
+            # same as above, no need to process what comes from through xmlrpc
+            # pass foreign records as-is
+            record_dicts = record_list
         
         # if we still have not found the record yet, try the local registry
-        if not records:
+        if not record_dicts:
             if not api.auth.hierarchy.auth_exists(hrn):
                 raise MissingAuthority(hrn)
+            records = dbsession.query(RegRecord).filter_by(authority=hrn)
+            record_dicts=[ record.todict() for record in records ]
     
-            table = SfaTable()
-            records = table.find({'authority': hrn})
-    
-        return records
+        return record_dicts
     
     
     def CreateGid(self, api, xrn, cert):
@@ -227,73 +239,93 @@ class RegistryManager:
     # subject_record describes the subject of the relationships
     # ref_record contains the target values for the various relationships we need to manage
     # (to begin with, this is just the slice x person relationship)
-    def update_relations (self, subject_record, ref_record):
-        type=subject_record['type']
+    def update_relations (self, subject_obj, ref_obj):
+        type=subject_obj.type
         if type=='slice':
-            self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')
+            self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user')
         
     # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
     # hrns is the list of hrns that should be linked to the subject from now on
     # target_type would be e.g. 'user' in the 'slice' x 'researcher' example
-    def update_relation (self, sfa_record, field_key, hrns, target_type):
+    def update_relation (self, record_obj, field_key, hrns, target_type):
         # locate the linked objects in our db
-        subject_type=sfa_record['type']
-        subject_id=sfa_record['pointer']
-        table = SfaTable()
-        link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
-        link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
+        subject_type=record_obj.type
+        subject_id=record_obj.pointer
+        # get the 'pointer' field of all matching records
+        link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all()
+        # sqlalchemy returns named tuples for columns
+        link_ids = [ tuple.pointer for tuple in link_id_tuples ]
         self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
-        
 
-    def Register(self, api, record):
+    def Register(self, api, record_dict):
     
-        hrn, type = record['hrn'], record['type']
+        hrn, type = record_dict['hrn'], record_dict['type']
         urn = hrn_to_urn(hrn,type)
         # validate the type
         if type not in ['authority', 'slice', 'node', 'user']:
             raise UnknownSfaType(type) 
         
-        # check if record already exists
-        table = SfaTable()
-        existing_records = table.find({'type': type, 'hrn': hrn})
+        # check if record_dict already exists
+        existing_records = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).all()
         if existing_records:
             raise ExistingRecord(hrn)
            
-        record = SfaRecord(dict = record)
-        record['authority'] = get_authority(record['hrn'])
-        auth_info = api.auth.get_auth_info(record['authority'])
+        assert ('type' in record_dict)
+        # returns the right type of RegRecord according to type in record
+        record = make_record(dict=record_dict)
+        record.just_created()
+        record.authority = get_authority(record.hrn)
+        auth_info = api.auth.get_auth_info(record.authority)
         pub_key = None
         # make sure record has a gid
-        if 'gid' not in record:
+        if not record.gid:
             uuid = create_uuid()
             pkey = Keypair(create=True)
-            if 'keys' in record and record['keys']:
-                pub_key=record['keys']
+            if getattr(record,'keys',None):
+                pub_key=record.keys
                 # use only first key in record
-                if isinstance(record['keys'], types.ListType):
-                    pub_key = record['keys'][0]
+                if isinstance(record.keys, types.ListType):
+                    pub_key = record.keys[0]
                 pkey = convert_public_key(pub_key)
     
             gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
             gid = gid_object.save_to_string(save_parents=True)
-            record['gid'] = gid
-            record.set_gid(gid)
+            record.gid = gid
     
-        if type in ["authority"]:
+        if isinstance (record, RegAuthority):
             # update the tree
             if not api.auth.hierarchy.auth_exists(hrn):
                 api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
     
             # get the GID from the newly created authority
             gid = auth_info.get_gid_object()
-            record.set_gid(gid.save_to_string(save_parents=True))
+            record.gid=gid.save_to_string(save_parents=True)
 
+        elif isinstance (record, RegSlice):
+            # locate objects for relationships
+            if hasattr (record, 'researcher'):
+                # we get the list of researcher hrns as
+                researcher_hrns = record.researcher
+                # strip that in case we have <researcher> words </researcher>
+                researcher_hrns = [ x.strip() for x in researcher_hrns ]
+                logger.info ("incoming researchers %s"%researcher_hrns)
+                request = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns))
+                logger.info ("%d incoming hrns, %d matches found"%(len(researcher_hrns),request.count()))
+                researchers = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns)).all()
+                record.reg_researchers = researchers
+        
+        elif isinstance (record, RegUser):
+            # create RegKey objects for incoming keys
+            if hasattr(record,'keys'): 
+                logger.debug ("creating %d keys for user %s"%(len(record.keys),record.hrn))
+                record.reg_keys = [ RegKey (key) for key in record.keys ]
+            
         # update testbed-specific data if needed
-        pointer = self.driver.register (record, hrn, pub_key)
+        pointer = self.driver.register (record.__dict__, hrn, pub_key)
 
-        record.set_pointer(pointer)
-        record_id = table.insert(record)
-        record['record_id'] = record_id
+        record.pointer=pointer
+        dbsession.add(record)
+        dbsession.commit()
     
         # update membership for researchers, pis, owners, operators
         self.update_relations (record, record)
@@ -301,17 +333,16 @@ class RegistryManager:
         return record.get_gid_object().save_to_string(save_parents=True)
     
     def Update(self, api, record_dict):
-        new_record = SfaRecord(dict = record_dict)
-        type = new_record['type']
-        hrn = new_record['hrn']
-        urn = hrn_to_urn(hrn,type)
-        table = SfaTable()
+        assert ('type' in record_dict)
+        new_record=RegRecord(dict=record_dict)
+        type = new_record.type
+        hrn = new_record.hrn
+        
         # make sure the record exists
-        records = table.findObjects({'type': type, 'hrn': hrn})
-        if not records:
-            raise RecordNotFound(hrn)
-        record = records[0]
-        record['last_updated'] = time.gmtime()
+        record = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+        if not record:
+            raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
+        record.just_updated()
     
         # validate the type
         if type not in ['authority', 'slice', 'node', 'user']:
@@ -319,18 +350,18 @@ class RegistryManager:
 
         # Use the pointer from the existing record, not the one that the user
         # gave us. This prevents the user from inserting a forged pointer
-        pointer = record['pointer']
+        pointer = record.pointer
     
         # is the a change in keys ?
         new_key=None
         if type=='user':
-            if 'keys' in new_record and new_record['keys']:
-                new_key=new_record['keys']
+            if getattr(new_key,'keys',None):
+                new_key=new_record.keys
                 if isinstance (new_key,types.ListType):
                     new_key=new_key[0]
 
         # update the PLC information that was specified with the record
-        if not self.driver.update (record, new_record, hrn, new_key):
+        if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key):
             logger.warning("driver.update failed")
     
         # take new_key into account
@@ -338,11 +369,11 @@ class RegistryManager:
             # update the openssl key and gid
             pkey = convert_public_key(new_key)
             uuid = create_uuid()
+            urn = hrn_to_urn(hrn,type)
             gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
             gid = gid_object.save_to_string(save_parents=True)
-            record['gid'] = gid
-            record = SfaRecord(dict=record)
-            table.update(record)
+            record.gid = gid
+            dsession.commit()
         
         # update membership for researchers, pis, owners, operators
         self.update_relations (record, new_record)
@@ -351,19 +382,19 @@ class RegistryManager:
     
     # expecting an Xrn instance
     def Remove(self, api, xrn, origin_hrn=None):
-    
-        table = SfaTable()
-        filter = {'hrn': xrn.get_hrn()}
         hrn=xrn.get_hrn()
         type=xrn.get_type()
+        request=dbsession.query(RegRecord).filter_by(hrn=hrn)
         if type and type not in ['all', '*']:
-            filter['type'] = type
+            request=request.filter_by(type=type)
     
-        records = table.find(filter)
-        if not records: raise RecordNotFound(hrn)
-        record = records[0]
-        type = record['type']
-        
+        record = request.first()
+        if not record:
+            msg="Could not find hrn %s"%hrn
+            if type: msg += " type=%s"%type
+            raise RecordNotFound(msg)
+
+        type = record.type
         if type not in ['slice', 'user', 'node', 'authority'] :
             raise UnknownSfaType(type)
 
@@ -382,15 +413,16 @@ class RegistryManager:
 
         # call testbed callback first
         # IIUC this is done on the local testbed TOO because of the refreshpeer link
-        if not self.driver.remove(record):
+        if not self.driver.remove(record.__dict__):
             logger.warning("driver.remove failed")
 
         # delete from sfa db
-        table.remove(record)
+        dbsession.delete(record)
+        dbsession.commit()
     
         return 1
 
-    # This is a PLC-specific thing...
+    # This is a PLC-specific thing, won't work with other platforms
     def get_key_from_incoming_ip (self, api):
         # verify that the callers's ip address exist in the db and is an interface
         # for a node in the db
@@ -404,23 +436,20 @@ class RegistryManager:
         node = nodes[0]
        
         # look up the sfa record
-        table = SfaTable()
-        records = table.findObjects({'type': 'node', 'pointer': node['node_id']})
-        if not records:
-            raise RecordNotFound("pointer:" + str(node['node_id']))  
-        record = records[0]
+        record=dbsession.query(RegRecord).filter_by(type='node',pointer=node['node_id']).first()
+        if not record:
+            raise RecordNotFound("node with pointer %s"%node['node_id'])
         
         # generate a new keypair and gid
         uuid = create_uuid()
         pkey = Keypair(create=True)
-        urn = hrn_to_urn(record['hrn'], record['type'])
+        urn = hrn_to_urn(record.hrn, record.type)
         gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
         gid = gid_object.save_to_string(save_parents=True)
-        record['gid'] = gid
-        record.set_gid(gid)
+        record.gid = gid
 
         # update the record
-        table.update(record)
+        dbsession.commit()
   
         # attempt the scp the key
         # and gid onto the node
index 51c4472..c940e15 100644 (file)
@@ -1,5 +1,4 @@
 import types
-import time 
 # for get_key_from_incoming_ip
 import tempfile
 import os
@@ -13,29 +12,20 @@ from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
 from sfa.util.plxrn import hrn_to_pl_login_base
 from sfa.util.version import version_core
 from sfa.util.sfalogging import logger
+
 from sfa.trust.gid import GID 
 from sfa.trust.credential import Credential
 from sfa.trust.certificate import Certificate, Keypair, convert_public_key
 from sfa.trust.gid import create_uuid
-from sfa.storage.record import SfaRecord
-from sfa.storage.table import SfaTable
-from sfa.managers import registry_manager
 
-class RegistryManager(registry_manager.RegistryManager):
+from sfa.storage.model import make_record,RegRecord
+from sfa.storage.alchemy import dbsession
 
-    def __init__ (self, config): pass
+from sfa.managers.registry_manager import RegistryManager
 
-    # The GENI GetVersion call
-    def GetVersion(self, api, options):
-        peers = dict ( [ (hrn,interface.get_url()) for (hrn,interface) in api.registries.iteritems() 
-                       if hrn != api.hrn])
-        xrn=Xrn(api.hrn)
-        return version_core({'interface':'registry',
-                             'hrn':xrn.get_hrn(),
-                             'urn':xrn.get_urn(),
-                             'peers':peers})
-    
-    def GetCredential(self, api, xrn, type, is_self=False):
+class RegistryManager(RegistryManager):
+
+    def GetCredential(self, api, xrn, type, caller_xrn = None):
         # convert xrn to hrn     
         if type:
             hrn = urn_to_hrn(xrn)[0]
@@ -46,36 +36,44 @@ class RegistryManager(registry_manager.RegistryManager):
         auth_hrn = api.auth.get_authority(hrn)
         if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
             auth_hrn = hrn
-        # get record info
         auth_info = api.auth.get_auth_info(auth_hrn)
-        table = SfaTable()
-        records = table.findObjects({'type': type, 'hrn': hrn})
-        if not records:
-            raise RecordNotFound(hrn)
-        record = records[0]
+        # get record info
+        record=dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+        if type:
+            record = record.filter_by(type=type)
+        if not record:
+            raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
     
         # verify_cancreate_credential requires that the member lists
         # (researchers, pis, etc) be filled in
-        self.driver.augment_records_with_testbed_info (record)
-        if not self.driver.is_enabled (record):
-              raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+        logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
+        self.driver.augment_records_with_testbed_info (record.__dict__)
+        logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
+        if not self.driver.is_enabled (record.__dict__):
+              raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
     
         # get the callers gid
-        # if this is a self cred the record's gid is the caller's gid
-        if is_self:
+        # if caller_xrn is not specified assume the caller is the record
+        # object itself.  
+        if not caller_xrn:
             caller_hrn = hrn
             caller_gid = record.get_gid_object()
         else:
-            caller_gid = api.auth.client_cred.get_gid_caller() 
-            caller_hrn = caller_gid.get_hrn()
+            caller_hrn, caller_type = urn_to_hrn(caller_xrn)
+            caller_record = dbsession.query(RegRecord).filter_by(hrn=caller_hrn).first()
+            if caller_type:
+                caller_record = caller_record.filter_by(type=caller_type)
+            if not caller_record:
+                raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
+            caller_gid = GID(string=caller_record.gid) 
         
         object_hrn = record.get_gid_object().get_hrn()
-        rights = api.auth.determine_user_rights(caller_hrn, record)
+        rights = api.auth.determine_user_rights(caller_hrn, record.__dict__)
         # make sure caller has rights to this object
         if rights.is_empty():
-            raise PermissionError(caller_hrn + " has no rights to " + record['name'])
+            raise PermissionError(caller_hrn + " has no rights to " + record.hrn)
     
-        object_gid = GID(string=record['gid'])
+        object_gid = GID(string=record.gid)
         new_cred = Credential(subject = object_gid.get_subject())
         new_cred.set_gid_caller(caller_gid)
         new_cred.set_gid_object(object_gid)
@@ -83,8 +81,8 @@ class RegistryManager(registry_manager.RegistryManager):
         #new_cred.set_pubkey(object_gid.get_pubkey())
         new_cred.set_privileges(rights)
         new_cred.get_privileges().delegate_all_privileges(True)
-        if 'expires' in record:
-            date = utcparse(record['expires'])
+        if hasattr(record,'expires'):
+            date = utcparse(record.expires)
             expires = datetime_to_epoch(date)
             new_cred.set_expiration(int(expires))
         auth_kind = "authority,ma,sa"
@@ -96,357 +94,24 @@ class RegistryManager(registry_manager.RegistryManager):
         return new_cred.save_to_string(save_parents=True)
     
     
-    def Resolve(self, api, xrns, type=None, full=True):
-    
-        if not isinstance(xrns, types.ListType):
-            xrns = [xrns]
-            # try to infer type if not set and we get a single input
-            if not type:
-                type = Xrn(xrns).get_type()
-        hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] 
-        # load all known registry names into a prefix tree and attempt to find
-        # the longest matching prefix
-        # create a dict where key is a registry hrn and its value is a
-        # hrns at that registry (determined by the known prefix tree).  
-        xrn_dict = {}
-        registries = api.registries
-        tree = prefixTree()
-        registry_hrns = registries.keys()
-        tree.load(registry_hrns)
-        for xrn in xrns:
-            registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
-            if registry_hrn not in xrn_dict:
-                xrn_dict[registry_hrn] = []
-            xrn_dict[registry_hrn].append(xrn)
-            
-        records = [] 
-        for registry_hrn in xrn_dict:
-            # skip the hrn without a registry hrn
-            # XX should we let the user know the authority is unknown?       
-            if not registry_hrn:
-                continue
-    
-            # if the best match (longest matching hrn) is not the local registry,
-            # forward the request
-            xrns = xrn_dict[registry_hrn]
-            if registry_hrn != api.hrn:
-                credential = api.getCredential()
-                interface = api.registries[registry_hrn]
-                server_proxy = api.server_proxy(interface, credential)
-                peer_records = server_proxy.Resolve(xrns, credential)
-                records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
-    
-        # try resolving the remaining unfound records at the local registry
-        local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) )
-        # 
-        table = SfaTable()
-        local_records = table.findObjects({'hrn': local_hrns})
-        
-        if full:
-            # in full mode we get as much info as we can, which involves contacting the 
-            # testbed for getting implementation details about the record
-            self.driver.augment_records_with_testbed_info(local_records)
-            # also we fill the 'url' field for known authorities
-            # used to be in the driver code, sounds like a poorman thing though
-            def solve_neighbour_url (record):
-                if not record['type'].startswith('authority'): return 
-                hrn=record['hrn']
-                for neighbour_dict in [ api.aggregates, api.registries ]:
-                    if hrn in neighbour_dict:
-                        record['url']=neighbour_dict[hrn].get_url()
-                        return 
-            [ solve_neighbour_url (record) for record in local_records ]
-                    
-        
-        
-        # convert local record objects to dicts
-        records.extend([dict(record) for record in local_records])
-        if type:
-            records = filter(lambda rec: rec['type'] in [type], records)
-    
-        if not records:
-            raise RecordNotFound(str(hrns))
-    
-        return records
-    
-    def List(self, api, xrn, origin_hrn=None):
-        hrn, type = urn_to_hrn(xrn)
-        # load all know registry names into a prefix tree and attempt to find
-        # the longest matching prefix
-        records = []
-        registries = api.registries
-        registry_hrns = registries.keys()
-        tree = prefixTree()
-        tree.load(registry_hrns)
-        registry_hrn = tree.best_match(hrn)
-       
-        #if there was no match then this record belongs to an unknow registry
-        if not registry_hrn:
-            raise MissingAuthority(xrn)
-        # if the best match (longest matching hrn) is not the local registry,
-        # forward the request
-        records = []    
-        if registry_hrn != api.hrn:
-            credential = api.getCredential()
-            interface = api.registries[registry_hrn]
-            server_proxy = api.server_proxy(interface, credential)
-            record_list = server_proxy.List(xrn, credential)
-            records = [SfaRecord(dict=record).as_dict() for record in record_list]
-        
-        # if we still have not found the record yet, try the local registry
-        if not records:
-            if not api.auth.hierarchy.auth_exists(hrn):
-                raise MissingAuthority(hrn)
-    
-            table = SfaTable()
-            records = table.find({'authority': hrn})
-    
-        return records
-    
-    
-    def CreateGid(self, api, xrn, cert):
-        # get the authority
-        authority = Xrn(xrn=xrn).get_authority_hrn()
-        auth_info = api.auth.get_auth_info(authority)
-        if not cert:
-            pkey = Keypair(create=True)
-        else:
-            certificate = Certificate(string=cert)
-            pkey = certificate.get_pubkey()    
-        gid = api.auth.hierarchy.create_gid(xrn, create_uuid(), pkey) 
-        return gid.save_to_string(save_parents=True)
-    
-    ####################
-    # utility for handling relationships among the SFA objects 
-    # given that the SFA db does not handle this sort of relationsships
-    # it will rely on side-effects in the testbed to keep this persistent
-    
     # subject_record describes the subject of the relationships
     # ref_record contains the target values for the various relationships we need to manage
     # (to begin with, this is just the slice x person relationship)
-    def update_relations (self, subject_record, ref_record):
-        type=subject_record['type']
+    def update_relations (self, subject_obj, ref_obj):
+        type=subject_obj.type
         if type=='slice':
-            self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')
+            self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user')
         
     # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
     # hrns is the list of hrns that should be linked to the subject from now on
     # target_type would be e.g. 'user' in the 'slice' x 'researcher' example
-    def update_relation (self, sfa_record, field_key, hrns, target_type):
+    def update_relation (self, record_obj, field_key, hrns, target_type):
         # locate the linked objects in our db
-        subject_type=sfa_record['type']
-        subject_id=sfa_record['pointer']
-        table = SfaTable()
-        link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
-        link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
+        subject_type=record_obj.type
+        subject_id=record_obj.pointer
+        # get the 'pointer' field of all matching records
+        link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all()
+        # sqlalchemy returns named tuples for columns
+        link_ids = [ tuple.pointer for tuple in link_id_tuples ]
         self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
-        
-
-    def Register(self, api, record):
-    
-        hrn, type = record['hrn'], record['type']
-        urn = hrn_to_urn(hrn,type)
-        # validate the type
-        if type not in ['authority', 'slice', 'node', 'user']:
-            raise UnknownSfaType(type) 
-        
-        # check if record already exists
-        table = SfaTable()
-        existing_records = table.find({'type': type, 'hrn': hrn})
-        if existing_records:
-            raise ExistingRecord(hrn)
-           
-        record = SfaRecord(dict = record)
-        record['authority'] = get_authority(record['hrn'])
-        auth_info = api.auth.get_auth_info(record['authority'])
-        pub_key = None
-        # make sure record has a gid
-        if 'gid' not in record:
-            uuid = create_uuid()
-            pkey = Keypair(create=True)
-            if 'keys' in record and record['keys']:
-                pub_key=record['keys']
-                # use only first key in record
-                if isinstance(record['keys'], types.ListType):
-                    pub_key = record['keys'][0]
-                pkey = convert_public_key(pub_key)
-    
-            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
-            gid = gid_object.save_to_string(save_parents=True)
-            record['gid'] = gid
-            record.set_gid(gid)
-    
-        if type in ["authority"]:
-            # update the tree
-            if not api.auth.hierarchy.auth_exists(hrn):
-                api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
-    
-            # get the GID from the newly created authority
-            gid = auth_info.get_gid_object()
-            record.set_gid(gid.save_to_string(save_parents=True))
-
-        # update testbed-specific data if needed
-        pointer = self.driver.register (record, hrn, pub_key)
-
-        record.set_pointer(pointer)
-        record_id = table.insert(record)
-        record['record_id'] = record_id
-    
-        # update membership for researchers, pis, owners, operators
-        self.update_relations (record, record)
-        
-        return record.get_gid_object().save_to_string(save_parents=True)
-    
-    def Update(self, api, record_dict):
-        new_record = SfaRecord(dict = record_dict)
-        type = new_record['type']
-        hrn = new_record['hrn']
-        urn = hrn_to_urn(hrn,type)
-        table = SfaTable()
-        # make sure the record exists
-        records = table.findObjects({'type': type, 'hrn': hrn})
-        if not records:
-            raise RecordNotFound(hrn)
-        record = records[0]
-        record['last_updated'] = time.gmtime()
-    
-        # validate the type
-        if type not in ['authority', 'slice', 'node', 'user']:
-            raise UnknownSfaType(type) 
-
-        # Use the pointer from the existing record, not the one that the user
-        # gave us. This prevents the user from inserting a forged pointer
-        pointer = record['pointer']
-    
-        # is the a change in keys ?
-        new_key=None
-        if type=='user':
-            if 'keys' in new_record and new_record['keys']:
-                new_key=new_record['keys']
-                if isinstance (new_key,types.ListType):
-                    new_key=new_key[0]
-
-        # update the PLC information that was specified with the record
-        if not self.driver.update (record, new_record, hrn, new_key):
-            logger.warning("driver.update failed")
-    
-        # take new_key into account
-        if new_key:
-            # update the openssl key and gid
-            pkey = convert_public_key(new_key)
-            uuid = create_uuid()
-            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
-            gid = gid_object.save_to_string(save_parents=True)
-            record['gid'] = gid
-            record = SfaRecord(dict=record)
-            table.update(record)
-        
-        # update membership for researchers, pis, owners, operators
-        self.update_relations (record, new_record)
-        
-        return 1 
-    
-    # expecting an Xrn instance
-    def Remove(self, api, xrn, origin_hrn=None):
-    
-        table = SfaTable()
-        filter = {'hrn': xrn.get_hrn()}
-        hrn=xrn.get_hrn()
-        type=xrn.get_type()
-        if type and type not in ['all', '*']:
-            filter['type'] = type
-    
-        records = table.find(filter)
-        if not records: raise RecordNotFound(hrn)
-        record = records[0]
-        type = record['type']
-        
-        if type not in ['slice', 'user', 'node', 'authority'] :
-            raise UnknownSfaType(type)
-
-        credential = api.getCredential()
-        registries = api.registries
-    
-        # Try to remove the object from the PLCDB of federated agg.
-        # This is attempted before removing the object from the local agg's PLCDB and sfa table
-        if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']:
-            for registry in registries:
-                if registry not in [api.hrn]:
-                    try:
-                        result=registries[registry].remove_peer_object(credential, record, origin_hrn)
-                    except:
-                        pass
-
-        # call testbed callback first
-        # IIUC this is done on the local testbed TOO because of the refreshpeer link
-        if not self.driver.remove(record):
-            logger.warning("driver.remove failed")
-
-        # delete from sfa db
-        table.remove(record)
-    
-        return 1
-
-    # This is a PLC-specific thing...
-    def get_key_from_incoming_ip (self, api):
-        # verify that the callers's ip address exist in the db and is an interface
-        # for a node in the db
-        (ip, port) = api.remote_addr
-        interfaces = self.driver.shell.GetInterfaces({'ip': ip}, ['node_id'])
-        if not interfaces:
-            raise NonExistingRecord("no such ip %(ip)s" % locals())
-        nodes = self.driver.shell.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
-        if not nodes:
-            raise NonExistingRecord("no such node using ip %(ip)s" % locals())
-        node = nodes[0]
-       
-        # look up the sfa record
-        table = SfaTable()
-        records = table.findObjects({'type': 'node', 'pointer': node['node_id']})
-        if not records:
-            raise RecordNotFound("pointer:" + str(node['node_id']))  
-        record = records[0]
-        
-        # generate a new keypair and gid
-        uuid = create_uuid()
-        pkey = Keypair(create=True)
-        urn = hrn_to_urn(record['hrn'], record['type'])
-        gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
-        gid = gid_object.save_to_string(save_parents=True)
-        record['gid'] = gid
-        record.set_gid(gid)
-
-        # update the record
-        table.update(record)
-  
-        # attempt the scp the key
-        # and gid onto the node
-        # this will only work for planetlab based components
-        (kfd, key_filename) = tempfile.mkstemp() 
-        (gfd, gid_filename) = tempfile.mkstemp() 
-        pkey.save_to_file(key_filename)
-        gid_object.save_to_file(gid_filename, save_parents=True)
-        host = node['hostname']
-        key_dest="/etc/sfa/node.key"
-        gid_dest="/etc/sfa/node.gid" 
-        scp = "/usr/bin/scp" 
-        #identity = "/etc/planetlab/root_ssh_key.rsa"
-        identity = "/etc/sfa/root_ssh_key"
-        scp_options=" -i %(identity)s " % locals()
-        scp_options+="-o StrictHostKeyChecking=no " % locals()
-        scp_key_command="%(scp)s %(scp_options)s %(key_filename)s root@%(host)s:%(key_dest)s" %\
-                         locals()
-        scp_gid_command="%(scp)s %(scp_options)s %(gid_filename)s root@%(host)s:%(gid_dest)s" %\
-                         locals()    
-
-        all_commands = [scp_key_command, scp_gid_command]
-        
-        for command in all_commands:
-            (status, output) = commands.getstatusoutput(command)
-            if status:
-                raise Exception, output
-
-        for filename in [key_filename, gid_filename]:
-            os.unlink(filename)
 
-        return 1 
index e9e446a..b261fe2 100644 (file)
@@ -135,7 +135,7 @@ class SliceManager:
     
         # look in cache first
         cached_requested = options.get('cached', True)
-        if not xrn and self.cache and cached_request:
+        if not xrn and self.cache and cached_requested:
             rspec =  self.cache.get(version_string)
             if rspec:
                 api.logger.debug("SliceManager.ListResources returns cached advertisement")
index b28d1a5..b898c9d 100644 (file)
@@ -41,7 +41,7 @@ class CreateSliver(Method):
 
         # make sure users info is specified
         if not users:
-            msg = "'users' musst be specified and cannot be null. You may need to update your client." 
+            msg = "'users' must be specified and cannot be null. You may need to update your client." 
             raise SfaInvalidArgument(name='users', extra=msg)  
 
         # filter rspec through sfatables
index 50525c2..f5344a2 100644 (file)
@@ -44,5 +44,5 @@ class GetCredential(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))        
 
-        return self.api.manager.GetCredential(self.api, xrn, type)
+        return self.api.manager.GetCredential(self.api, xrn, self.api.auth.client_gid.get_urn())
 
index fd87ca0..c67bf4b 100644 (file)
@@ -6,7 +6,6 @@ from sfa.util.method import Method
 from sfa.trust.certificate import Certificate
 
 from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
 
 class GetSelfCredential(Method):
     """
@@ -55,12 +54,24 @@ class GetSelfCredential(Method):
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
         
  
-        # authenticate the gid
+        ### authenticate the gid
+        # import here so we can load this module at build-time for sfa2wsdl
+        #from sfa.storage.alchemy import dbsession
+        from sfa.storage.model import RegRecord
+
+        # xxx-local - the current code runs Resolve, which would forward to 
+        # another registry if needed
+        # I wonder if this is truly the intention, or shouldn't we instead 
+        # only look in the local db ?
         records = self.api.manager.Resolve(self.api, xrn, type)
         if not records:
             raise RecordNotFound(hrn)
-        record = SfaRecord(dict=records[0])
-        gid = record.get_gid_object()
+
+        record_obj = RegRecord (dict=records[0])
+        # xxx-local the local-only version would read 
+        #record_obj = dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+        #if not record_obj: raise RecordNotFound(hrn)
+        gid = record_obj.get_gid_object()
         gid_str = gid.save_to_string(save_parents=True)
         self.api.auth.authenticateGid(gid_str, [cert, type, hrn])
         # authenticate the certificate against the gid in the db
@@ -73,4 +84,4 @@ class GetSelfCredential(Method):
                     self.api.logger.debug("ConnectionKeyGIDMismatch, %s filename: %s"%(name,obj.filename))
             raise ConnectionKeyGIDMismatch(gid.get_subject())
         
-        return self.api.manager.GetCredential(self.api, xrn, type, is_self=True)
+        return self.api.manager.GetCredential(self.api, xrn, type)
index bbd3e47..c023fa0 100644 (file)
@@ -5,7 +5,6 @@ from sfa.util.method import Method
 from sfa.trust.credential import Credential
 
 from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
 
 class List(Method):
     """
@@ -23,7 +22,8 @@ class List(Method):
               Parameter(type([str]), "List of credentials")),
         ]
 
-    returns = [SfaRecord]
+    # xxx used to be [SfaRecord]
+    returns = [Parameter(dict, "registry record")]
     
     def call(self, xrn, creds):
         hrn, type = urn_to_hrn(xrn)
index 9a6dd47..7abc6cd 100644 (file)
@@ -6,7 +6,6 @@ from sfa.util.method import Method
 from sfa.trust.credential import Credential
 
 from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
 
 class Resolve(Method):
     """
@@ -26,7 +25,8 @@ class Resolve(Method):
               Parameter(list, "List of credentials)"))  
         ]
 
-    returns = [SfaRecord]
+    # xxx used to be [SfaRecord]
+    returns = [Parameter(dict, "registry record")]
     
     def call(self, xrns, creds):
         type = None
diff --git a/sfa/openstack/euca_shell.py b/sfa/openstack/euca_shell.py
new file mode 100644 (file)
index 0000000..80f9f52
--- /dev/null
@@ -0,0 +1,59 @@
+try:
+    import boto
+    from boto.ec2.regioninfo import RegionInfo
+    from boto.exception import EC2ResponseError
+    has_boto=True
+except:
+    has_boto=False    
+
+from sfa.util.sfalogging import logger
+from sfa.openstack.nova_shell import NovaShell
+from sfa.util.config import Config
+
+class EucaShell:
+    """
+    A xmlrpc connection to the euca api. 
+    """    
+
+    def __init__(self, config):
+        self.config = Config
+
+    def get_euca_connection(self):
+        if not has_boto:
+            logger.info('Unable to access EC2 API - boto library not found.')
+            return None
+        nova = NovaShell(self.config)
+        admin_user = nova.auth_manager.get_user(self.config.SFA_NOVA_USER)
+        access_key = admin_user.access
+        secret_key = admin_user.secret
+        url = self.config.SFA_NOVA_API_URL
+        path = "/"
+        euca_port = self.config.SFA_NOVA_API_PORT        
+        use_ssl = False
+
+        # Split the url into parts 
+        if url.find('https://') >= 0:
+            use_ssl  = True
+            url = url.replace('https://', '')
+        elif url.find('http://') >= 0:
+            use_ssl  = False
+            url = url.replace('http://', '')
+        (host, parts) = url.split(':')
+        if len(parts) > 1:
+            parts = parts.split('/')
+            port = int(parts[0])
+            parts = parts[1:]
+            path = '/'.join(parts)
+        
+        return boto.connect_ec2(aws_access_key_id=access_key,
+                                aws_secret_access_key=secret_key,
+                                is_secure=use_ssl,
+                                region=RegionInfo(None, 'eucalyptus', host),
+                                port=port,
+                                path=path) 
+
+    def __getattr__(self, name):
+        def func(*args, **kwds):
+            conn = self.get_euca_connection()
+            return getattr(conn, name)(*args, **kwds)
+        return func                     
similarity index 62%
rename from sfa/openstack/openstack_driver.py
rename to sfa/openstack/nova_driver.py
index 67c5fed..46e1e0f 100644 (file)
@@ -3,24 +3,25 @@ import datetime
 #
 from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
     RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
 from sfa.util.sfalogging import logger
 from sfa.util.defaultdict import defaultdict
 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
 from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
 from sfa.util.cache import Cache
-# one would think the driver should not need to mess with the SFA db, but..
-from sfa.storage.table import SfaTable
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
+
 from sfa.rspecs.version_manager import VersionManager
 from sfa.rspecs.rspec import RSpec
+
 # the driver interface, mostly provides default behaviours
 from sfa.managers.driver import Driver
-from sfa.openstack.openstack_shell import OpenstackShell
-import sfa.plc.peers as peers
-from sfa.plc.plaggregate import PlAggregate
+from sfa.openstack.nova_shell import NovaShell
+from sfa.openstack.euca_shell import EucaShell
+from sfa.openstack.osaggregate import OSAggregate
 from sfa.plc.plslices import PlSlices
-from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+from sfa.util.osxrn import OSXrn
 
 
 def list_to_dict(recs, key):
@@ -35,19 +36,20 @@ def list_to_dict(recs, key):
 # can be sent as-is; it takes care of authentication
 # from the global config
 # 
-class OpenstackDriver (Driver):
+class NovaDriver (Driver):
 
     # the cache instance is a class member so it survives across incoming requests
     cache = None
 
     def __init__ (self, config):
         Driver.__init__ (self, config)
-        self.shell = OpenstackShell (config)
+        self.shell = NovaShell (config)
+        self.euca_shell = EucaShell(config)
         self.cache=None
         if config.SFA_AGGREGATE_CACHING:
-            if OpenstackDriver.cache is None:
-                OpenstackDriver.cache = Cache()
-            self.cache = OpenstackDriver.cache
+            if NovaDriver.cache is None:
+                NovaDriver.cache = Cache()
+            self.cache = NovaDriver.cache
  
     ########################################
     ########## registry oriented
@@ -100,11 +102,11 @@ class OpenstackDriver (Driver):
         type=sfa_record['type']
         name = Xrn(sfa_record['hrn']).get_leaf()     
         if type == 'user':
-            if self.shell.user_get(name):
-                self.shell.user_delete(name)
+            if self.shell.auth_manager.get_user(name):
+                self.shell.auth_manager.delete_user(name)
         elif type == 'slice':
-            if self.shell.project_get(name):
-                self.shell.project_delete(name)
+            if self.shell.auth_manager.get_project(name):
+                self.shell.auth_manager.delete_project(name)
         return True
 
 
@@ -121,28 +123,29 @@ class OpenstackDriver (Driver):
             name = Xrn(record['hrn']).get_leaf()
             os_record = None
             if record['type'] == 'user':
-                os_record = self.shell.user_get(name)
+                os_record = self.shell.auth_manager.get_user(name)
+                projects = self.shell.db.project_get_by_user(name)
                 record['slices'] = [self.hrn + "." + proj.name for \
-                                    proj in os_record.projects]
-                record['roles'] = [role for role in os_record.roles]
-                keys = self.shell.key_pair_get_all_by_user(name)
+                                    proj in projects]
+                record['roles'] = self.shell.db.user_get_roles(name)
+                keys = self.shell.db.key_pair_get_all_by_user(name)
                 record['keys'] = [key.public_key for key in keys]     
             elif record['type'] == 'slice': 
-                os_record = self.shell.project_get(name)
+                os_record = self.shell.auth_manager.get_project(name)
                 record['description'] = os_record.description
-                record['PI'] = self.hrn + "." + os_record.project_manager
+                record['PI'] = [self.hrn + "." + os_record.project_manager.name]
                 record['geni_creator'] = record['PI'] 
-                record['researcher'] = [self.hrn + "." + user.name for \
-                                         user in os_record.members]
+                record['researcher'] = [self.hrn + "." + user for \
+                                         user in os_record.member_ids]
             else:
                 continue
             record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
             record['geni_certificate'] = record['gid'] 
             record['name'] = os_record.name
-            if os_record.created_at is not None:    
-                record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
-            if os_record.updated_at is not None:
-                record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
+            #if os_record.created_at is not None:    
+            #    record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
+            #if os_record.updated_at is not None:
+            #    record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
  
         return records
 
@@ -194,17 +197,16 @@ class OpenstackDriver (Driver):
         if self.cache:
             slices = self.cache.get('slices')
             if slices:
-                logger.debug("PlDriver.list_slices returns from cache")
+                logger.debug("OpenStackDriver.list_slices returns from cache")
                 return slices
     
-        # get data from db 
-        slices = self.shell.GetSlices({'peer_id': None}, ['name'])
-        slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
-        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+        # get data from db
+        projs = self.shell.auth_manager.get_projects()
+        slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs] 
     
         # cache the result
         if self.cache:
-            logger.debug ("PlDriver.list_slices stores value in cache")
+            logger.debug ("OpenStackDriver.list_slices stores value in cache")
             self.cache.add('slices', slice_urns) 
     
         return slice_urns
@@ -226,67 +228,55 @@ class OpenstackDriver (Driver):
         if cached_requested and self.cache and not slice_hrn:
             rspec = self.cache.get(version_string)
             if rspec:
-                logger.debug("PlDriver.ListResources: returning cached advertisement")
+                logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
                 return rspec 
     
         #panos: passing user-defined options
         #print "manager options = ",options
-        aggregate = PlAggregate(self)
+        aggregate = OSAggregate(self)
         rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, 
                                      options=options)
     
         # cache the result
         if self.cache and not slice_hrn:
-            logger.debug("PlDriver.ListResources: stores advertisement in cache")
+            logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
             self.cache.add(version_string, rspec)
     
         return rspec
     
     def sliver_status (self, slice_urn, slice_hrn):
         # find out where this slice is currently running
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        
-        slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
-        if len(slices) == 0:        
-            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
-        slice = slices[0]
-        
-        # report about the local nodes only
-        nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
-                              ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-
-        if len(nodes) == 0:
+        project_name = Xrn(slice_urn).get_leaf()
+        project = self.shell.auth_manager.get_project(project_name)
+        instances = self.shell.db.instance_get_all_by_project(project_name)
+        if len(instances) == 0:
             raise SliverDoesNotExist("You have not allocated any slivers here") 
-
-        site_ids = [node['site_id'] for node in nodes]
-    
+        
         result = {}
         top_level_status = 'unknown'
-        if nodes:
+        if instances:
             top_level_status = 'ready'
         result['geni_urn'] = slice_urn
-        result['pl_login'] = slice['name']
-        result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+        result['plos_login'] = 'root' 
+        result['plos_expires'] = None
         
         resources = []
-        for node in nodes:
+        for instance in instances:
             res = {}
-            res['pl_hostname'] = node['hostname']
-            res['pl_boot_state'] = node['boot_state']
-            res['pl_last_contact'] = node['last_contact']
-            if node['last_contact'] is not None:
-                
-                res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
-            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            # instances are accessed by ip, not hostname. We need to report the ip
+            # somewhere so users know where to ssh to.     
+            res['plos_hostname'] = instance.hostname
+            res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))    
+            res['plos_boot_state'] = instance.vm_state
+            res['plos_sliver_type'] = instance.instance_type.name 
+            sliver_id =  Xrn(slice_urn).get_sliver_id(instance.project_id, \
+                                                      instance.hostname, instance.id)
             res['geni_urn'] = sliver_id
-            if node['boot_state'] == 'boot':
-                res['geni_status'] = 'ready'
+
+            if instance.vm_state == 'running':
+                res['boot_state'] = 'ready';
             else:
-                res['geni_status'] = 'failed'
-                top_level_status = 'failed' 
-                
-            res['geni_error'] = ''
-    
+                res['boot_state'] = 'unknown'  
             resources.append(res)
             
         result['geni_status'] = top_level_status
@@ -295,101 +285,50 @@ class OpenstackDriver (Driver):
 
     def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
 
-        aggregate = PlAggregate(self)
-        slices = PlSlices(self)
-        peer = slices.get_peer(slice_hrn)
-        sfa_peer = slices.get_sfa_peer(slice_hrn)
-        slice_record=None    
-        if users:
-            slice_record = users[0].get('slice_record', {})
-    
+        aggregate = OSAggregate(self)
+        slicename = get_leaf(slice_hrn)
+        
         # parse rspec
         rspec = RSpec(rspec_string)
         requested_attributes = rspec.version.get_slice_attributes()
+        pubkeys = []
+        for user in users:
+            pubkeys.extend(user['keys']) 
+        # assume that there is a key whose name matches the caller's username.
+        project_key = Xrn(users[0]['urn']).get_leaf()    
         
-        # ensure site record exists
-        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+         
         # ensure slice record exists
-        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        aggregate.create_project(slicename, users, options=options)
         # ensure person records exists
-        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
-        # ensure slice attributes exists
-        slices.verify_slice_attributes(slice, requested_attributes, options=options)
-        
+        aggregate.create_project_users(slicename, users, options=options)
         # add/remove slice from nodes
-        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
-        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer) 
+        aggregate.run_instances(slicename, rspec, project_key, pubkeys)    
    
-        # add/remove links links 
-        slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
-    
-        # handle MyPLC peer association.
-        # only used by plc and ple.
-        slices.handle_peer(site, slice, persons, peer)
-        
         return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
 
     def delete_sliver (self, slice_urn, slice_hrn, creds, options):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename})
-        if not slices:
+        name = OSXrn(xrn=slice_urn).name
+        slice = self.shell.project_get(name)
+        if not slice:
             return 1
-        slice = slices[0]
-    
-        # determine if this is a peer slice
-        # xxx I wonder if this would not need to use PlSlices.get_peer instead 
-        # in which case plc.peers could be deprecated as this here
-        # is the only/last call to this last method in plc.peers
-        peer = peers.get_peer(self, slice_hrn)
-        try:
-            if peer:
-                self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
-            self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
-        finally:
-            if peer:
-                self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        instances = self.shell.db.instance_get_all_by_project(name)
+        for instance in instances:
+            self.shell.db.instance_destroy(instance.instance_id)
         return 1
     
     def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice = slices[0]
-        requested_time = utcparse(expiration_time)
-        record = {'expires': int(datetime_to_epoch(requested_time))}
-        try:
-            self.shell.UpdateSlice(slice['slice_id'], record)
-            return True
-        except:
-            return False
+        return True
 
-    # remove the 'enabled' tag 
     def start_slice (self, slice_urn, slice_hrn, creds):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice_id = slices[0]['slice_id']
-        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
-        # just remove the tag if it exists
-        if slice_tags:
-            self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
         return 1
 
-    # set the 'enabled' tag to 0
     def stop_slice (self, slice_urn, slice_hrn, creds):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice_id = slices[0]['slice_id']
-        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
-        if not slice_tags:
-            self.shell.AddSliceTag(slice_id, 'enabled', '0')
-        elif slice_tags[0]['value'] != "0":
-            tag_id = slice_tags[0]['slice_tag_id']
-            self.shell.UpdateSliceTag(tag_id, '0')
+        name = OSXrn(xrn=slice_urn).name
+        slice = self.shell.get_project(name)
+        instances = self.shell.db.instance_get_all_by_project(name)
+        for instance in instances:
+            self.shell.db.instance_stop(instance.instance_id)
         return 1
     
     def reset_slice (self, slice_urn, slice_hrn, creds):
@@ -398,7 +337,7 @@ class OpenstackDriver (Driver):
     # xxx this code is quite old and has not run for ages
     # it is obviously totally broken and needs a rewrite
     def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
-        raise SfaNotImplemented,"PlDriver.get_ticket needs a rewrite"
+        raise SfaNotImplemented,"OpenStackDriver.get_ticket needs a rewrite"
 # please keep this code for future reference
 #        slices = PlSlices(self)
 #        peer = slices.get_peer(slice_hrn)
diff --git a/sfa/openstack/nova_shell.py b/sfa/openstack/nova_shell.py
new file mode 100644 (file)
index 0000000..9179faa
--- /dev/null
@@ -0,0 +1,79 @@
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+from sfa.util.sfalogging import logger
+try:
+    from nova import db
+    from nova import flags
+    from nova import context
+    from nova.auth.manager import AuthManager
+    from nova.compute.manager import ComputeManager
+    from nova.network.manager import NetworkManager
+    from nova.scheduler.manager import SchedulerManager
+    from nova.image.glance import GlanceImageService
+    has_nova = True
+except:
+    has_nova = False
+
+
+class InjectContext:
+    """
+    Wraps the module and injects the context when executing methods 
+    """     
+    def __init__(self, proxy, context):
+        self.proxy = proxy
+        self.context = context
+    
+    def __getattr__(self, name):
+        def func(*args, **kwds):
+            result=getattr(self.proxy, name)(self.context, *args, **kwds)
+            return result
+        return func
+
+class NovaShell:
+    """
+    A simple native shell to a nova backend. 
+    This class can receive all nova calls to the underlying testbed
+    """
+    
+    # dont care about limiting calls yet 
+    direct_calls = []
+    alias_calls = {}
+
+
+    # use the 'capability' auth mechanism for higher performance when the PLC db is local    
+    def __init__ ( self, config ) :
+        url = config.SFA_PLC_URL
+        # try to figure if the url is local
+        is_local=False    
+        hostname=urlparse(url).hostname
+        if hostname == 'localhost': is_local=True
+        # otherwise compare IP addresses; 
+        # this might fail for any number of reasons, so let's harden that
+        try:
+            # xxx todo this seems to result in a DNS request for each incoming request to the AM
+            # should be cached or improved
+            url_ip=socket.gethostbyname(hostname)
+            local_ip=socket.gethostbyname(socket.gethostname())
+            if url_ip==local_ip: is_local=True
+        except:
+            pass
+
+
+        if is_local and has_nova:
+            logger.debug('nova access - native')
+            # load the config
+            flags.FLAGS(['foo', '--flagfile=/etc/nova/nova.conf', 'foo', 'foo'])
+            # instantiate managers 
+            self.auth_manager = AuthManager()
+            self.compute_manager = ComputeManager()
+            self.network_manager = NetworkManager()
+            self.scheduler_manager = SchedulerManager()
+            self.db = InjectContext(db, context.get_admin_context())
+            self.image_manager = InjectContext(GlanceImageService(), context.get_admin_context())
+        else:
+            self.auth = None
+            self.proxy = None
+            logger.debug('nova access - REST')
+            raise SfaNotImplemented('nova access - Rest')
diff --git a/sfa/openstack/openstack_shell.py b/sfa/openstack/openstack_shell.py
deleted file mode 100644 (file)
index d2c19fb..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-import sys
-import xmlrpclib
-import socket
-from urlparse import urlparse
-
-from sfa.util.sfalogging import logger
-
-class OpenstackShell:
-    """
-    A simple xmlrpc shell to a myplc instance
-    This class can receive all Openstack calls to the underlying testbed
-    """
-    
-    # dont care about limiting calls yet 
-    direct_calls = []
-    alias_calls = {}
-
-
-    # use the 'capability' auth mechanism for higher performance when the PLC db is local    
-    def __init__ ( self, config ) :
-        url = config.SFA_PLC_URL
-        # try to figure if the url is local
-        hostname=urlparse(url).hostname
-        is_local=False
-        if hostname == 'localhost': is_local=True
-        # otherwise compare IP addresses; 
-        # this might fail for any number of reasons, so let's harden that
-        try:
-            # xxx todo this seems to result in a DNS request for each incoming request to the AM
-            # should be cached or improved
-            url_ip=socket.gethostbyname(hostname)
-            local_ip=socket.gethostbyname(socket.gethostname())
-            if url_ip==local_ip: is_local=True
-        except:
-            pass
-
-
-        # Openstack provides a RESTful api but it is very limited, so we will
-        # ignore it for now and always use the native openstack (nova) library.
-        # This of course will not work if sfa is not installed on the same machine
-        # as the openstack-compute package.   
-        if is_local:
-            try:
-                from nova.auth.manager import AuthManager, db, context
-                direct_access=True
-            except:
-                direct_access=False
-        if is_local and direct_access:
-            
-            logger.debug('openstack access - native')
-            self.auth = context.get_admin_context()
-            # AuthManager isnt' really useful for much yet but it's
-            # more convenient to use than the db reference which requires
-            # a context. Lets hold onto the AuthManager reference for now.
-            #self.proxy = AuthManager()
-            self.auth_manager = AuthManager()
-            self.proxy = db
-
-        else:
-            self.auth = None
-            self.proxy = None
-            logger.debug('openstack access - REST')
-            raise SfaNotImplemented('openstack access - Rest')
-
-    def __getattr__(self, name):
-        def func(*args, **kwds):
-            result=getattr(self.proxy, name)(self.auth, *args, **kwds)
-            return result
-        return func
diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
new file mode 100644 (file)
index 0000000..cf5a3b4
--- /dev/null
@@ -0,0 +1,273 @@
+from nova.exception import ImageNotFound
+from nova.api.ec2.cloud import CloudController
+from sfa.util.faults import SfaAPIError
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.services import Services
+from sfa.util.xrn import Xrn
+from sfa.util.osxrn import OSXrn
+from sfa.rspecs.version_manager import VersionManager
+
+
+def disk_image_to_rspec_object(image):
+    img = DiskImage()
+    img['name'] = image['ami']['name']
+    img['description'] = image['ami']['name']
+    img['os'] = image['ami']['name']
+    img['version'] = image['ami']['name']
+    return img
+    
+
+def instance_to_sliver(instance, slice_xrn=None):
+    # should include?
+    # * instance.image_ref
+    # * instance.kernel_id
+    # * instance.ramdisk_id
+    import nova.db.sqlalchemy.models
+    name=None
+    type=None
+    sliver_id = None
+    if isinstance(instance, dict):
+        # this is an isntance type dict
+        name = instance['name']
+        type = instance['name']
+    elif isinstance(instance, nova.db.sqlalchemy.models.Instance):
+        # this is an object that describes a running instance
+        name = instance.display_name
+        type = instance.instance_type.name
+    else:
+        raise SfaAPIError("instnace must be an instance_type dict or" + \
+                           " a nova.db.sqlalchemy.models.Instance object")
+    if slice_xrn:
+        xrn = Xrn(slice_xrn, 'slice')
+        sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)
+
+    sliver = Sliver({'slice_id': sliver_id,
+                     'name': name,
+                     'type': 'plos-' + type,
+                     'tags': []})
+    return sliver
+            
+
+class OSAggregate:
+
+    def __init__(self, driver):
+        self.driver = driver
+
+    def get_machine_image_details(self, image):
+        """
+        Returns a dict that contains the ami, aki and ari details for the specified
+        ami image. 
+        """
+        disk_image = {}
+        if image['container_format'] == 'ami':
+            kernel_id = image['properties']['kernel_id']
+            ramdisk_id = image['properties']['ramdisk_id']
+            disk_image['ami'] = image
+            disk_image['aki'] = self.driver.shell.image_manager.show(kernel_id)
+            disk_image['ari'] = self.driver.shell.image_manager.show(ramdisk_id)
+        return disk_image
+        
+    def get_disk_image(self, id=None, name=None):
+        """
+        Look up a image bundle using the specifeid id or name  
+        """
+        disk_image = None    
+        try:
+            if id:
+                image = self.driver.shell.image_manager.show(image_id)
+            elif name:
+                image = self.driver.shell.image_manager.show_by_name(image_name)
+            if image['container_format'] == 'ami':
+                disk_image = self.get_machine_image_details(image)
+        except ImageNotFound:
+                pass
+        return disk_image
+
+    def get_available_disk_images(self):
+        # get image records
+        disk_images = []
+        for image in self.driver.shell.image_manager.detail():
+            if image['container_format'] == 'ami':
+                disk_images.append(self.get_machine_image_details(image))
+        return disk_images 
+
+    def get_rspec(self, slice_xrn=None, version=None, options={}):
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+            nodes = self.get_aggregate_nodes()
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+            nodes = self.get_slice_nodes(slice_xrn)
+        rspec = RSpec(version=rspec_version, user_options=options)
+        rspec.version.add_nodes(nodes)
+        return rspec.toxml()
+
+    def get_slice_nodes(self, slice_xrn):
+        name = OSXrn(xrn = slice_xrn).name
+        instances = self.driver.shell.db.instance_get_all_by_project(name)
+        rspec_nodes = []
+        for instance in instances:
+            rspec_node = Node()
+            xrn = OSXrn(instance.hostname, 'node')
+            rspec_node['component_id'] = xrn.urn
+            rspec_node['component_name'] = xrn.name
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()   
+            sliver = instance_to_sliver(instance)
+            disk_image = self.get_disk_image(instance.image_ref)
+            sliver['disk_images'] = [disk_image_to_rspec_object(disk_image)]
+            rspec_node['slivers'] = [sliver]
+            rspec_nodes.append(rspec_node)
+        return rspec_nodes
+
+    def get_aggregate_nodes(self):
+                
+        zones = self.driver.shell.db.zone_get_all()
+        if not zones:
+            zones = ['cloud']
+        else:
+            zones = [zone.name for zone in zones]
+
+        # available sliver/instance/vm types
+        instances = self.driver.shell.db.instance_type_get_all().values()
+        # available images
+        disk_images = self.get_available_disk_images()
+        disk_image_objects = [disk_image_to_rspec_object(image) \
+                               for image in disk_images]  
+        rspec_nodes = []
+        for zone in zones:
+            rspec_node = Node()
+            xrn = OSXrn(zone, 'node')
+            rspec_node['component_id'] = xrn.urn
+            rspec_node['component_name'] = xrn.name
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            rspec_node['exclusive'] = 'false'
+            rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
+                                                HardwareType({'name': 'pc'})]
+            slivers = []
+            for instance in instances:
+                sliver = instance_to_sliver(instance)
+                sliver['disk_images'] = disk_image_objects
+                slivers.append(sliver)
+        
+            rspec_node['slivers'] = slivers
+            rspec_nodes.append(rspec_node) 
+
+        return rspec_nodes 
+
+
+    def create_project(self, slicename, users, options={}):
+        """
+        Create the slice if it doesn't alredy exist  
+        """
+        import nova.exception.ProjectNotFound
+        try:
+            slice = self.driver.shell.auth_manager.get_project(slicename)
+        except nova.exception.ProjectNotFound:
+            # convert urns to user names
+            usernames = [Xrn(user['urn']).get_leaf() for user in users]
+            # assume that the first user is the project manager
+            proj_manager = usernames[0] 
+            self.driver.shell.auth_manager.create_project(slicename, proj_manager)
+
+    def create_project_users(self, slicename, users, options={}):
+        """
+        Add requested users to the specified slice.  
+        """
+        
+        # There doesn't seem to be an effcient way to 
+        # look up all the users of a project, so lets not  
+        # attempt to remove stale users . For now lets just
+        # ensure that the specified users exist     
+        for user in users:
+            username = Xrn(user['urn']).get_leaf()
+            try:
+                self.driver.shell.auth_manager.get_user(username)
+            except nova.exception.UserNotFound:
+                self.driver.shell.auth_manager.create_user(username)
+            self.verify_user_keys(username, user['keys'], options)
+        
+
+    def verify_user_keys(self, username, keys, options={}):
+        """
+        Add requested keys.
+        """
+        append = options.get('append', True)    
+        existing_keys = self.driver.shell.db.key_pair_get_all_by_user(username)
+        existing_pub_keys = [key.public_key for key in existing_keys]
+        removed_pub_keys = set(existing_pub_keys).difference(keys)
+        added_pub_keys = set(keys).difference(existing_pub_keys)
+        pubkeys = []
+        # add new keys
+        for public_key in added_pub_keys:
+            key = {}
+            key['user_id'] = username
+            key['name'] =  username
+            key['public'] = public_key
+            self.driver.shell.db.key_pair_create(key)
+
+        # remove old keys
+        if not append:
+            for key in existing_keys:
+                if key.public_key in removed_pub_keys:
+                    self.driver.shell.db.key_pair_destroy(username, key.name)
+    
+    def reserve_instance(self, image_id, kernel_id, ramdisk_id, \
+                         instance_type, key_name, user_data):
+        conn  = self.driver.euca_shell
+        logger.info('Reserving an instance: image: %s, kernel: ' \
+                    '%s, ramdisk: %s, type: %s, key: %s' % \
+                    (image_id, kernel_id, ramdisk_id,
+                    instance_type, key_name))
+        try:
+            reservation = conn.run_instances(image_id=image_id,
+                                             kernel_id=kernel_id,
+                                             ramdisk_id=ramdisk_id,
+                                             instance_type=instance_type,
+                                             key_name=key_name,
+                                             user_data = user_data)
+        except EC2ResponseError, ec2RespError:
+            logger.log_exc(ec2RespError)
+               
+    def run_instances(self, slicename, rspec, keyname, pubkeys):
+        """
+        Create the instances thats requested in the rspec 
+        """
+        # the default image to use for instnaces that dont
+        # explicitly request an image.
+        # Just choose the first available image for now.
+        available_images = self.get_available_disk_images()
+        default_image = self.get_disk_images()[0]    
+        default_ami_id = CloudController.image_ec2_id(default_image['ami']['id'])  
+        default_aki_id = CloudController.image_ec2_id(default_image['aki']['id'])  
+        default_ari_id = CloudController.image_ec2_id(default_image['ari']['id'])
+
+        # get requested slivers
+        rspec = RSpec(rspec)
+        requested_instances = defaultdict(list)
+        # iterate over clouds/zones/nodes
+        for node in rspec.version.get_nodes_with_slivers():
+            instance_types = node.get('slivers', [])
+            if isinstance(instance_types, list):
+                # iterate over sliver/instance types
+                for instance_type in instance_types:
+                    ami_id = default_ami_id
+                    aki_id = default_aki_id
+                    ari_id = default_ari_id
+                    req_image = instance_type.get('disk_images')
+                    if req_image and isinstance(req_image, list):
+                        req_image_name = req_image[0]['name']
+                        disk_image = self.get_disk_image(name=req_image_name)
+                        if disk_image:
+                            ami_id = CloudController.image_ec2_id(disk_image['ami']['id'])
+                            aki_id = CloudController.image_ec2_id(disk_image['aki']['id'])
+                            ari_id = CloudController.image_ec2_id(disk_image['ari']['id'])
+                    # start the instance
+                    self.reserve_instance(ami_id, aki_id, ari_id, \
+                                          instance_type['name'], keyname, pubkeys) 
index 1cace7a..55e6291 100644 (file)
@@ -136,6 +136,11 @@ class PlAggregate:
         return (slice, slivers)
 
     def get_nodes_and_links(self, slice=None,slivers=[], options={}):
+        # if we are dealing with a slice that has no node just return 
+        # and empty list    
+        if slice is not None and not slice['node_ids']:
+            return ([],[])
+
         filter = {}
         tags_filter = {}
         if slice and 'node_ids' in slice and slice['node_ids']:
index 3f01e7f..b9db9b2 100644 (file)
@@ -11,7 +11,8 @@ from sfa.util.xrn import hrn_to_urn, get_leaf, urn_to_sliver_id
 from sfa.util.cache import Cache
 
 # one would think the driver should not need to mess with the SFA db, but..
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
 
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
@@ -109,7 +110,15 @@ class PlDriver (Driver):
                 self.shell.AddPersonToSite(pointer, login_base)
     
             # What roles should this user have?
-            self.shell.AddRoleToPerson('user', pointer)
+            roles=[]
+            if 'roles' in sfa_record: 
+                # if specified in xml, but only low-level roles
+                roles = [ role for role in sfa_record['roles'] if role in ['user','tech'] ]
+            # at least user if no other cluse could be found
+            if not roles:
+                roles=['user']
+            for role in roles:
+                self.shell.AddRoleToPerson(role, pointer)
             # Add the user's key
             if pub_key:
                 self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
@@ -206,7 +215,7 @@ class PlDriver (Driver):
 
 
     ##
-    # Convert SFA fields to PLC fields for use when registering up updating
+    # Convert SFA fields to PLC fields for use when registering or updating
     # registry record in the PLC database
     #
 
@@ -224,10 +233,10 @@ class PlDriver (Driver):
                pl_record["url"] = sfa_record["url"]
            if "description" in sfa_record:
                pl_record["description"] = sfa_record["description"]
-        if "expires" in sfa_record:
-            date = utcparse(sfa_record['expires'])
-            expires = datetime_to_epoch(date)
-            pl_record["expires"] = expires
+            if "expires" in sfa_record:
+                date = utcparse(sfa_record['expires'])
+                expires = datetime_to_epoch(date)
+                pl_record["expires"] = expires
 
         elif type == "node":
             if not "hostname" in pl_record:
@@ -446,16 +455,15 @@ class PlDriver (Driver):
         # we'll replace pl ids (person_ids) with hrns from the sfa records
         # we obtain
         
-        # get the sfa records
-        table = SfaTable()
+        # get the registry records
         person_list, persons = [], {}
-        person_list = table.find({'type': 'user', 'pointer': person_ids})
+        person_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(person_ids))
         # create a hrns keyed on the sfa record's pointer.
         # Its possible for multiple records to have the same pointer so
         # the dict's value will be a list of hrns.
         persons = defaultdict(list)
         for person in person_list:
-            persons[person['pointer']].append(person)
+            persons[person.pointer].append(person)
 
         # get the pl records
         pl_person_list, pl_persons = [], {}
@@ -469,13 +477,14 @@ class PlDriver (Driver):
             #    continue 
             sfa_info = {}
             type = record['type']
+            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
             if (type == "slice"):
                 # all slice users are researchers
                 record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
                 record['PI'] = []
                 record['researcher'] = []
                 for person_id in record.get('person_ids', []):
-                    hrns = [person['hrn'] for person in persons[person_id]]
+                    hrns = [person.hrn for person in persons[person_id]]
                     record['researcher'].extend(hrns)                
 
                 # pis at the slice's site
@@ -483,12 +492,13 @@ class PlDriver (Driver):
                     pl_pis = site_pis[record['site_id']]
                     pi_ids = [pi['person_id'] for pi in pl_pis]
                     for person_id in pi_ids:
-                        hrns = [person['hrn'] for person in persons[person_id]]
+                        hrns = [person.hrn for person in persons[person_id]]
                         record['PI'].extend(hrns)
                         record['geni_creator'] = record['PI'] 
                 
             elif (type.startswith("authority")):
                 record['url'] = None
+                logger.info("fill_record_sfa_info - authority xherex")
                 if record['pointer'] != -1:
                     record['PI'] = []
                     record['operator'] = []
@@ -497,7 +507,7 @@ class PlDriver (Driver):
                         if pointer not in persons or pointer not in pl_persons:
                             # this means there is not sfa or pl record for this user
                             continue   
-                        hrns = [person['hrn'] for person in persons[pointer]] 
+                        hrns = [person.hrn for person in persons[pointer]] 
                         roles = pl_persons[pointer]['roles']   
                         if 'pi' in roles:
                             record['PI'].extend(hrns)
@@ -511,6 +521,7 @@ class PlDriver (Driver):
                 # xxx TODO: URI, LatLong, IP, DNS
     
             elif (type == "user"):
+                logger.info('setting user.email')
                 sfa_info['email'] = record.get("email", "")
                 sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
                 sfa_info['geni_certificate'] = record['gid'] 
index 3a810a5..1f530f6 100644 (file)
@@ -1,4 +1,9 @@
 from sfa.rspecs.elements.element import Element
 
 class DiskImage(Element):
-    fields = {}        
+    fields = [
+        'name',
+        'os',
+        'version',
+        'description',
+    ]        
index b288cbb..7f79e81 100644 (file)
@@ -18,4 +18,4 @@ class Element(dict):
         elif hasattr(self.element, name):
             return getattr(self.element, name)
         else:
-            raise AttributeError, "class Element has not attribute %s" % name
+            raise AttributeError, "class Element has no attribute %s" % name
index 8dd6542..55a3feb 100644 (file)
@@ -8,4 +8,5 @@ class Sliver(Element):
         'name',
         'type',
         'tags',
+        'disk_images',
     ]
diff --git a/sfa/rspecs/elements/versions/pgv2DiskImage.py b/sfa/rspecs/elements/versions/pgv2DiskImage.py
new file mode 100644 (file)
index 0000000..51363de
--- /dev/null
@@ -0,0 +1,24 @@
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.disk_image import DiskImage
+
+class PGv2DiskImage:
+
+    @staticmethod
+    def add_images(xml, images):
+        if not images:
+            return 
+        if not isinstance(images, list):
+            images = [images]
+        for image in images: 
+            xml.add_instance('disk_image', image, DiskImage.fields)
+    
+    @staticmethod
+    def get_images(xml, filter={}):
+        xpath = './default:disk_image | ./disk_image'
+        image_elems = xml.xpath(xpath)
+        images = []
+        for image_elem in image_elems:
+            image = DiskImage(image_elem.attrib, image_elem)
+            images.append(image)
+        return images
+
index ffa4b41..55091f5 100644 (file)
@@ -1,5 +1,6 @@
 from sfa.rspecs.elements.element import Element
 from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
 
 class PGv2SliverType:
 
@@ -14,7 +15,10 @@ class PGv2SliverType:
             if sliver.get('type'):
                 sliver_elem.set('name', sliver['type'])
             if sliver.get('client_id'):
-                sliver_elem.set('client_id', sliver['client_id'])  
+                sliver_elem.set('client_id', sliver['client_id'])
+            images = sliver.get('disk_images')
+            if images and isinstance(images, list):
+                PGv2DiskImage.add_images(sliver_elem, images)      
             PGv2SliverType.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
     
     @staticmethod
@@ -39,6 +43,7 @@ class PGv2SliverType:
                 sliver['component_id'] = xml.attrib['component_id']
             if 'name' in sliver_elem.attrib:
                 sliver['type'] = sliver_elem.attrib['name']
+            sliver['images'] = PGv2DiskImage.get_images(sliver_elem)
             slivers.append(sliver)
         return slivers
 
index fdf1eb2..f50e687 100644 (file)
@@ -55,8 +55,9 @@ class SFAv1Node:
             if location:
                 node_elem.add_instance('location', location, Location.fields)
 
-            for interface in node.get('interfaces', []):
-                node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4']) 
+            if isinstance(node.get('interfaces'), list):
+                for interface in node.get('interfaces', []):
+                    node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4']) 
             
             #if 'bw_unallocated' in node and node['bw_unallocated']:
             #    bw_unallocated = etree.SubElement(node_elem, 'bw_unallocated', units='kbps').text = str(int(node['bw_unallocated'])/1000)
diff --git a/sfa/rspecs/versions/federica.py b/sfa/rspecs/versions/federica.py
new file mode 100644 (file)
index 0000000..798fc7d
--- /dev/null
@@ -0,0 +1,17 @@
+from sfa.rspecs.versions.pgv2 import PGv2Ad, PGv2Request, PGv2Manifest
+
+class FedericaAd (PGv2Ad):
+    enabled = True
+    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd'
+    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
+class FedericaRequest (PGv2Request):
+    enabled = True
+    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/request.xsd'
+    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
+class FedericaManifest (PGv2Manifest):
+    enabled = True
+    schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/manifest.xsd'
+    namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
index 39bbac5..ab022df 100644 (file)
@@ -101,7 +101,7 @@ class SFAv1(BaseVersion):
             sliver_attributes = self.get_sliver_attributes(nodename, network)
             for sliver_attribute in sliver_attributes:
                 sliver_attribute['node_id'] = nodename
-                attributes.append(attribute)
+                attributes.append(sliver_attribute)
         return attributes
 
 
index bdad7df..13a75fc 100644 (file)
@@ -20,6 +20,11 @@ class Registry(SfaServer):
     
     def __init__(self, ip, port, key_file, cert_file):
         SfaServer.__init__(self, ip, port, key_file, cert_file,'registry')
+        sfa_config=Config()
+        if Config().SFA_REGISTRY_ENABLED: 
+            from sfa.storage.alchemy import engine
+            from sfa.storage.dbschema import DBSchema
+            DBSchema().init_or_upgrade()
 
 #
 # Registries is a dictionary of registry connections keyed on the registry hrn
index b87e119..9ab75e7 100755 (executable)
@@ -27,7 +27,8 @@ from sfa.util.config import Config
 from sfa.trust.gid import GID, create_uuid
 from sfa.trust.hierarchy import Hierarchy
 
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
 
 def main():
     args = sys.argv
@@ -116,12 +117,10 @@ def export_gid(options):
     hrn = options.export
     type = options.type
     # check sfa table first
-    filter = {'hrn': hrn}
-    if type:
-        filter['type'] = type                    
-    table = SfaTable()
-    records = table.find(filter)
-    if not records:
+    request=dbsession.query(RegRecord).filter_by(hrn=hrn)
+    if type: request = request.filter_by(type=type)
+    record=request.first()
+    if not record:
         # check the authorities hierarchy 
         hierarchy = Hierarchy()
         try:
@@ -131,8 +130,7 @@ def export_gid(options):
             print "Record: %s not found" % hrn
             sys.exit(1)
     else:
-        record = records[0]
-        gid = GID(string=record['gid'])
+        gid = GID(string=record.gid)
         
     # get the outfile
     outfile = options.outfile
@@ -163,16 +161,14 @@ def import_gid(options):
         sys.exit(1)
 
     # check if record exists in db
-    table = SfaTable()
-    records = table.find({'hrn': gid.get_hrn(), 'type': 'authority'})
-    if not records:
+    record = dbsession.query(RegRecord).filter_by(type='authority',hrn=gid.get_hrn()).first()
+    if not record:
         print "%s not found in record database" % gid.get_hrn()  
         sys.exit(1)
 
     # update the database record
-    record = records[0]
-    record['gid'] = gid.save_to_string(save_parents=True)
-    table.update(record)
+    record.gid = gid.save_to_string(save_parents=True)
+    dbsession.commit()
     if options.verbose:
         print "Imported %s gid into db" % record['hrn']
 
diff --git a/sfa/server/sfa-clean-peer-records.py b/sfa/server/sfa-clean-peer-records.py
deleted file mode 100644 (file)
index bcb917b..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import os
-import traceback
-import socket
-
-from sfa.util.prefixTree import prefixTree
-from sfa.util.config import Config
-
-from sfa.trust.certificate import Keypair
-from sfa.trust.hierarchy import Hierarchy
-from sfa.server.registry import Registries
-
-from sfa.storage.table import SfaTable
-
-from sfa.client.sfaserverproxy import SfaServerProxy 
-
-from sfa.generic import Generic
-
-def main():
-    config = Config()
-    if not config.SFA_REGISTRY_ENABLED:
-        sys.exit(0)
-
-    # Get the path to the sfa server key/cert files from 
-    # the sfa hierarchy object
-    sfa_hierarchy = Hierarchy()
-    sfa_key_path = sfa_hierarchy.basedir
-    key_file = os.path.join(sfa_key_path, "server.key")
-    cert_file = os.path.join(sfa_key_path, "server.cert")
-    key = Keypair(filename=key_file) 
-
-    # get a connection to our local sfa registry
-    # and a valid credential
-    authority = config.SFA_INTERFACE_HRN
-    url = 'http://%s:%s/' %(config.SFA_REGISTRY_HOST, config.SFA_REGISTRY_PORT)
-    registry = SfaServerProxy(url, key_file, cert_file)
-    sfa_api = Generic.the_flavour()
-    credential = sfa_api.getCredential()
-
-    # get peer registries
-    registries = Registries(sfa_api)
-    tree = prefixTree()
-    tree.load(registries.keys())
-    
-    # get local peer records
-    table = SfaTable()
-    peer_records = table.find({'~peer_authority': None})
-    found_records = []
-    hrn_dict = {}
-    for record in peer_records:
-        registry_hrn = tree.best_match(record['hrn'])
-        if registry_hrn not in hrn_dict:
-            hrn_dict[registry_hrn] = []
-        hrn_dict[registry_hrn].append(record['hrn'])
-
-    # attempt to resolve the record at the authoritative interface 
-    for registry_hrn in hrn_dict:
-        if registry_hrn in registries:
-            records = []
-            target_hrns = hrn_dict[registry_hrn]    
-            try:
-                records = registries[registry_hrn].Resolve(target_hrns, credential)
-                found_records.extend([record['hrn'] for record in records])
-            except ServerException:
-                # an exception will be thrown if the record doenst exist
-                # if so remove the record from the local registry
-                continue
-            except:
-                # this deosnt necessarily mean the records dont exist
-                # lets give them the benefit of the doubt here (for now)
-                found_records.extend(target_hrns)
-                traceback.print_exc()
-
-    # remove what wasnt found 
-    for peer_record in peer_records:
-        if peer_record['hrn'] not in found_records:
-            registries[sfa_api.hrn].Remove(peer_record['hrn'], credential, peer_record['type'])
-                
-if __name__ == '__main__':
-    main()
index 73c467d..f1fc047 100755 (executable)
@@ -14,7 +14,7 @@
 # is up to date and accurate.
 #
 # 1) Import the existing planetlab database, creating the
-#    appropriate SFA records. This is done by running the "sfa-import-plc.py" tool.
+#    appropriate SFA records. This is done by running the "sfa-import.py" tool.
 #
 # 2) Create a "trusted_roots" directory and place the certificate of the root
 #    authority in that directory. Given the defaults in sfa-import.py, this
@@ -40,7 +40,6 @@ from sfa.trust.trustedroots import TrustedRoots
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.hierarchy import Hierarchy
 from sfa.trust.gid import GID
-
 from sfa.server.sfaapi import SfaApi
 from sfa.server.registry import Registries
 from sfa.server.aggregate import Aggregates
@@ -104,22 +103,13 @@ def install_peer_certs(server_key_file, server_cert_file):
                 logger.info("get_trusted_certs: skipping non sfa aggregate: %s" % new_hrn)
                 continue
             trusted_gids = ReturnValue.get_value(interface.get_trusted_certs())
-            #trusted_gids = interface.get_trusted_certs()
-            print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs  TRUSTED_GIDS %s   " %(trusted_gids)
             if trusted_gids:
-                 #and not isinstance(trusted_gids,list):
                 # the gid we want should be the first one in the list,
                 # but lets make sure
-                #trusted_gids = [trusted_gids]
-                print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs  TRUSTED_GIDS %s   " %(trusted_gids)
                 for trusted_gid in trusted_gids:
-                    print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs  trusted_gids%s   " %(trusted_gid)
                     # default message
                     message = "interface: %s\t" % (api.interface)
                     message += "unable to install trusted gid for %s" % \
-                               (new_hrn) 
-                    print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs   message %s   " %(message)
-                    #gid = GID(string=trusted_gids[0])
+                               (new_hrn)
                     gid = GID(string=trusted_gid)
                     print>>sys.stderr, " \r\n \r\n \t=============================================== install_peer_certs   gid %s   " %(gid)
                     peer_gids.append(gid)
@@ -141,37 +131,35 @@ def update_cert_records(gids):
     Make sure there is a record in the registry for the specified gids. 
     Removes old records from the db.
     """
-    # import SfaTable here so this module can be loaded by PlcComponentApi
-    from sfa.storage.table import SfaTable
-    from sfa.storage.record import SfaRecord
+    # import db stuff here so this module can be loaded by PlcComponentApi
+    from sfa.storage.alchemy import dbsession
+    from sfa.storage.model import RegRecord
     if not gids:
         return
-    table = SfaTable()
     # get records that actually exist in the db
     gid_urns = [gid.get_urn() for gid in gids]
     hrns_expected = [gid.get_hrn() for gid in gids]
-    records_found = table.find({'hrn': hrns_expected, 'pointer': -1}) 
+    records_found = dbsession.query(RegRecord).\
+        filter_by(pointer=-1).filter(RegRecord.hrn.in_(hrns_expected)).all()
 
     # remove old records
     for record in records_found:
-        if record['hrn'] not in hrns_expected and \
-            record['hrn'] != self.api.config.SFA_INTERFACE_HRN:
-            table.remove(record)
+        if record.hrn not in hrns_expected and \
+            record.hrn != self.api.config.SFA_INTERFACE_HRN:
+            dbsession.delete(record)
 
     # TODO: store urn in the db so we do this in 1 query 
     for gid in gids:
         hrn, type = gid.get_hrn(), gid.get_type()
-        print>>sys.stderr, " \r\n \r\n  update_cert_records  hrn,%s type %s"%(hrn, type)       
-        record = table.find({'hrn': hrn, 'type': type, 'pointer': -1})
+        record = dbsession.query(RegRecord).filter_by(hrn=hrn, type=type,pointer=-1).first()
         if not record:
-            record = {
-                'hrn': hrn, 'type': type, 'pointer': -1,
-                'authority': get_authority(hrn),
-                'gid': gid.save_to_string(save_parents=True),
-            }
-            record = SfaRecord(dict=record)
-            print>>sys.stderr, " \r\n \r\rn record %s "%(record)
-            table.insert(record)
+            record = RegRecord (dict= {'type':type,
+                                       'hrn': hrn, 
+                                       'authority': get_authority(hrn),
+                                       'gid': gid.save_to_string(save_parents=True),
+                                       })
+            dbsession.add(record)
+    dbsession.commit()
         
 def main():
     # Generate command line parser
index ad5cbe1..2510ec4 100644 (file)
@@ -1,12 +1,12 @@
 import os, os.path
 import datetime
 
-from sfa.util.faults import SfaFault, SfaAPIError
+from sfa.util.faults import SfaFault, SfaAPIError, RecordNotFound
 from sfa.util.genicode import GENICODE
 from sfa.util.config import Config
 from sfa.util.cache import Cache
-from sfa.trust.auth import Auth
 
+from sfa.trust.auth import Auth
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.credential import Credential
 from sfa.trust.rights import determine_rights
@@ -109,9 +109,9 @@ class SfaApi (XmlrpcApi):
 
         # get a new credential
         if self.interface in ['registry']:
-            cred =  self.__getCredentialRaw()
+            cred =  self._getCredentialRaw()
         else:
-            cred =  self.__getCredential()
+            cred =  self._getCredential()
         cred.save_to_file(cred_filename, save_parents=True)
 
         return cred.save_to_string(save_parents=True)
@@ -134,7 +134,7 @@ class SfaApi (XmlrpcApi):
                 break
         return delegated_cred
  
-    def __getCredential(self):
+    def _getCredential(self):
         """ 
         Get our credential from a remote registry 
         """
@@ -148,7 +148,7 @@ class SfaApi (XmlrpcApi):
         cred = registry.GetCredential(self_cred, self.hrn, 'authority')
         return Credential(string=cred)
 
-    def __getCredentialRaw(self):
+    def _getCredentialRaw(self):
         """
         Get our current credential directly from the local registry.
         """
@@ -160,15 +160,12 @@ class SfaApi (XmlrpcApi):
         if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
             auth_hrn = hrn
         auth_info = self.auth.get_auth_info(auth_hrn)
-        # xxx thgen fixme - use SfaTable hardwired for now 
-        # thgen xxx fixme this is wrong all right, but temporary, will use generic
-        from sfa.storage.table import SfaTable
-        table = SfaTable()
-        records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
-        if not records:
-            raise RecordNotFound
-        record = records[0]
-        type = record['type']
+        from sfa.storage.alchemy import dbsession
+        from sfa.storage.model import RegRecord
+        record = dbsession.query(RegRecord).filter_by(type='authority+sa', hrn=hrn).first()
+        if not record:
+            raise RecordNotFound(hrn)
+        type = record.type
         object_gid = record.get_gid_object()
         new_cred = Credential(subject = object_gid.get_subject())
         new_cred.set_gid_caller(object_gid)
diff --git a/sfa/storage/PostgreSQL.py b/sfa/storage/PostgreSQL.py
deleted file mode 100644 (file)
index de6f9fa..0000000
+++ /dev/null
@@ -1,272 +0,0 @@
-#
-# PostgreSQL database interface. Sort of like DBI(3) (Database
-# independent interface for Perl).
-#
-#
-
-import re
-import traceback
-import commands
-from pprint import pformat
-from types import StringTypes, NoneType
-
-import psycopg2
-import psycopg2.extensions
-psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
-# UNICODEARRAY not exported yet
-psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY)
-
-# allow to run sfa2wsdl if this is missing (for mac)
-import sys
-try: import pgdb
-except: print >> sys.stderr, "WARNING, could not import pgdb"
-
-from sfa.util.faults import SfaDBError
-from sfa.util.sfalogging import logger
-
-if not psycopg2:
-    is8bit = re.compile("[\x80-\xff]").search
-
-    def unicast(typecast):
-        """
-        pgdb returns raw UTF-8 strings. This function casts strings that
-        apppear to contain non-ASCII characters to unicode objects.
-        """
-    
-        def wrapper(*args, **kwds):
-            value = typecast(*args, **kwds)
-
-            # pgdb always encodes unicode objects as UTF-8 regardless of
-            # the DB encoding (and gives you no option for overriding
-            # the encoding), so always decode 8-bit objects as UTF-8.
-            if isinstance(value, str) and is8bit(value):
-                value = unicode(value, "utf-8")
-
-            return value
-
-        return wrapper
-
-    pgdb.pgdbTypeCache.typecast = unicast(pgdb.pgdbTypeCache.typecast)
-
-def handle_exception(f):
-    def wrapper(*args, **kwds):
-        try: return f(*args, **kwds)
-        except Exception, fault:
-            raise SfaDBError(str(fault))
-    return wrapper
-
-class PostgreSQL:
-    def __init__(self, config):
-        self.config = config
-        self.debug = False
-#        self.debug = True
-        self.connection = None
-
-    @handle_exception
-    def cursor(self):
-        if self.connection is None:
-            # (Re)initialize database connection
-            if psycopg2:
-                try:
-                    # Try UNIX socket first
-                    self.connection = psycopg2.connect(user = self.config.SFA_DB_USER,
-                                                       password = self.config.SFA_DB_PASSWORD,
-                                                       database = self.config.SFA_DB_NAME)
-                except psycopg2.OperationalError:
-                    # Fall back on TCP
-                    self.connection = psycopg2.connect(user = self.config.SFA_DB_USER,
-                                                       password = self.config.SFA_DB_PASSWORD,
-                                                       database = self.config.SFA_DB_NAME,
-                                                       host = self.config.SFA_DB_HOST,
-                                                       port = self.config.SFA_DB_PORT)
-                self.connection.set_client_encoding("UNICODE")
-            else:
-                self.connection = pgdb.connect(user = self.config.SFA_DB_USER,
-                                               password = self.config.SFA_DB_PASSWORD,
-                                               host = "%s:%d" % (self.config.SFA_DB_HOST, self.config.SFA_DB_PORT),
-                                               database = self.config.SFA_DB_NAME)
-
-        (self.rowcount, self.description, self.lastrowid) = \
-                        (None, None, None)
-
-        return self.connection.cursor()
-
-    def close(self):
-        if self.connection is not None:
-            self.connection.close()
-            self.connection = None
-
-    def quote(self, value):
-        """
-        Returns quoted version of the specified value.
-        """
-
-        # The pgdb._quote function is good enough for general SQL
-        # quoting, except for array types.
-        if isinstance(value, (list, tuple, set)):
-            return "ARRAY[%s]" % ", ".join(map, self.quote, value)
-        else:
-            return pgdb._quote(value)
-
-    quote = classmethod(quote)
-
-    def param(self, name, value):
-        # None is converted to the unquoted string NULL
-        if isinstance(value, NoneType):
-            conversion = "s"
-        # True and False are also converted to unquoted strings
-        elif isinstance(value, bool):
-            conversion = "s"
-        elif isinstance(value, float):
-            conversion = "f"
-        elif not isinstance(value, StringTypes):
-            conversion = "d"
-        else:
-            conversion = "s"
-
-        return '%(' + name + ')' + conversion
-
-    param = classmethod(param)
-
-    def begin_work(self):
-        # Implicit in pgdb.connect()
-        pass
-
-    def commit(self):
-        self.connection.commit()
-
-    def rollback(self):
-        self.connection.rollback()
-
-    def do(self, query, params = None):
-        cursor = self.execute(query, params)
-        cursor.close()
-        return self.rowcount
-
-    def next_id(self, table_name, primary_key):
-        sequence = "%(table_name)s_%(primary_key)s_seq" % locals()     
-        sql = "SELECT nextval('%(sequence)s')" % locals()
-        rows = self.selectall(sql, hashref = False)
-        if rows: 
-            return rows[0][0]
-        return None 
-
-    def last_insert_id(self, table_name, primary_key):
-        if isinstance(self.lastrowid, int):
-            sql = "SELECT %s FROM %s WHERE oid = %d" % \
-                  (primary_key, table_name, self.lastrowid)
-            rows = self.selectall(sql, hashref = False)
-            if rows:
-                return rows[0][0]
-
-        return None
-
-    # modified for psycopg2-2.0.7 
-    # executemany is undefined for SELECT's
-    # see http://www.python.org/dev/peps/pep-0249/
-    # accepts either None, a single dict, a tuple of single dict - in which case it execute's
-    # or a tuple of several dicts, in which case it executemany's
-    def execute(self, query, params = None):
-
-        cursor = self.cursor()
-        try:
-
-            # psycopg2 requires %()s format for all parameters,
-            # regardless of type.
-            # this needs to be done carefully though as with pattern-based filters
-            # we might have percents embedded in the query
-            # so e.g. GetPersons({'email':'*fake*'}) was resulting in .. LIKE '%sake%'
-            if psycopg2:
-                query = re.sub(r'(%\([^)]*\)|%)[df]', r'\1s', query)
-            # rewrite wildcards set by Filter.py as '***' into '%'
-            query = query.replace ('***','%')
-
-            if not params:
-                if self.debug:
-                    logger.debug('execute0 %r'%query)
-                cursor.execute(query)
-            elif isinstance(params,dict):
-                if self.debug:
-                    logger.debug('execute-dict: params=[%r] query=[%r]'%(params,query%params))
-                cursor.execute(query,params)
-            elif isinstance(params,tuple) and len(params)==1:
-                if self.debug:
-                    logger.debug('execute-tuple %r'%(query%params[0]))
-                cursor.execute(query,params[0])
-            else:
-                param_seq=(params,)
-                if self.debug:
-                    for params in param_seq:
-                        logger.debug('executemany %r'%(query%params))
-                cursor.executemany(query, param_seq)
-            (self.rowcount, self.description, self.lastrowid) = \
-                            (cursor.rowcount, cursor.description, cursor.lastrowid)
-        except Exception, e:
-            try:
-                self.rollback()
-            except:
-                pass
-            uuid = commands.getoutput("uuidgen")
-            logger.error("Database error %s:" % uuid)
-            logger.error("Exception=%r"%e)
-            logger.error("Query=%r"%query)
-            logger.error("Params=%r"%pformat(params))
-            logger.log_exc("PostgreSQL.execute caught exception")
-            raise SfaDBError("Please contact support: %s" % str(e))
-
-        return cursor
-
-    def selectall(self, query, params = None, hashref = True, key_field = None):
-        """
-        Return each row as a dictionary keyed on field name (like DBI
-        selectrow_hashref()). If key_field is specified, return rows
-        as a dictionary keyed on the specified field (like DBI
-        selectall_hashref()).
-
-        If params is specified, the specified parameters will be bound
-        to the query.
-        """
-
-        cursor = self.execute(query, params)
-        rows = cursor.fetchall()
-        cursor.close()
-        self.commit()
-        if hashref or key_field is not None:
-            # Return each row as a dictionary keyed on field name
-            # (like DBI selectrow_hashref()).
-            labels = [column[0] for column in self.description]
-            rows = [dict(zip(labels, row)) for row in rows]
-
-        if key_field is not None and key_field in labels:
-            # Return rows as a dictionary keyed on the specified field
-            # (like DBI selectall_hashref()).
-            return dict([(row[key_field], row) for row in rows])
-        else:
-            return rows
-
-    def fields(self, table, notnull = None, hasdef = None):
-        """
-        Return the names of the fields of the specified table.
-        """
-
-        if hasattr(self, 'fields_cache'):
-            if self.fields_cache.has_key((table, notnull, hasdef)):
-                return self.fields_cache[(table, notnull, hasdef)]
-        else:
-            self.fields_cache = {}
-
-        sql = "SELECT attname FROM pg_attribute, pg_class" \
-              " WHERE pg_class.oid = attrelid" \
-              " AND attnum > 0 AND relname = %(table)s"
-
-        if notnull is not None:
-            sql += " AND attnotnull is %(notnull)s"
-
-        if hasdef is not None:
-            sql += " AND atthasdef is %(hasdef)s"
-
-        rows = self.selectall(sql, locals(), hashref = False)
-
-        self.fields_cache[(table, notnull, hasdef)] = [row[0] for row in rows]
-
-        return self.fields_cache[(table, notnull, hasdef)]
diff --git a/sfa/storage/alchemy.py b/sfa/storage/alchemy.py
new file mode 100644 (file)
index 0000000..7e00116
--- /dev/null
@@ -0,0 +1,69 @@
+from types import StringTypes
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy import ForeignKey
+
+from sfa.util.sfalogging import logger
+
+# this module is designed to be loaded when the configured db server is reachable
+# OTOH model can be loaded from anywhere including the client-side
+
+class Alchemy:
+
+    def __init__ (self, config):
+        dbname="sfa"
+        # will be created lazily on-demand
+        self._session = None
+        # the former PostgreSQL.py used the psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, nor if it's needed at all
+        # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+        # we indeed have /var/lib/pgsql/data/postgresql.conf where
+        # this setting is unset, it might be an angle to tweak that if need be
+        # try a unix socket first - omitting the hostname does the trick
+        unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
+        # the TCP fallback method
+        tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+            (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
+        for url in [ unix_url, tcp_url ] :
+            try:
+                self.engine = create_engine (url)
+                self.check()
+                self.url=url
+                return
+            except:
+                pass
+        self.engine=None
+        raise Exception,"Could not connect to database"
+                
+
+    # expects boolean True: debug is ON or False: debug is OFF
+    def debug (self, echo):
+        self.engine.echo=echo
+
+    def check (self):
+        self.engine.execute ("select 1").scalar()
+
+    def session (self):
+        if self._session is None:
+            Session=sessionmaker ()
+            self._session=Session(bind=self.engine)
+        return self._session
+
+    def close_session (self):
+        if self._session is None: return
+        self._session.close()
+        self._session=None
+
+####################
+from sfa.util.config import Config
+
+alchemy=Alchemy (Config())
+engine=alchemy.engine
+dbsession=alchemy.session()
+
diff --git a/sfa/storage/dbschema.py b/sfa/storage/dbschema.py
new file mode 100644 (file)
index 0000000..3776d52
--- /dev/null
@@ -0,0 +1,127 @@
+import sys
+import traceback
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+import migrate.versioning.api as migrate
+
+from sfa.util.sfalogging import logger
+import sfa.storage.model as model
+
+########## this class takes care of database upgrades
+### upgrade from a pre-2.1 db 
+# * 1.0 and up to 1.1-4:  ('very old')    
+#       was piggybacking the planetlab5 database
+#       this is kind of out of our scope here, we don't have the credentials 
+#       to connect to planetlab5, but this is documented in
+#       https://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#Upgradingnotes
+#       and essentially this is seamless to users
+# * from 1.1-5 up to 2.0-x: ('old')
+#       uses the 'sfa' db and essentially the 'records' table,
+#       as well as record_types
+#       together with an 'sfa_db_version' table (version, subversion)
+# * from 2.1:
+#       we have an 'records' table, plus 'users' and the like
+#       and once migrate has kicked in there is a table named (see migrate.cfg)
+#       migrate_db_version (repository_id, repository_path, version)
+### after 2.1 
+#       Starting with 2.1, we use sqlalchemy-migrate scripts in a standard way
+#       Note that the model defined in sfa.storage.model needs to be maintained 
+#       as the 'current/latest' version, and newly installed deployments will 
+#       then 'jump' to the latest version number without going through the migrations
+###
+# An initial attempt to run this as a 001_*.py migrate script 
+# did not quite work out (essentially we need to set the current version
+# number out of the migrations logic)
+# also this approach has less stuff in the initscript, which seems just right
+
+class DBSchema:
+
+    header="Upgrading to 2.1 or higher"
+
+    def __init__ (self):
+        from sfa.storage.alchemy import alchemy
+        self.url=alchemy.url
+        self.engine=alchemy.engine
+        self.repository="/usr/share/sfa/migrations"
+
+    def current_version (self):
+        try:
+            return migrate.db_version (self.url, self.repository)
+        except:
+            return None
+
+    def table_exists (self, tablename):
+        try:
+            metadata = MetaData (bind=self.engine)
+            table=Table (tablename, metadata, autoload=True)
+            return True
+        except NoSuchTableError:
+            return False
+
+    def drop_table (self, tablename):
+        if self.table_exists (tablename):
+            print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename)
+            self.engine.execute ("drop table %s cascade"%tablename)
+        else:
+            print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename)
+        
+    def handle_old_releases (self):
+        try:
+            # try to find out which old version this can be
+            if not self.table_exists ('records'):
+                # this likely means 
+                # (.) we've just created the db, so it's either a fresh install, or
+                # (.) we come from a 'very old' depl.
+                # in either case, an import is required but there's nothing to clean up
+                print >> sys.stderr,"%s: make sure to run import"%(DBSchema.header,)
+            elif self.table_exists ('sfa_db_version'):
+                # we come from an 'old' version
+                self.drop_table ('records')
+                self.drop_table ('record_types')
+                self.drop_table ('sfa_db_version')
+            else:
+                # we should be good here
+                pass
+        except:
+            print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,)
+            traceback.print_exc ()
+
+    # after this call the db schema and the version as known by migrate should 
+    # reflect the current data model and the latest known version
+    def init_or_upgrade (self):
+        # check if under version control, and initialize it otherwise
+        if self.current_version() is None:
+            before="Unknown"
+            # can be either a very old version, or a fresh install
+            # for very old versions:
+            self.handle_old_releases()
+            # in any case, initialize db from current code and reflect in migrate
+            model.init_tables(self.engine)
+            code_version = migrate.version (self.repository)
+            migrate.version_control (self.url, self.repository, code_version)
+            after="%s"%self.current_version()
+            logger.info("DBSchema : jumped to version %s"%(after))
+        else:
+            # use migrate in the usual way
+            before="%s"%self.current_version()
+            migrate.upgrade (self.url, self.repository)
+            after="%s"%self.current_version()
+            if before != after:
+                logger.info("DBSchema : upgraded version from %s to %s"%(before,after))
+            else:
+                logger.debug("DBSchema : no change needed in db schema (%s==%s)"%(before,after))
+    
+    # this trashes the db altogether, from the current model in sfa.storage.model
+    # I hope this won't collide with ongoing migrations and all
+    # actually, now that sfa uses its own db, this is essentially equivalent to 
+    # dropping the db entirely, modulo a 'service sfa start'
+    def nuke (self):
+        model.drop_tables(self.engine)
+        # so in this case it's like we haven't initialized the db at all
+        migrate.drop_version_control (self.url, self.repository)
+        
+
+if __name__ == '__main__':
+    DBSchema().init_or_upgrade()
diff --git a/sfa/storage/filter.py b/sfa/storage/filter.py
deleted file mode 100644 (file)
index afe425d..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-from types import StringTypes
-import pgdb
-from sfa.util.faults import SfaInvalidArgument
-
-from sfa.storage.parameter import Parameter, Mixed, python_type
-
-class Filter(Parameter, dict):
-    """
-    A type of parameter that represents a filter on one or more
-    columns of a database table.
-    Special features provide support for negation, upper and lower bounds, 
-    as well as sorting and clipping.
-
-
-    fields should be a dictionary of field names and types
-    Only filters on non-sequence type fields are supported.
-    example : fields = {'node_id': Parameter(int, "Node identifier"),
-                        'hostname': Parameter(int, "Fully qualified hostname", max = 255),
-                        ...}
-
-
-    filter should be a dictionary of field names and values
-    representing  the criteria for filtering. 
-    example : filter = { 'hostname' : '*.edu' , site_id : [34,54] }
-    Whether the filter represents an intersection (AND) or a union (OR) 
-    of these criteria is determined by the join_with argument 
-    provided to the sql method below
-
-    Special features:
-
-    * a field starting with the ~ character means negation.
-    example :  filter = { '~peer_id' : None }
-
-    * a field starting with < [  ] or > means lower than or greater than
-      < > uses strict comparison
-      [ ] is for using <= or >= instead
-    example :  filter = { ']event_id' : 2305 }
-    example :  filter = { '>time' : 1178531418 }
-      in this example the integer value denotes a unix timestamp
-
-    * if a value is a sequence type, then it should represent 
-      a list of possible values for that field
-    example : filter = { 'node_id' : [12,34,56] }
-
-    * a (string) value containing either a * or a % character is
-      treated as a (sql) pattern; * are replaced with % that is the
-      SQL wildcard character.
-    example :  filter = { 'hostname' : '*.jp' } 
-
-    * fields starting with - are special and relate to row selection, i.e. sorting and clipping
-    * '-SORT' : a field name, or an ordered list of field names that are used for sorting
-      these fields may start with + (default) or - for denoting increasing or decreasing order
-    example : filter = { '-SORT' : [ '+node_id', '-hostname' ] }
-    * '-OFFSET' : the number of first rows to be ommitted
-    * '-LIMIT' : the amount of rows to be returned 
-    example : filter = { '-OFFSET' : 100, '-LIMIT':25}
-
-    A realistic example would read
-    GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' , '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } )
-    and that would return regular (usual) nodes matching '*.edu' in alphabetical order from 31th to 55th
-    """
-
-    def __init__(self, fields = {}, filter = {}, doc = "Attribute filter"):
-        # Store the filter in our dict instance
-        valid_fields = {}
-        for field in filter:
-            if field in fields:
-                valid_fields[field] = filter[field]
-        dict.__init__(self, valid_fields)
-
-        # Declare ourselves as a type of parameter that can take
-        # either a value or a list of values for each of the specified
-        # fields.
-        self.fields = dict ( [ ( field, Mixed (expected, [expected])) 
-                                 for (field,expected) in fields.iteritems()
-                                 if python_type(expected) not in (list, tuple, set) ] )
-
-        # Null filter means no filter
-        Parameter.__init__(self, self.fields, doc = doc, nullok = True)
-
-    def quote(self, value):
-        """
-        Returns quoted version of the specified value.
-        """
-
-        # The pgdb._quote function is good enough for general SQL
-        # quoting, except for array types.
-        if isinstance(value, (list, tuple, set)):
-            return "ARRAY[%s]" % ", ".join(map(self.quote, value))
-        else:
-            return pgdb._quote(value)    
-
-    def sql(self, join_with = "AND"):
-        """
-        Returns a SQL conditional that represents this filter.
-        """
-
-        # So that we always return something
-        if join_with == "AND":
-            conditionals = ["True"]
-        elif join_with == "OR":
-            conditionals = ["False"]
-        else:
-            assert join_with in ("AND", "OR")
-
-        # init 
-        sorts = []
-        clips = []
-
-        for field, value in self.iteritems():
-           # handle negation, numeric comparisons
-           # simple, 1-depth only mechanism
-
-           modifiers={'~' : False, 
-                      '<' : False, '>' : False,
-                      '[' : False, ']' : False,
-                       '-' : False,
-                      }
-
-           for char in modifiers.keys():
-               if field[0] == char:
-                   modifiers[char]=True
-                   field = field[1:]
-                   break
-
-            # filter on fields
-            if not modifiers['-']:
-                if field not in self.fields:
-                    raise SfaInvalidArgument, "Invalid filter field '%s'" % field
-
-                if isinstance(value, (list, tuple, set)):
-                    # handling filters like '~slice_id':[]
-                    # this should return true, as it's the opposite of 'slice_id':[] which is false
-                    # prior to this fix, 'slice_id':[] would have returned ``slice_id IN (NULL) '' which is unknown 
-                    # so it worked by coincidence, but the negation '~slice_ids':[] would return false too
-                    if not value:
-                        field=""
-                        operator=""
-                        value = "FALSE"
-                    else:
-                        operator = "IN"
-                        value = map(str, map(self.quote, value))
-                        value = "(%s)" % ", ".join(value)
-                else:
-                    if value is None:
-                        operator = "IS"
-                        value = "NULL"
-                    elif isinstance(value, StringTypes) and \
-                            (value.find("*") > -1 or value.find("%") > -1):
-                        operator = "LIKE"
-                        # insert *** in pattern instead of either * or %
-                        # we dont use % as requests are likely to %-expansion later on
-                        # actual replacement to % done in PostgreSQL.py
-                        value = value.replace ('*','***')
-                        value = value.replace ('%','***')
-                        value = str(self.quote(value))
-                    else:
-                        operator = "="
-                        if modifiers['<']:
-                            operator='<'
-                        if modifiers['>']:
-                            operator='>'
-                        if modifiers['[']:
-                            operator='<='
-                        if modifiers[']']:
-                            operator='>='
-                        else:
-                            value = str(self.quote(value))
-                clause = "%s %s %s" % (field, operator, value)
-
-                if modifiers['~']:
-                    clause = " ( NOT %s ) " % (clause)
-
-                conditionals.append(clause)
-            # sorting and clipping
-            else:
-                if field not in ('SORT','OFFSET','LIMIT'):
-                    raise SfaInvalidArgument, "Invalid filter, unknown sort and clip field %r"%field
-                # sorting
-                if field == 'SORT':
-                    if not isinstance(value,(list,tuple,set)):
-                        value=[value]
-                    for field in value:
-                        order = 'ASC'
-                        if field[0] == '+':
-                            field = field[1:]
-                        elif field[0] == '-':
-                            field = field[1:]
-                            order = 'DESC'
-                        if field not in self.fields:
-                            raise SfaInvalidArgument, "Invalid field %r in SORT filter"%field
-                        sorts.append("%s %s"%(field,order))
-                # clipping
-                elif field == 'OFFSET':
-                    clips.append("OFFSET %d"%value)
-                # clipping continued
-                elif field == 'LIMIT' :
-                    clips.append("LIMIT %d"%value)
-
-        where_part = (" %s " % join_with).join(conditionals)
-        clip_part = ""
-        if sorts:
-            clip_part += " ORDER BY " + ",".join(sorts)
-        if clips:
-            clip_part += " " + " ".join(clips)
-        return (where_part,clip_part)
diff --git a/sfa/storage/migrations/__init__.py b/sfa/storage/migrations/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/storage/migrations/migrate.cfg b/sfa/storage/migrations/migrate.cfg
new file mode 100644 (file)
index 0000000..c570dd9
--- /dev/null
@@ -0,0 +1,20 @@
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=sqlalchemy-migrate repository for SFA-2.1 and on
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to 
+# change the table name in each database too. 
+version_table=migrate_db_version
+
+# When committing a change script, Migrate will attempt to generate the 
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the 
+# commit continues, perhaps ending successfully. 
+# Databases in this list MUST compile successfully during a commit, or the 
+# entire commit will fail. List the databases your application will actually 
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=['postgres']
diff --git a/sfa/storage/migrations/versions/001_slice_researchers.py b/sfa/storage/migrations/versions/001_slice_researchers.py
new file mode 100644 (file)
index 0000000..eb1d3b7
--- /dev/null
@@ -0,0 +1,28 @@
+# this move is about adding a slice x users many to many relation ship for modelling 
+# regular "membership" of users in a slice
+
+from sqlalchemy import Table, MetaData, Column, ForeignKey
+from sqlalchemy import Integer, String
+
+metadata=MetaData()
+
+# this is needed by migrate so it can locate 'records.record_id'
+records = \
+    Table ( 'records', metadata,
+            Column ('record_id', Integer, primary_key=True),
+            )
+
+# slice x user (researchers) association
+slice_researcher_table = \
+    Table ( 'slice_researcher', metadata,
+            Column ('slice_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+            Column ('researcher_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+            )
+
+def upgrade(migrate_engine):
+    """Migration step up: create the slice x researcher association table."""
+    metadata.bind = migrate_engine
+    slice_researcher_table.create()
+
+def downgrade(migrate_engine):
+    """Migration step down: drop the slice x researcher association table."""
+    metadata.bind = migrate_engine
+    slice_researcher_table.drop()
diff --git a/sfa/storage/migrations/versions/__init__.py b/sfa/storage/migrations/versions/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/storage/model.py b/sfa/storage/model.py
new file mode 100644 (file)
index 0000000..702c182
--- /dev/null
@@ -0,0 +1,392 @@
+from types import StringTypes
+from datetime import datetime
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.orm import column_property
+from sqlalchemy.orm import object_mapper
+from sqlalchemy.orm import validates
+from sqlalchemy.ext.declarative import declarative_base
+
+from sfa.util.sfalogging import logger
+from sfa.util.sfatime import utcparse, datetime_to_string
+from sfa.util.xml import XML 
+
+from sfa.trust.gid import GID
+
+##############################
+Base=declarative_base()
+
+####################
+# dicts vs objects
+####################
+# historically the front end to the db dealt with dicts, so the code was only dealing with dicts
+# sqlalchemy however offers an object interface, meaning that you write obj.id instead of obj['id']
+# which is admittedly much nicer
+# however we still need to deal with dictionaries if only for the xmlrpc layer
+# 
+# here are a few utilities for this 
+# 
+# (*) first off, when an old piece of code needs to be used as-is, if only temporarily, the simplest trick
+# is to use obj.__dict__
+# this behaves exactly like required, i.e. obj.__dict__['field']='new value' does change obj.field
+# however this depends on sqlalchemy's implementation so it should be avoided 
+#
+# (*) second, when an object needs to be exposed to the xmlrpc layer, we need to convert it into a dict
+# remember though that writing the resulting dictionary won't change the object
+# essentially obj.__dict__ would be fine too, except that we want to discard alchemy private keys starting with '_'
+# 2 ways are provided for that:
+# . dict(obj)
+# . obj.todict()
+# the former dict(obj) relies on __iter__() and next() below, and does not rely on the fields names
+# although it seems to work fine, I've found cases where it issues a weird python error that I could not get right
+# so the latter obj.todict() seems more reliable but more hacky as it relies on the form of fields, so this can probably be improved
+#
+# (*) finally for converting a dictionary into an sqlalchemy object, we provide
+# obj.load_from_dict(dict)
+
+class AlchemyObj:
+    def __iter__(self): 
+        self._i = iter(object_mapper(self).columns)
+        return self 
+    def next(self): 
+        n = self._i.next().name
+        return n, getattr(self, n)
+    def todict (self):
+        d=self.__dict__
+        keys=[k for k in d.keys() if not k.startswith('_')]
+        return dict ( [ (k,d[k]) for k in keys ] )
+    def load_from_dict (self, d):
+        for (k,v) in d.iteritems():
+            # experimental
+            if isinstance(v, StringTypes) and v.lower() in ['true']: v=True
+            if isinstance(v, StringTypes) and v.lower() in ['false']: v=False
+            setattr(self,k,v)
+
+    def validate_datetime (self, key, incoming):
+        if isinstance (incoming, datetime):     return incoming
+        elif isinstance (incoming, (int,float)):return datetime.fromtimestamp (incoming)
+
+    # in addition we provide convenience for converting to and from xml records
+    # for this purpose only, we need the subclasses to define 'fields' as either 
+    # a list or a dictionary
+    def xml_fields (self):
+        fields=self.fields
+        if isinstance(fields,dict): fields=fields.keys()
+        return fields
+
+    def save_as_xml (self):
+        # xxx not sure about the scope here
+        input_dict = dict( [ (key, getattr(self.key), ) for key in self.xml_fields() if getattr(self,key,None) ] )
+        xml_record=XML("<record />")
+        xml_record.parse_dict (input_dict)
+        return xml_record.toxml()
+
+    def dump(self, format=None, dump_parents=False):
+        if not format:
+            format = 'text'
+        else:
+            format = format.lower()
+        if format == 'text':
+            self.dump_text(dump_parents)
+        elif format == 'xml':
+            print self.save_to_string()
+        elif format == 'simple':
+            print self.dump_simple()
+        else:
+            raise Exception, "Invalid format %s" % format
+   
+    # xxx fixme 
+    # turns out the date_created field is received by the client as a 'created' int
+    # (and 'last_updated' does not make it at all)
+    # let's be flexible
+    def date_repr (self,fields):
+        if not isinstance(fields,list): fields=[fields]
+        for field in fields:
+            value=getattr(self,field,None)
+            if isinstance (value,datetime): 
+                return datetime_to_string (value)
+            elif isinstance (value,(int,float)):
+                return datetime_to_string(utcparse(value))
+        # fallback
+        return "** undef_datetime **"
+
+    def dump_text(self, dump_parents=False):
+        # print core fields in this order
+        core_fields = [ 'hrn', 'type', 'authority', 'date_created', 'created', 'last_updated', 'gid',  ]
+        print "".join(['=' for i in range(40)])
+        print "RECORD"
+        print "    hrn:", self.hrn
+        print "    type:", self.type
+        print "    authority:", self.authority
+        print "    date created:", self.date_repr( ['date_created','created'] )
+        print "    last updated:", self.date_repr('last_updated')
+        print "    gid:"
+        print self.get_gid_object().dump_string(8, dump_parents)  
+        
+        # print remaining fields
+        for attrib_name in dir(self):
+            attrib = getattr(self, attrib_name)
+            # skip internals
+            if attrib_name.startswith('_'):     continue
+            # skip core fields
+            if attrib_name in core_fields:      continue
+            # skip callables 
+            if callable (attrib):               continue
+            print "     %s: %s" % (attrib_name, attrib)
+    
+    def dump_simple(self):
+        return "%s"%self
+      
+#    # only intended for debugging 
+#    def inspect (self, logger, message=""):
+#        logger.info("%s -- Inspecting AlchemyObj -- attrs"%message)
+#        for k in dir(self):
+#            if not k.startswith('_'):
+#                logger.info ("  %s: %s"%(k,getattr(self,k)))
+#        logger.info("%s -- Inspecting AlchemyObj -- __dict__"%message)
+#        d=self.__dict__
+#        for (k,v) in d.iteritems():
+#            logger.info("[%s]=%s"%(k,v))
+
+
+##############################
+# various kinds of records are implemented as an inheritance hierarchy
+# RegRecord is the base class for all actual variants
+# a first draft was using 'type' as the discriminator for the inheritance
+# but we had to define another more internal column (classtype) so we 
+# accommodate variants in types like authority+am and the like
+
class RegRecord (Base,AlchemyObj):
    """
    Base class for all registry records, mapped on the 'records' table.
    The 'classtype' column discriminates between the Reg* subclasses
    (authority, slice, node, user).
    """
    __tablename__       = 'records'
    record_id           = Column (Integer, primary_key=True)
    # this is the discriminator that tells which class to use
    classtype           = Column (String)
    # in a first version type was the discriminator
    # but that could not accommodate for 'authority+sa' and the like
    type                = Column (String)
    hrn                 = Column (String)
    gid                 = Column (String)
    authority           = Column (String)
    peer_authority      = Column (String)
    pointer             = Column (Integer, default=-1)
    date_created        = Column (DateTime)
    last_updated        = Column (DateTime)
    # COMMENT FIX: the discriminator is 'classtype', not 'type' (see above)
    __mapper_args__     = { 'polymorphic_on' : classtype }

    # consumed by AlchemyObj.xml_fields / save_as_xml
    fields = [ 'type', 'hrn', 'gid', 'authority', 'peer_authority' ]
    def __init__ (self, type=None, hrn=None, gid=None, authority=None, peer_authority=None, 
                  pointer=None, dict=None):
        if type:                                self.type=type
        if hrn:                                 self.hrn=hrn
        if gid: 
            # accept either a ready-made string or a GID object
            if isinstance(gid, StringTypes):    self.gid=gid
            else:                               self.gid=gid.save_to_string(save_parents=True)
        if authority:                           self.authority=authority
        if peer_authority:                      self.peer_authority=peer_authority
        # BUG FIX: was 'if pointer:' which silently dropped a legal pointer of 0
        if pointer is not None:                 self.pointer=pointer
        if dict:                                self.load_from_dict (dict)

    def __repr__(self):
        result="<Record id=%s, type=%s, hrn=%s, authority=%s, pointer=%s" % \
                (self.record_id, self.type, self.hrn, self.authority, self.pointer)
        # skip the uniform '--- BEGIN CERTIFICATE --' stuff
        if self.gid: result+=" gid=%s..."%self.gid[28:36]
        else: result+=" nogid"
        result += ">"
        return result

    @validates ('gid')
    def validate_gid (self, key, gid):
        # normalize the gid column to a string (GID objects get serialized)
        if gid is None:                     return
        elif isinstance(gid, StringTypes):  return gid
        else:                               return gid.save_to_string(save_parents=True)

    @validates ('date_created')
    def validate_date_created (self, key, incoming): return self.validate_datetime (key, incoming)

    @validates ('last_updated')
    def validate_last_updated (self, key, incoming): return self.validate_datetime (key, incoming)

    # xxx - there might be smarter ways to handle get/set'ing gid using validation hooks 
    def get_gid_object (self):
        """Return the gid column as a GID object, or None if unset."""
        if not self.gid: return None
        else: return GID(string=self.gid)

    def just_created (self):
        # stamp both timestamps on creation
        now=datetime.now()
        self.date_created=now
        self.last_updated=now

    def just_updated (self):
        now=datetime.now()
        self.last_updated=now
+
+##############################
+# all subclasses define a convenience constructor with a default value for type, 
+# and when applicable a way to define local fields in a kwd=value argument
+####################
class RegAuthority (RegRecord):
    """Registry record for an authority (type='authority')."""
    __tablename__   = 'authorities'
    __mapper_args__ = { 'polymorphic_identity' : 'authority' }
    record_id       = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)

    def __init__ (self, **kwds):
        # default the discriminating type unless the caller provided one
        kwds.setdefault('type', 'authority')
        RegRecord.__init__(self, **kwds)

    def __repr__ (self):
        # reuse the base representation, only rebrand the class name
        return RegRecord.__repr__(self).replace("Record","Authority")
+
+####################
+# slice x user (researchers) association
+# pure join table: both columns point into 'records' and together
+# form the composite primary key
+slice_researcher_table = \
+    Table ( 'slice_researcher', Base.metadata,
+            Column ('slice_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+            Column ('researcher_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+            )
+
+####################
class RegSlice (RegRecord):
    """Registry record for a slice; adds the researchers many-to-many."""
    __tablename__   = 'slices'
    __mapper_args__ = { 'polymorphic_identity' : 'slice' }
    record_id       = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
    # researchers attached to this slice through the association table;
    # the backref exposes 'reg_slices_as_researcher' on RegUser
    reg_researchers = relationship (
        'RegUser',
        secondary=slice_researcher_table,
        primaryjoin=RegRecord.record_id==slice_researcher_table.c.slice_id,
        secondaryjoin=RegRecord.record_id==slice_researcher_table.c.researcher_id,
        backref="reg_slices_as_researcher")

    def __init__ (self, **kwds):
        # default the discriminating type unless the caller provided one
        kwds.setdefault('type', 'slice')
        RegRecord.__init__(self, **kwds)

    def __repr__ (self):
        # reuse the base representation, only rebrand the class name
        return RegRecord.__repr__(self).replace("Record","Slice")
+
+####################
class RegNode (RegRecord):
    """Registry record for a node (type='node')."""
    __tablename__   = 'nodes'
    __mapper_args__ = { 'polymorphic_identity' : 'node' }
    record_id       = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)

    def __init__ (self, **kwds):
        # default the discriminating type unless the caller provided one
        kwds.setdefault('type', 'node')
        RegRecord.__init__(self, **kwds)

    def __repr__ (self):
        # reuse the base representation, only rebrand the class name
        return RegRecord.__repr__(self).replace("Record","Node")
+
+####################
class RegUser (RegRecord):
    """
    Registry record for a user (type='user'); adds an email column
    and the relationship to the user's public keys.
    """
    __tablename__       = 'users'
    # these objects will have type='user' in the records table
    __mapper_args__     = { 'polymorphic_identity' : 'user' }
    record_id           = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
    #### extensions come here
    email               = Column ('email', String)
    # can't use name 'keys' here because when loading from xml we're getting
    # a 'keys' tag, and assigning a list of strings in a reference column like this crashes
    reg_keys            = relationship \
        ('RegKey', backref='reg_user',
         cascade="all, delete, delete-orphan")
    
    # so we can use RegUser (email=.., hrn=..) and the like
    def __init__ (self, **kwds):
        # handle local settings
        if 'email' in kwds: self.email=kwds.pop('email')
        if 'type' not in kwds: kwds['type']='user'
        RegRecord.__init__(self, **kwds)

    # append stuff at the end of the record __repr__
    def __repr__ (self): 
        result = RegRecord.__repr__(self).replace("Record","User")
        # BUG FIX: str.replace returns a new string; the original discarded it
        # (so the email never appeared) and then appended a stray second '>'
        result = result.replace (">"," email=%s>"%self.email)
        return result

    @validates('email') 
    def validate_email(self, key, address):
        # minimal sanity check; note that asserts are skipped under 'python -O'
        assert '@' in address
        return address
+
+####################
+# xxx tocheck : not sure about eager loading of this one
+# meaning, when querying the whole records, we expect there should
+# be a single query to fetch all the keys 
+# or, is it enough that we issue a single query to retrieve all the keys 
class RegKey (Base):
    """
    A public key attached to a user record ('keys' table);
    'reg_user' is provided by the backref declared on RegUser.reg_keys.
    """
    __tablename__       = 'keys'
    key_id              = Column (Integer, primary_key=True)
    record_id           = Column (Integer, ForeignKey ("records.record_id"))
    key                 = Column (String)
    pointer             = Column (Integer, default = -1)
    
    def __init__ (self, key, pointer=None):
        self.key=key
        # BUG FIX: was 'if pointer:' which silently dropped a legal pointer of 0
        if pointer is not None: self.pointer=pointer

    def __repr__ (self):
        result="<key id=%s key=%s..."%(self.key_id,self.key[8:16],)
        # reg_user only resolves once the key is attached to a user record
        try:    result += " user=%s"%self.reg_user.record_id
        except Exception: result += " no-user"
        result += ">"
        return result
+
+##############################
+# although the db needs of course to be reachable for the following functions
+# the schema management functions are here and not in alchemy
+# because the actual details of the classes need to be known
+# migrations: this code has no notion of the previous versions
+# of the data model nor of migrations
+# sfa.storage.migrations.db_init uses this when starting from
+# a fresh db only
+def init_tables(engine):
+    """Create every table of the current model on the given engine."""
+    logger.info("Initializing db schema from current/latest model")
+    Base.metadata.create_all(engine)
+
+def drop_tables(engine):
+    """Drop every table of the current model from the given engine."""
+    logger.info("Dropping tables from current/latest model")
+    Base.metadata.drop_all(engine)
+
+##############################
+# create a record of the right type from either a dict or an xml string
def make_record (dict=None, xml=""):
    """
    Create a Reg* record from either a dict or an xml string.
    Exactly one of 'dict' / 'xml' should be provided; raises otherwise.
    NOTE: the parameter is named 'dict' (shadowing the builtin) for
    keyword-compatibility with existing callers; its default used to be a
    mutable {} - a classic pitfall - and is now None, which behaves the same.
    """
    if dict:    return make_record_dict (dict)
    elif xml:   return make_record_xml (xml)
    else:       raise Exception("make_record has no input")
+
+# convert an incoming record - typically from xmlrpc - into an object
def make_record_dict (record_dict):
    """
    Instantiate the proper Reg* subclass from an xmlrpc-style dict;
    the dict must carry a 'type' entry.
    """
    assert ('type' in record_dict)
    # composite types like 'authority+sa' collapse onto their base type
    type=record_dict['type'].split('+')[0]
    # dispatch on the base type
    known_classes = { 'authority' : RegAuthority,
                      'user'      : RegUser,
                      'slice'     : RegSlice,
                      'node'      : RegNode }
    record_class = known_classes.get(type)
    if record_class is None:
        logger.debug("Untyped RegRecord instance")
        record_class = RegRecord
    result = record_class (dict=record_dict)
    logger.info ("converting dict into Reg* with type=%s"%type)
    logger.info ("returning=%s"%result)
    # xxx todo
    # register non-db attributes in an extensions field
    return result
+        
def make_record_xml (xml):
    """Parse an xml string and build the matching Reg* object from it."""
    parsed = XML(xml)
    as_dict = parsed.todict()
    logger.info("load from xml, keys=%s"%as_dict.keys())
    # delegate the actual instantiation to the dict-based factory
    return make_record_dict (as_dict)
+
diff --git a/sfa/storage/record.py b/sfa/storage/record.py
deleted file mode 100644 (file)
index f9f807e..0000000
+++ /dev/null
@@ -1,463 +0,0 @@
-##
-# Implements support for SFA records
-#
-# TODO: Use existing PLC database methods? or keep this separate?
-##
-
-from types import StringTypes
-from sfa.trust.gid import GID
-from sfa.storage.parameter import Parameter
-from sfa.util.xrn import get_authority
-from sfa.storage.row import Row
-from sfa.util.xml import XML 
-from sfa.util.sfalogging import logger
-
-class SfaRecord(Row):
-    """ 
-    The SfaRecord class implements an SFA Record. A SfaRecord is a tuple
-    (Hrn, GID, Type, Info).
-    Hrn specifies the Human Readable Name of the object
-    GID is the GID of the object
-    Type is user | authority | slice | component
-    Info is comprised of the following sub-fields
-           pointer = a pointer to the record in the PL database
-    The pointer is interpreted depending on the type of the record. For example,
-    if the type=="user", then pointer is assumed to be a person_id that indexes
-    into the persons table.
-    A given HRN may have more than one record, provided that the records are
-    of different types.
-    """
-
-#    table_name = 'sfa'
-#    primary_key = 'record_id'
-
-    ### the wsdl generator assumes this is named 'fields'
-    internal_fields = {
-        'record_id': Parameter(int, "An id that uniquely identifies this record", ro=True),
-        'pointer': Parameter(int, "An id that uniquely identifies this record in an external database")
-    }
-
-    fields = {
-        'authority': Parameter(str, "The authority for this record"),
-        'peer_authority': Parameter(str, "The peer authority for this record"),
-        'hrn': Parameter(str, "Human readable name of object"),
-        'gid': Parameter(str, "GID of the object"),
-        'type': Parameter(str, "Record type"),
-        'last_updated': Parameter(int, "Date and time of last update", ro=True),
-        'date_created': Parameter(int, "Date and time this record was created", ro=True),
-    }
-    all_fields = dict(fields.items() + internal_fields.items())
-    ##
-    # Create an SFA Record
-    #
-    # @param name if !=None, assign the name of the record
-    # @param gid if !=None, assign the gid of the record
-    # @param type one of user | authority | slice | component
-    # @param pointer is a pointer to a PLC record
-    # @param dict if !=None, then fill in this record from the dictionary
-
-    def __init__(self, hrn=None, gid=None, type=None, pointer=None, authority=None, 
-                 peer_authority=None, dict=None, string=None):
-        self.dirty = True
-        self.hrn = None
-        self.gid = None
-        self.type = None
-        self.pointer = None
-        self.set_peer_auth(peer_authority)
-        self.set_authority(authority)
-        if hrn:
-            self.set_name(hrn)
-        if gid:
-            self.set_gid(gid)
-        if type:
-            self.set_type(type)
-        if pointer:
-            self.set_pointer(pointer)
-        if dict:
-            self.load_from_dict(dict)
-        if string:
-            self.load_from_string(string)
-
-
-    def validate_last_updated(self, last_updated):
-        return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
-        
-    def update(self, new_dict):
-        if isinstance(new_dict, list):
-            new_dict = new_dict[0]
-
-        # Convert any boolean strings to real bools
-        for key in new_dict:
-            if isinstance(new_dict[key], StringTypes):
-                if new_dict[key].lower() in ["true"]:
-                    new_dict[key] = True
-                elif new_dict[key].lower() in ["false"]:
-                    new_dict[key] = False
-        dict.update(self, new_dict)
-
-    ##
-    # Set the name of the record
-    #
-    # @param hrn is a string containing the HRN
-
-    def set_name(self, hrn):
-        """
-        Set the name of the record
-        """
-        self.hrn = hrn
-        self['hrn'] = hrn
-        self.dirty = True
-
-    def set_authority(self, authority):
-        """
-        Set the authority
-        """
-        if not authority:
-            authority = ""
-        self.authority = authority
-        self['authority'] = authority
-        self.dirty = True    
-        
-
-    ##
-    # Set the GID of the record
-    #
-    # @param gid is a GID object or the string representation of a GID object
-
-    def set_gid(self, gid):
-        """
-        Set the GID of the record
-        """
-
-        if isinstance(gid, StringTypes):
-            self.gid = gid
-            self['gid'] = gid
-        else:
-            self.gid = gid.save_to_string(save_parents=True)
-            self['gid'] = gid.save_to_string(save_parents=True)
-        self.dirty = True
-
-    ##
-    # Set the type of the record
-    #
-    # @param type is a string: user | authority | slice | component
-
-    def set_type(self, type):
-        """
-        Set the type of the record
-        """
-        self.type = type
-        self['type'] = type
-        self.dirty = True
-
-    ##
-    # Set the pointer of the record
-    #
-    # @param pointer is an integer containing the ID of a PLC record
-
-    def set_pointer(self, pointer):
-        """
-        Set the pointer of the record
-        """
-        self.pointer = pointer
-        self['pointer'] = pointer
-        self.dirty = True
-
-
-    def set_peer_auth(self, peer_authority):
-        self.peer_authority = peer_authority
-        self['peer_authority'] = peer_authority
-        self.dirty = True
-
-    ##
-    # Return the name (HRN) of the record
-
-    def get_name(self):
-        """
-        Return the name (HRN) of the record
-        """
-        return self.hrn
-
-    ##
-    # Return the type of the record
-
-    def get_type(self):
-        """
-        Return the type of the record
-        """
-        return self.type
-
-    ##
-    # Return the pointer of the record. The pointer is an integer that may be
-    # used to look up the record in the PLC database. The evaluation of pointer
-    # depends on the type of the record
-
-    def get_pointer(self):
-        """
-        Return the pointer of the record. The pointer is an integer that may be
-        used to look up the record in the PLC database. The evaluation of pointer
-        depends on the type of the record
-        """
-        return self.pointer
-
-    ##
-    # Return the GID of the record, in the form of a GID object
-    # TODO: not the best name for the function, because we have things called
-    # gidObjects in the Cred
-
-    def get_gid_object(self):
-        """
-        Return the GID of the record, in the form of a GID object
-        """
-        return GID(string=self.gid)
-
-    ##
-    # Returns the value of a field
-
-    def get_field(self, fieldname, default=None):
-        # sometimes records act like classes, and sometimes they act like dicts
-        try:
-            return getattr(self, fieldname)
-        except AttributeError:
-            try:
-                 return self[fieldname]
-            except KeyError:
-                 if default != None:
-                     return default
-                 else:
-                     raise
-
-    ##
-    # Returns a list of field names in this record. 
-
-    def get_field_names(self):
-        """
-        Returns a list of field names in this record.
-        """
-        return self.fields.keys()
-
-    ##
-    # Given a field name ("hrn", "gid", ...) return the value of that field.
-    #
-    # @param fieldname is the name of field to be returned
-
-    def get_field_value_string(self, fieldname):
-        """
-        Given a field name ("hrn", "gid", ...) return the value of that field.
-        """
-        if fieldname == "authority":
-            val = get_authority(self['hrn'])
-        else:
-            try:
-                val = getattr(self, fieldname)
-            except:
-                val = self[fieldname] 
-        if isinstance(val, str):
-            return "'" + str(val) + "'"
-        else:
-            return str(val)
-
-    ##
-    # Given a list of field names, return a list of values for those public.
-    #
-    # @param fieldnames is a list of field names
-
-    def get_field_value_strings(self, fieldnames):
-        """
-        Given a list of field names, return a list of values for those public.
-        """
-        return [ self.get_field_value_string (fieldname) for fieldname in fieldnames ]
-
-    ##
-    # Return the record in the form of a dictionary
-
-    def as_dict(self):
-        """
-        Return the record in the form of a dictionary
-        """
-        return dict(self)
-
-    ##
-    # Load the record from a dictionary
-    #
-    # @param dict dictionary to load record public from
-
-    def load_from_dict(self, dict):
-        """
-        Load the record from a dictionary 
-        """
-
-        self.set_name(dict['hrn'])
-        gidstr = dict.get("gid", None)
-        if gidstr:
-            self.set_gid(dict['gid'])
-
-        if "pointer" in dict:
-           self.set_pointer(dict['pointer'])
-
-        self.set_type(dict['type'])
-        self.update(dict)        
-    
-    ##
-    # Save the record to a string. The string contains an XML representation of
-    # the record.
-
-    def save_to_string(self):
-        """
-        Save the record to a string. The string contains an XML representation of
-        the record.
-        """
-        recorddict = self.as_dict()
-        filteredDict = dict([(key, val) for (key, val) in recorddict.iteritems() if key in self.fields.keys()])
-        xml_record = XML('<record/>')
-        xml_record.parse_dict(filteredDict)
-        str = xml_record.toxml()
-        return str
-
-    ##
-    # Load the record from a string. The string is assumed to contain an XML
-    # representation of the record.
-
-    def load_from_string(self, str):
-        """
-        Load the record from a string. The string is assumed to contain an XML
-        representation of the record.
-        """
-        #dict = xmlrpclib.loads(str)[0][0]
-
-        xml_record = XML(str)
-        self.load_from_dict(xml_record.todict())
-
-    ##
-    # Dump the record to stdout
-    #
-    # @param dump_parents if true, then the parents of the GID will be dumped
-
-    def dump(self, dump_parents=False):
-        """
-        Walk tree and dump records.
-        """
-        #print "RECORD", self.name
-        #print "        hrn:", self.name
-        #print "       type:", self.type
-        #print "        gid:"
-        #if (not self.gid):
-        #    print "        None"
-        #else:
-        #    self.get_gid_object().dump(8, dump_parents)
-        #print "    pointer:", self.pointer
-       
-        order = SfaRecord.fields.keys() 
-        for key in self.keys():
-            if key not in order:
-                order.append(key)
-        for key in order:
-            if key in self and key in self.fields:
-                if key in 'gid' and self[key]:
-                    gid = GID(string=self[key])
-                    print "     %s:" % key
-                    gid.dump(8, dump_parents)
-                else:    
-                    print "     %s: %s" % (key, self[key])
-    
-    def summary_string(self):
-        return "Record(record_id=%s, hrn=%s, type=%s, authority=%s, pointer=%s)" % \
-                (self.get('record_id'), self.get('hrn'), self.get('type'), self.get('authority'), \
-                 self.get('pointer'))
-
-    def getdict(self):
-        return dict(self)
-   
-    def sync(self):
-        """ 
-        Sync this record with the database.
-        """ 
-        from sfa.storage.table import SfaTable
-        table = SfaTable()
-        filter = {}
-        if self.get('record_id'):
-            filter['record_id'] = self.get('record_id')
-        if self.get('hrn') and self.get('type'):
-            filter['hrn'] = self.get('hrn') 
-            filter['type'] = self.get('type')
-            if self.get('pointer'):
-                filter['pointer'] = self.get('pointer')
-        existing_records = table.find(filter)
-        if not existing_records:
-            table.insert(self)
-        else:
-            existing_record = existing_records[0]
-            self['record_id'] = existing_record['record_id']
-            table.update(self) 
-
-    def delete(self):
-        """
-        Remove record from the database.
-        """
-        from sfa.storage.table import SfaTable
-        table = SfaTable()
-        filter = {}
-        if self.get('record_id'):
-            filter['record_id'] = self.get('record_id')
-        if self.get('hrn') and self.get('type'):
-            filter['hrn'] = self.get('hrn')
-            filter['type'] = self.get('type')
-            if self.get('pointer'):
-                filter['pointer'] = self.get('pointer')
-        if filter:
-            existing_records = table.find(filter)
-            for record in existing_records:
-                table.remove(record)
-
-class UserRecord(SfaRecord):
-
-    fields = {
-        'email': Parameter(str, 'email'),
-        'first_name': Parameter(str, 'First name'),
-        'last_name': Parameter(str, 'Last name'),
-        'phone': Parameter(str, 'Phone Number'),
-        'keys': Parameter(str, 'Public key'),
-        'slices': Parameter([str], 'List of slices this user belongs to'),
-        }
-    fields.update(SfaRecord.fields)
-    
-class SliceRecord(SfaRecord):
-    fields = {
-        'name': Parameter(str, 'Slice name'),
-        'url': Parameter(str, 'Slice url'),
-        'expires': Parameter(int, 'Date and time this slice exipres'),
-        'researcher': Parameter([str], 'List of users for this slice'),
-        'PI': Parameter([str], 'List of PIs responsible for this slice'),
-        'description': Parameter([str], 'Description of this slice'), 
-        }
-    fields.update(SfaRecord.fields)
-
-class NodeRecord(SfaRecord):
-    fields = {
-        'hostname': Parameter(str, 'This nodes dns name'),
-        'node_type': Parameter(str, 'Type of node this is'),
-        'latitude': Parameter(str, 'latitude'),
-        'longitude': Parameter(str, 'longitude'),
-        }
-    fields.update(SfaRecord.fields)
-
-
-class AuthorityRecord(SfaRecord):
-    fields =  {
-        'name': Parameter(str, 'Name'),
-        'login_base': Parameter(str, 'login base'),
-        'enabled': Parameter(bool, 'Is this site enabled'),
-        'url': Parameter(str, 'URL'),
-        'nodes': Parameter([str], 'List of nodes at this site'),  
-        'operator': Parameter([str], 'List of operators'),
-        'researcher': Parameter([str], 'List of researchers'),
-        'PI': Parameter([str], 'List of Principal Investigators'),
-        }
-    fields.update(SfaRecord.fields)
-    
-
diff --git a/sfa/storage/row.py b/sfa/storage/row.py
deleted file mode 100644 (file)
index ef66844..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-
-class Row(dict):
-
-    # Set this to the name of the table that stores the row.
-    # e.g. table_name = "nodes"
-    table_name = None
-
-    # Set this to the name of the primary key of the table. It is
-    # assumed that the this key is a sequence if it is not set when
-    # sync() is called.
-    # e.g. primary_key="record_id"
-    primary_key = None
-
-    # Set this to the names of tables that reference this table's
-    # primary key.
-    join_tables = []
-
-    def validate(self):
-        """
-        Validates values. Will validate a value with a custom function
-        if a function named 'validate_[key]' exists.
-        """
-        # Warn about mandatory fields
-        # XX TODO: Support checking for mandatory fields later
-        #mandatory_fields = self.db.fields(self.table_name, notnull = True, hasdef = False)
-        #for field in mandatory_fields:
-        #    if not self.has_key(field) or self[field] is None:
-        #        raise SfaInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__
-
-        # Validate values before committing
-        for (key, value) in self.iteritems():
-            if value is not None and hasattr(self, 'validate_' + key):
-                validate = getattr(self, 'validate_' + key)
-                self[key] = validate(value)
-
-
-    def validate_timestamp(self, timestamp, check_future = False):
-        """
-        Validates the specified GMT timestamp string (must be in
-        %Y-%m-%d %H:%M:%S format) or number (seconds since UNIX epoch,
-        i.e., 1970-01-01 00:00:00 GMT). If check_future is True,
-        raises an exception if timestamp is not in the future. Returns
-        a GMT timestamp string.
-        """
-
-        time_format = "%Y-%m-%d %H:%M:%S"
-        if isinstance(timestamp, StringTypes):
-            # calendar.timegm() is the inverse of time.gmtime()
-            timestamp = calendar.timegm(time.strptime(timestamp, time_format))
-
-        # Human readable timestamp string
-        human = time.strftime(time_format, time.gmtime(timestamp))
-
-        if check_future and timestamp < time.time():
-            raise SfaInvalidArgument, "'%s' not in the future" % human
-
-        return human
-
diff --git a/sfa/storage/sfa.sql b/sfa/storage/sfa.sql
deleted file mode 100644 (file)
index 9a2792c..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
---
--- SFA database schema
---
-
-SET client_encoding = 'UNICODE';
-
---------------------------------------------------------------------------------
--- Version
---------------------------------------------------------------------------------
-
--- Database version
-CREATE TABLE sfa_db_version (
-    version integer NOT NULL,
-    subversion integer NOT NULL DEFAULT 0
-) WITH OIDS;
-
--- the migration scripts do not use the major 'version' number
--- so 5.0 sets subversion at 100
--- in case your database misses the site and persons tags feature, 
--- you might wish to first upgrade to 4.3-rc16 before moving to some 5.0
--- or run the up script here
--- http://svn.planet-lab.org/svn/PLCAPI/branches/4.3/migrations/
-
-INSERT INTO sfa_db_version (version, subversion) VALUES (1, 1);
-
---------------------------------------------------------------------------------
--- Aggregates and store procedures
---------------------------------------------------------------------------------
-
--- Like MySQL GROUP_CONCAT(), this function aggregates values into a
--- PostgreSQL array.
-CREATE AGGREGATE array_accum (
-    sfunc = array_append,
-    basetype = anyelement,
-    stype = anyarray,
-    initcond = '{}'
-);
-
--- Valid record types
-CREATE TABLE record_types (
-       record_type text PRIMARY KEY
-) WITH OIDS;
-INSERT INTO record_types (record_type) VALUES ('authority');
-INSERT INTO record_types (record_type) VALUES ('authority+sa');
-INSERT INTO record_types (record_type) VALUES ('authority+am');
-INSERT INTO record_types (record_type) VALUES ('authority+sm');
-INSERT INTO record_types (record_type) VALUES ('user');
-INSERT INTO record_types (record_type) VALUES ('slice');
-INSERT INTO record_types (record_type) VALUES ('node');
-
-
--- main table 
-CREATE TABLE records ( 
-    record_id serial PRIMARY KEY , 
-    hrn text NOT NULL, 
-    authority text NOT NULL, 
-    peer_authority text, 
-    gid text, 
-    type text REFERENCES record_types, 
-    pointer integer, 
-    date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, 
-    last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-CREATE INDEX sfa_hrn_ids on records (hrn);
-CREATE INDEX sfa_type_ids on records (type);
-CREATE INDEX sfa_authority_ids on records (authority);
-CREATE INDEX sfa_peer_authority_ids on records (peer_authority);
-CREATE INDEX sfa_pointer_ids on records (pointer);
diff --git a/sfa/storage/table.py b/sfa/storage/table.py
deleted file mode 100644 (file)
index 602bcab..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-#
-# implements support for SFA records stored in db tables
-#
-# TODO: Use existing PLC database methods? or keep this separate?
-
-from types import StringTypes
-
-from sfa.util.config import Config
-
-from sfa.storage.parameter import Parameter
-from sfa.storage.filter import Filter
-from sfa.storage.PostgreSQL import PostgreSQL
-from sfa.storage.record import SfaRecord, AuthorityRecord, NodeRecord, SliceRecord, UserRecord
-
-class SfaTable(list):
-
-    SFA_TABLE_PREFIX = "records"
-
-    def __init__(self, record_filter = None):
-
-        # pgsql doesn't like table names with "." in them, to replace it with "$"
-        self.tablename = SfaTable.SFA_TABLE_PREFIX
-        self.config = Config()
-        self.db = PostgreSQL(self.config)
-
-        if record_filter:
-            records = self.find(record_filter)
-            for record in records:
-                self.append(record)             
-
-    def db_fields(self, obj=None):
-        
-        db_fields = self.db.fields(self.SFA_TABLE_PREFIX)
-        return dict( [ (key,value) for (key, value) in obj.iteritems() \
-                        if key in db_fields and
-                        self.is_writable(key, value, SfaRecord.fields)] )      
-
-    @staticmethod
-    def is_writable (key,value,dict):
-        # if not mentioned, assume it's writable (e.g. deleted ...)
-        if key not in dict: return True
-        # if mentioned but not linked to a Parameter object, idem
-        if not isinstance(dict[key], Parameter): return True
-        # if not marked ro, it's writable
-        if not dict[key].ro: return True
-
-        return False
-
-
-    def clear (self):
-        self.db.do("DELETE from %s"%self.tablename)
-        self.db.commit()
-
-    # what sfa-nuke does
-    def nuke (self):
-        self.clear()
-
-    def remove(self, record):
-        params = {'record_id': record['record_id']}
-        template = "DELETE FROM %s " % self.tablename
-        sql = template + "WHERE record_id = %(record_id)s"
-        self.db.do(sql, params)
-        
-        # if this is a site, remove all records where 'authority' == the 
-        # site's hrn
-        if record['type'] == 'authority':
-            params = {'authority': record['hrn']}
-            sql = template + "WHERE authority = %(authority)s"
-            self.db.do(sql, params)
-        self.db.commit() 
-
-    def insert(self, record):
-        db_fields = self.db_fields(record)
-        keys = db_fields.keys()
-        values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
-        query_str = "INSERT INTO " + self.tablename + \
-                       "(" + ",".join(keys) + ") " + \
-                       "VALUES(" + ",".join(values) + ")"
-        self.db.do(query_str, db_fields)
-        self.db.commit()
-        result = self.find({'hrn': record['hrn'], 'type': record['type'], 'peer_authority': record['peer_authority']})
-        if not result:
-            record_id = None
-        elif isinstance(result, list):
-            record_id = result[0]['record_id']
-        else:
-            record_id = result['record_id']
-
-        return record_id
-
-    def update(self, record):
-        db_fields = self.db_fields(record)
-        keys = db_fields.keys()
-        values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
-        columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
-        query_str = "UPDATE %s SET %s WHERE record_id = %s" % \
-                    (self.tablename, ", ".join(columns), record['record_id'])
-        self.db.do(query_str, db_fields)
-        self.db.commit()
-
-    def quote_string(self, value):
-        return str(self.db.quote(value))
-
-    def quote(self, value):
-        return self.db.quote(value)
-
-    def find(self, record_filter = None, columns=None):
-        if not columns:
-            columns = "*"
-        else:
-            columns = ",".join(columns)
-        sql = "SELECT %s FROM %s WHERE True " % (columns, self.tablename)
-        
-        if isinstance(record_filter, (list, tuple, set)):
-            ints = filter(lambda x: isinstance(x, (int, long)), record_filter)
-            strs = filter(lambda x: isinstance(x, StringTypes), record_filter)
-            record_filter = Filter(SfaRecord.all_fields, {'record_id': ints, 'hrn': strs})
-            sql += "AND (%s) %s " % record_filter.sql("OR") 
-        elif isinstance(record_filter, dict):
-            record_filter = Filter(SfaRecord.all_fields, record_filter)        
-            sql += " AND (%s) %s" % record_filter.sql("AND")
-        elif isinstance(record_filter, StringTypes):
-            record_filter = Filter(SfaRecord.all_fields, {'hrn':[record_filter]})    
-            sql += " AND (%s) %s" % record_filter.sql("AND")
-        elif isinstance(record_filter, int):
-            record_filter = Filter(SfaRecord.all_fields, {'record_id':[record_filter]})    
-            sql += " AND (%s) %s" % record_filter.sql("AND")
-
-        results = self.db.selectall(sql)
-        if isinstance(results, dict):
-            results = [results]
-        return results
-
-    def findObjects(self, record_filter = None, columns=None):
-        
-        results = self.find(record_filter, columns) 
-        result_rec_list = []
-        for result in results:
-            if result['type'] in ['authority']:
-                result_rec_list.append(AuthorityRecord(dict=result))
-            elif result['type'] in ['node']:
-                result_rec_list.append(NodeRecord(dict=result))
-            elif result['type'] in ['slice']:
-                result_rec_list.append(SliceRecord(dict=result))
-            elif result['type'] in ['user']:
-                result_rec_list.append(UserRecord(dict=result))
-            else:
-                result_rec_list.append(SfaRecord(dict=result))
-        return result_rec_list
-
-
index 733884e..ad2d201 100644 (file)
@@ -280,6 +280,7 @@ class Credential(object):
             self.decode()
         return self.gidObject.get_printable_subject()
 
+    # sounds like this should be __repr__ instead ??
     def get_summary_tostring(self):
         if not self.gidObject:
             self.decode()
index 656de4b..470757b 100644 (file)
-#----------------------------------------------------------------------
-# Copyright (c) 2008 Board of Trustees, Princeton University
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and/or hardware specification (the "Work") to
-# deal in the Work without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Work, and to permit persons to whom the Work
-# is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Work.
-#
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS 
-# IN THE WORK.
-#----------------------------------------------------------------------
-##
-# Implements SFA GID. GIDs are based on certificates, and the GID class is a
-# descendant of the certificate class.
-##
-
-import xmlrpclib
-import uuid
-
-from sfa.trust.certificate import Certificate
-
-from sfa.util.faults import GidInvalidParentHrn, GidParentHrn
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn
-
-##
-# Create a new uuid. Returns the UUID as a string.
-
-def create_uuid():
-    return str(uuid.uuid4().int)
-
-##
-# GID is a tuple:
-#    (uuid, urn, public_key)
-#
-# UUID is a unique identifier and is created by the python uuid module
-#    (or the utility function create_uuid() in gid.py).
-#
-# HRN is a human readable name. It is a dotted form similar to a backward domain
-#    name. For example, planetlab.us.arizona.bakers.
-#
-# URN is a human readable identifier of form:
-#   "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"
-#   For  example, urn:publicid:IDN+planetlab:us:arizona+user+bakers      
-#
-# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.
-# It is a Keypair object as defined in the cert.py module.
-#
-# It is expected that there is a one-to-one pairing between UUIDs and HRN,
-# but it is uncertain how this would be inforced or if it needs to be enforced.
-#
-# These fields are encoded using xmlrpc into the subjectAltName field of the
-# x509 certificate. Note: Call encode() once the fields have been filled in
-# to perform this encoding.
-
-
-class GID(Certificate):
-    uuid = None
-    hrn = None
-    urn = None
-
-    ##
-    # Create a new GID object
-    #
-    # @param create If true, create the X509 certificate
-    # @param subject If subject!=None, create the X509 cert and set the subject name
-    # @param string If string!=None, load the GID from a string
-    # @param filename If filename!=None, load the GID from a file
-    # @param lifeDays life of GID in days - default is 1825==5 years
-
-    def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):
-        
-        Certificate.__init__(self, lifeDays, create, subject, string, filename)
-        if subject:
-            logger.debug("Creating GID for subject: %s" % subject)
-        if uuid:
-            self.uuid = int(uuid)
-        if hrn:
-            self.hrn = hrn
-            self.urn = hrn_to_urn(hrn, 'unknown')
-        if urn:
-            self.urn = urn
-            self.hrn, type = urn_to_hrn(urn)
-
-    def set_uuid(self, uuid):
-        if isinstance(uuid, str):
-            self.uuid = int(uuid)
-        else:
-            self.uuid = uuid
-
-    def get_uuid(self):
-        if not self.uuid:
-            self.decode()
-        return self.uuid
-
-    def set_hrn(self, hrn):
-        self.hrn = hrn
-
-    def get_hrn(self):
-        if not self.hrn:
-            self.decode()
-        return self.hrn
-
-    def set_urn(self, urn):
-        self.urn = urn
-        self.hrn, type = urn_to_hrn(urn)
-    def get_urn(self):
-        if not self.urn:
-            self.decode()
-        return self.urn            
-
-    def get_type(self):
-        if not self.urn:
-            self.decode()
-        _, t = urn_to_hrn(self.urn)
-        return t
-    
-    ##
-    # Encode the GID fields and package them into the subject-alt-name field
-    # of the X509 certificate. This must be called prior to signing the
-    # certificate. It may only be called once per certificate.
-
-    def encode(self):
-        if self.urn:
-            urn = self.urn
-        else:
-            urn = hrn_to_urn(self.hrn, None)
-            
-        str = "URI:" + urn
-
-        if self.uuid:
-            str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn
-        
-        self.set_data(str, 'subjectAltName')
-
-        
-
-
-    ##
-    # Decode the subject-alt-name field of the X509 certificate into the
-    # fields of the GID. This is automatically called by the various get_*()
-    # functions in this class.
-
-    def decode(self):
-        data = self.get_data('subjectAltName')
-        dict = {}
-        if data:
-            if data.lower().startswith('uri:http://<params>'):
-                dict = xmlrpclib.loads(data[11:])[0][0]
-            else:
-                spl = data.split(', ')
-                for val in spl:
-                    if val.lower().startswith('uri:urn:uuid:'):
-                        dict['uuid'] = uuid.UUID(val[4:]).int
-                    elif val.lower().startswith('uri:urn:publicid:idn+'):
-                        dict['urn'] = val[4:]
-                    
-        self.uuid = dict.get("uuid", None)
-        self.urn = dict.get("urn", None)
-        self.hrn = dict.get("hrn", None)    
-        if self.urn:
-            self.hrn = urn_to_hrn(self.urn)[0]
-
-    ##
-    # Dump the credential to stdout.
-    #
-    # @param indent specifies a number of spaces to indent the output
-    # @param dump_parents If true, also dump the parents of the GID
-
-    def dump(self, *args, **kwargs):
-        print self.dump_string(*args,**kwargs)
-
-    def dump_string(self, indent=0, dump_parents=False):
-        result=" "*(indent-2) + "GID\n"
-        result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"
-        result += " "*indent + "urn:" + str(self.get_urn()) +"\n"
-        result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"
-        filename=self.get_filename()
-        if filename: result += "Filename %s\n"%filename
-
-        if self.parent and dump_parents:
-            result += " "*indent + "parent:\n"
-            result += self.parent.dump_string(indent+4, dump_parents)
-        return result
-
-    ##
-    # Verify the chain of authenticity of the GID. First perform the checks
-    # of the certificate class (verifying that each parent signs the child,
-    # etc). In addition, GIDs also confirm that the parent's HRN is a prefix
-    # of the child's HRN, and the parent is of type 'authority'.
-    #
-    # Verifying these prefixes prevents a rogue authority from signing a GID
-    # for a principal that is not a member of that authority. For example,
-    # planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.
-
-    def verify_chain(self, trusted_certs = None):
-        # do the normal certificate verification stuff
-        trusted_root = Certificate.verify_chain(self, trusted_certs)        
-       
-        if self.parent:
-            # make sure the parent's hrn is a prefix of the child's hrn
-            if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
-                raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
-
-            # Parent must also be an authority (of some type) to sign a GID
-            # There are multiple types of authority - accept them all here
-            if not self.parent.get_type().find('authority') == 0:
-                raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
-
-            # Then recurse up the chain - ensure the parent is a trusted
-            # root or is in the namespace of a trusted root
-            self.parent.verify_chain(trusted_certs)
-        else:
-            # make sure that the trusted root's hrn is a prefix of the child's
-            trusted_gid = GID(string=trusted_root.save_to_string())
-            trusted_type = trusted_gid.get_type()
-            trusted_hrn = trusted_gid.get_hrn()
-            #if trusted_type == 'authority':
-            #    trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
-            cur_hrn = self.get_hrn()
-            if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
-                raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert %s" % (trusted_hrn, cur_hrn))
-
-            # There are multiple types of authority - accept them all here
-            if not trusted_type.find('authority') == 0:
-                raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
-
-        return
+#----------------------------------------------------------------------\r
+# Copyright (c) 2008 Board of Trustees, Princeton University\r
+#\r
+# Permission is hereby granted, free of charge, to any person obtaining\r
+# a copy of this software and/or hardware specification (the "Work") to\r
+# deal in the Work without restriction, including without limitation the\r
+# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
+# and/or sell copies of the Work, and to permit persons to whom the Work\r
+# is furnished to do so, subject to the following conditions:\r
+#\r
+# The above copyright notice and this permission notice shall be\r
+# included in all copies or substantial portions of the Work.\r
+#\r
+# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
+# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
+# IN THE WORK.\r
+#----------------------------------------------------------------------\r
+##\r
+# Implements SFA GID. GIDs are based on certificates, and the GID class is a\r
+# descendant of the certificate class.\r
+##\r
+\r
+import xmlrpclib\r
+import uuid\r
+\r
+from sfa.trust.certificate import Certificate\r
+\r
+from sfa.util.faults import GidInvalidParentHrn, GidParentHrn\r
+from sfa.util.sfalogging import logger\r
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, hrn_authfor_hrn\r
+\r
+##\r
+# Create a new uuid. Returns the UUID as a string.\r
+\r
+def create_uuid():\r
+    return str(uuid.uuid4().int)\r
+\r
+##\r
+# GID is a tuple:\r
+#    (uuid, urn, public_key)\r
+#\r
+# UUID is a unique identifier and is created by the python uuid module\r
+#    (or the utility function create_uuid() in gid.py).\r
+#\r
+# HRN is a human readable name. It is a dotted form similar to a backward domain\r
+#    name. For example, planetlab.us.arizona.bakers.\r
+#\r
+# URN is a human readable identifier of form:\r
+#   "urn:publicid:IDN+toplevelauthority[:sub-auth.]*[\res. type]\ +object name"\r
+#   For example, urn:publicid:IDN+planetlab:us:arizona+user+bakers\r
+#\r
+# PUBLIC_KEY is the public key of the principal identified by the UUID/HRN.\r
+# It is a Keypair object as defined in the cert.py module.\r
+#\r
+# It is expected that there is a one-to-one pairing between UUIDs and HRN,\r
+# but it is uncertain how this would be enforced or if it needs to be enforced.\r
+#\r
+# These fields are encoded using xmlrpc into the subjectAltName field of the\r
+# x509 certificate. Note: Call encode() once the fields have been filled in\r
+# to perform this encoding.\r
+\r
+\r
+class GID(Certificate):\r
+    uuid = None\r
+    hrn = None\r
+    urn = None\r
+    email = None # for adding to the SubjectAltName\r
+\r
+    ##\r
+    # Create a new GID object\r
+    #\r
+    # @param create If true, create the X509 certificate\r
+    # @param subject If subject!=None, create the X509 cert and set the subject name\r
+    # @param string If string!=None, load the GID from a string\r
+    # @param filename If filename!=None, load the GID from a file\r
+    # @param lifeDays life of GID in days - default is 1825==5 years\r
+\r
+    def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825):\r
+        \r
+        Certificate.__init__(self, lifeDays, create, subject, string, filename)\r
+        if subject:\r
+            logger.debug("Creating GID for subject: %s" % subject)\r
+        if uuid:\r
+            self.uuid = int(uuid)\r
+        if hrn:\r
+            self.hrn = hrn\r
+            self.urn = hrn_to_urn(hrn, 'unknown')\r
+        if urn:\r
+            self.urn = urn\r
+            self.hrn, type = urn_to_hrn(urn)\r
+\r
+    def set_uuid(self, uuid):\r
+        if isinstance(uuid, str):\r
+            self.uuid = int(uuid)\r
+        else:\r
+            self.uuid = uuid\r
+\r
+    def get_uuid(self):\r
+        if not self.uuid:\r
+            self.decode()\r
+        return self.uuid\r
+\r
+    def set_hrn(self, hrn):\r
+        self.hrn = hrn\r
+\r
+    def get_hrn(self):\r
+        if not self.hrn:\r
+            self.decode()\r
+        return self.hrn\r
+\r
+    def set_urn(self, urn):\r
+        self.urn = urn\r
+        self.hrn, type = urn_to_hrn(urn)\r
\r
+    def get_urn(self):\r
+        if not self.urn:\r
+            self.decode()\r
+        return self.urn            \r
+\r
+    # Will be stuffed into subjectAltName\r
+    def set_email(self, email):\r
+        self.email = email\r
+\r
+    def get_email(self):\r
+        if not self.email:\r
+            self.decode()\r
+        return self.email\r
+\r
+    def get_type(self):\r
+        if not self.urn:\r
+            self.decode()\r
+        _, t = urn_to_hrn(self.urn)\r
+        return t\r
+    \r
+    ##\r
+    # Encode the GID fields and package them into the subject-alt-name field\r
+    # of the X509 certificate. This must be called prior to signing the\r
+    # certificate. It may only be called once per certificate.\r
+\r
+    def encode(self):\r
+        if self.urn:\r
+            urn = self.urn\r
+        else:\r
+            urn = hrn_to_urn(self.hrn, None)\r
+            \r
+        str = "URI:" + urn\r
+\r
+        if self.uuid:\r
+            str += ", " + "URI:" + uuid.UUID(int=self.uuid).urn\r
+        \r
+        if self.email:\r
+            str += ", " + "email:" + self.email\r
+\r
+        self.set_data(str, 'subjectAltName')\r
+\r
+        \r
+\r
+\r
+    ##\r
+    # Decode the subject-alt-name field of the X509 certificate into the\r
+    # fields of the GID. This is automatically called by the various get_*()\r
+    # functions in this class.\r
+\r
+    def decode(self):\r
+        data = self.get_data('subjectAltName')\r
+        dict = {}\r
+        if data:\r
+            if data.lower().startswith('uri:http://<params>'):\r
+                dict = xmlrpclib.loads(data[11:])[0][0]\r
+            else:\r
+                spl = data.split(', ')\r
+                for val in spl:\r
+                    if val.lower().startswith('uri:urn:uuid:'):\r
+                        dict['uuid'] = uuid.UUID(val[4:]).int\r
+                    elif val.lower().startswith('uri:urn:publicid:idn+'):\r
+                        dict['urn'] = val[4:]\r
+                    elif val.lower().startswith('email:'):\r
+                        # FIXME: Ensure there isn't cruft in that address...\r
+                        # EG look for email:copy,....\r
+                        dict['email'] = val[6:]\r
+                    \r
+        self.uuid = dict.get("uuid", None)\r
+        self.urn = dict.get("urn", None)\r
+        self.hrn = dict.get("hrn", None)\r
+        self.email = dict.get("email", None)\r
+        if self.urn:\r
+            self.hrn = urn_to_hrn(self.urn)[0]\r
+\r
+    ##\r
+    # Dump the credential to stdout.\r
+    #\r
+    # @param indent specifies a number of spaces to indent the output\r
+    # @param dump_parents If true, also dump the parents of the GID\r
+\r
+    def dump(self, *args, **kwargs):\r
+        print self.dump_string(*args,**kwargs)\r
+\r
+    def dump_string(self, indent=0, dump_parents=False):\r
+        result=" "*(indent-2) + "GID\n"\r
+        result += " "*indent + "hrn:" + str(self.get_hrn()) +"\n"\r
+        result += " "*indent + "urn:" + str(self.get_urn()) +"\n"\r
+        result += " "*indent + "uuid:" + str(self.get_uuid()) + "\n"\r
+        if self.get_email() is not None:\r
+            result += " "*indent + "email:" + str(self.get_email()) + "\n"\r
+        filename=self.get_filename()\r
+        if filename: result += "Filename %s\n"%filename\r
+\r
+        if self.parent and dump_parents:\r
+            result += " "*indent + "parent:\n"\r
+            result += self.parent.dump_string(indent+4, dump_parents)\r
+        return result\r
+\r
+    ##\r
+    # Verify the chain of authenticity of the GID. First perform the checks\r
+    # of the certificate class (verifying that each parent signs the child,\r
+    # etc). In addition, GIDs also confirm that the parent's HRN is a prefix\r
+    # of the child's HRN, and the parent is of type 'authority'.\r
+    #\r
+    # Verifying these prefixes prevents a rogue authority from signing a GID\r
+    # for a principal that is not a member of that authority. For example,\r
+    # planetlab.us.arizona cannot sign a GID for planetlab.us.princeton.foo.\r
+\r
+    def verify_chain(self, trusted_certs = None):\r
+        # do the normal certificate verification stuff\r
+        trusted_root = Certificate.verify_chain(self, trusted_certs)        \r
+       \r
+        if self.parent:\r
+            # make sure the parent's hrn is a prefix of the child's hrn\r
+            if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):\r
+                raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))\r
+\r
+            # Parent must also be an authority (of some type) to sign a GID\r
+            # There are multiple types of authority - accept them all here\r
+            if not self.parent.get_type().find('authority') == 0:\r
+                raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))\r
+\r
+            # Then recurse up the chain - ensure the parent is a trusted\r
+            # root or is in the namespace of a trusted root\r
+            self.parent.verify_chain(trusted_certs)\r
+        else:\r
+            # make sure that the trusted root's hrn is a prefix of the child's\r
+            trusted_gid = GID(string=trusted_root.save_to_string())\r
+            trusted_type = trusted_gid.get_type()\r
+            trusted_hrn = trusted_gid.get_hrn()\r
+            #if trusted_type == 'authority':\r
+            #    trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]\r
+            cur_hrn = self.get_hrn()\r
+            if not hrn_authfor_hrn(trusted_hrn, cur_hrn):\r
+                raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert %s" % (trusted_hrn, cur_hrn))\r
+\r
+            # There are multiple types of authority - accept them all here\r
+            if not trusted_type.find('authority') == 0:\r
+                raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))\r
+\r
+        return\r
index 239240e..598ba56 100644 (file)
@@ -9,7 +9,6 @@
 # subdirectory are several files:
 #      *.GID - GID file
 #      *.PKEY - private key file
-#      *.DBINFO - database info
 ##
 
 import os
@@ -32,21 +31,18 @@ class AuthInfo:
     gid_object = None
     gid_filename = None
     privkey_filename = None
-    dbinfo_filename = None
     ##
     # Initialize and authority object.
     #
     # @param xrn the human readable name of the authority (urn will be converted to hrn)
     # @param gid_filename the filename containing the GID
     # @param privkey_filename the filename containing the private key
-    # @param dbinfo_filename the filename containing the database info
 
-    def __init__(self, xrn, gid_filename, privkey_filename, dbinfo_filename):
+    def __init__(self, xrn, gid_filename, privkey_filename):
         hrn, type = urn_to_hrn(xrn)
         self.hrn = hrn
         self.set_gid_filename(gid_filename)
         self.privkey_filename = privkey_filename
-        self.dbinfo_filename = dbinfo_filename
 
     ##
     # Set the filename of the GID
@@ -77,15 +73,6 @@ class AuthInfo:
     def get_pkey_object(self):
         return Keypair(filename = self.privkey_filename)
 
-    ##
-    # Get the dbinfo in the form of a dictionary
-
-    def get_dbinfo(self):
-        f = file(self.dbinfo_filename)
-        dict = eval(f.read())
-        f.close()
-        return dict
-
     ##
     # Replace the GID with a new one. The file specified by gid_filename is
     # overwritten with the new GID object
@@ -102,7 +89,7 @@ class AuthInfo:
 #
 # The tree is stored on disk in a hierarchical manner than reflects the
 # structure of the tree. Each authority is a subdirectory, and each subdirectory
-# contains the GID, pkey, and dbinfo files for that authority (as well as
+# contains the GID and pkey files for that authority (as well as
 # subdirectories for each sub-authority)
 
 class Hierarchy:
@@ -117,7 +104,7 @@ class Hierarchy:
             basedir = os.path.join(self.config.SFA_DATA_DIR, "authorities")
         self.basedir = basedir
     ##
-    # Given a hrn, return the filenames of the GID, private key, and dbinfo
+    # Given a hrn, return the filenames of the GID and private key
     # files.
     #
     # @param xrn the human readable name of the authority (urn will be convertd to hrn)
@@ -130,9 +117,8 @@ class Hierarchy:
 
         gid_filename = os.path.join(directory, leaf+".gid")
         privkey_filename = os.path.join(directory, leaf+".pkey")
-        dbinfo_filename = os.path.join(directory, leaf+".dbinfo")
 
-        return (directory, gid_filename, privkey_filename, dbinfo_filename)
+        return (directory, gid_filename, privkey_filename)
 
     ##
     # Check to see if an authority exists. An authority exists if it's disk
@@ -142,12 +128,10 @@ class Hierarchy:
 
     def auth_exists(self, xrn):
         hrn, type = urn_to_hrn(xrn) 
-        (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+        (directory, gid_filename, privkey_filename) = \
             self.get_auth_filenames(hrn)
         
-        return os.path.exists(gid_filename) and \
-               os.path.exists(privkey_filename) and \
-               os.path.exists(dbinfo_filename)
+        return os.path.exists(gid_filename) and os.path.exists(privkey_filename) 
 
     ##
     # Create an authority. A private key for the authority and the associated
@@ -165,7 +149,7 @@ class Hierarchy:
         parent_urn = hrn_to_urn(parent_hrn, 'authority')
         if (parent_hrn) and (not self.auth_exists(parent_urn)) and (create_parents):
             self.create_auth(parent_urn, create_parents)
-        (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+        (directory, gid_filename, privkey_filename,) = \
             self.get_auth_filenames(hrn)
 
         # create the directory to hold the files
@@ -186,13 +170,6 @@ class Hierarchy:
         gid = self.create_gid(xrn, create_uuid(), pkey)
         gid.save_to_file(gid_filename, save_parents=True)
 
-        # XXX TODO: think up a better way for the dbinfo to work
-
-        dbinfo = Config().get_plc_dbinfo()
-        dbinfo_file = file(dbinfo_filename, "w")
-        dbinfo_file.write(str(dbinfo))
-        dbinfo_file.close()
-
     def create_top_level_auth(self, hrn=None):
         """
         Create top level records (includes root and sub authorities (local/remote)
@@ -232,10 +209,10 @@ class Hierarchy:
             logger.warning("Hierarchy: missing authority - xrn=%s, hrn=%s"%(xrn,hrn))
             raise MissingAuthority(hrn)
 
-        (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+        (directory, gid_filename, privkey_filename, ) = \
             self.get_auth_filenames(hrn)
 
-        auth_info = AuthInfo(hrn, gid_filename, privkey_filename, dbinfo_filename)
+        auth_info = AuthInfo(hrn, gid_filename, privkey_filename)
 
         # check the GID and see if it needs to be refreshed
         gid = auth_info.get_gid_object()
index db88123..eb0bb74 100644 (file)
@@ -73,6 +73,7 @@ def determine_rights(type, name):
         rl.add("bind")
         rl.add("control")
         rl.add("info")
+# wouldn't that be authority+cm instead?
     elif type == "component":
         rl.add("operator")
     return rl
index f114a2d..eaf2aea 100644 (file)
@@ -82,15 +82,6 @@ class Config:
         else:
             return "plc"
 
-    def get_plc_dbinfo(self):
-        return {
-            'dbname' : self.SFA_DB_NAME,
-            'address' : self.SFA_DB_HOST,
-            'port' : self.SFA_DB_PORT,
-            'user' : self.SFA_DB_USER,
-            'password' : self.SFA_DB_PASSWORD
-            }
-
     # TODO: find a better place to put this method
     def get_max_aggrMgr_info(self):
         am_apiclient_path = '/usr/local/MAXGENI_AM_APIClient'
diff --git a/sfa/util/osxrn.py b/sfa/util/osxrn.py
new file mode 100644 (file)
index 0000000..752feec
--- /dev/null
@@ -0,0 +1,27 @@
+import re
+from sfa.util.xrn import Xrn
+from sfa.util.config import Config
+
+class OSXrn(Xrn):
+
+    def __init__(self, name=None, type=None, **kwds):
+        
+        config = Config()
+        if name is not None:
+            self.type = type
+            self.hrn = config.SFA_INTERFACE_HRN + "." + name
+            self.hrn_to_urn()
+        else:
+            Xrn.__init__(self, **kwds)   
+         
+        self.name = self.get_name() 
+    
+    def get_name(self):
+        self._normalize()
+        leaf = self.leaf
+        sliver_id_parts = leaf.split(':')
+        name = sliver_id_parts[0]
+        name = re.sub('[^a-zA-Z0-9_]', '', name)
+        return name
+
+    
index 6f3668f..d1a4e6c 100644 (file)
@@ -42,7 +42,7 @@ try:
 
 except:
     
-    from sfa.util.logging import logger
+    from sfa.util.sfalogging import logger
     def run_sfatables (_,__,___, rspec, ____=None):
         logger.warning("Cannot import sfatables.runtime, please install package sfa-sfatables")
         return rspec
index 2679512..a21b10c 100644 (file)
@@ -33,7 +33,7 @@ def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn
 def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
 
 def urn_to_sliver_id(urn, slice_id, node_id, index=0):
-    return ":".join(map(str, [urn, slice_id, node_id, index]))
+    return Xrn(urn).get_sliver_id(slice_id, node_id, index)
 
 class Xrn:
 
@@ -133,6 +133,13 @@ class Xrn:
 #        if not type:
 #            debug_logger.debug("type-less Xrn's are not safe")
 
+    def __repr__ (self):
+        result="<XRN u=%s h=%s"%(self.urn,self.hrn)
+        if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf
+        if hasattr(self,'authority'): result += " auth=%s"%self.authority
+        result += ">"
+        return result
+
     def get_urn(self): return self.urn
     def get_hrn(self): return self.hrn
     def get_type(self): return self.type
@@ -153,14 +160,18 @@ class Xrn:
         self._normalize()
         return self.leaf
 
-    def get_authority_hrn(self): 
+    def get_authority_hrn(self):
         self._normalize()
         return '.'.join( self.authority )
     
     def get_authority_urn(self): 
         self._normalize()
         return ':'.join( [Xrn.unescape(x) for x in self.authority] )
-    
+   
+    def get_sliver_id(self, slice_id, node_id, index=0):
+        self._normalize()
+        return ":".join(map(str, [self.get_urn(), slice_id, node_id, index])) 
     def urn_to_hrn(self):
         """
         compute tuple (hrn, type) from urn
index 875b139..1b9a466 100755 (executable)
@@ -12,7 +12,7 @@ from testXrn import *
 from testKeypair import *
 # xxx broken-test
 #from testHierarchy import *
-from testRecord import *
+from testStorage import *
 
 if __name__ == "__main__":
     unittest.main()
similarity index 58%
rename from tests/testRecord.py
rename to tests/testStorage.py
index f931bda..8324e9b 100755 (executable)
@@ -1,14 +1,14 @@
 import unittest
 from sfa.trust.gid import *
 from sfa.util.config import *
-from sfa.storage.record import *
+from sfa.storage.model import RegRecord
 
-class TestRecord(unittest.TestCase):
+class TestStorage(unittest.TestCase):
     def setUp(self):
         pass
 
     def testCreate(self):
-        r = SfaRecord()
+        r = RegRecord(type='authority',hrn='foo.bar')
 
 if __name__ == "__main__":
     unittest.main()