Merge branch 'master' into senslab2
author Sandrine Avakian <sandrine.avakian@inria.fr>
Mon, 16 Jan 2012 14:17:48 +0000 (15:17 +0100)
committer Sandrine Avakian <sandrine.avakian@inria.fr>
Mon, 16 Jan 2012 14:17:48 +0000 (15:17 +0100)
Conflicts:
sfa/managers/registry_manager.py

42 files changed:
Makefile
init.d/sfa
setup.py
sfa.spec
sfa/client/sfaclientlib.py
sfa/client/sfascan.py
sfa/client/sfi.py
sfa/clientbin/Makefile [moved from sfa/client/Makefile with 100% similarity]
sfa/generic/pl.py
sfa/importer/sfa-import-openstack.py [new file with mode: 0755]
sfa/importer/sfa-import-plc.py
sfa/importer/sfaImport.py
sfa/managers/aggregate_manager.py
sfa/managers/aggregate_manager_openstack.py [new file with mode: 0644]
sfa/managers/registry_manager.py
sfa/managers/registry_manager_openstack.py [new file with mode: 0644]
sfa/managers/slice_manager.py
sfa/openstack/__init__.py [new file with mode: 0644]
sfa/openstack/openstack_driver.py [new file with mode: 0644]
sfa/openstack/openstack_shell.py [new file with mode: 0644]
sfa/plc/plaggregate.py
sfa/plc/pldriver.py
sfa/plc/plshell.py
sfa/plc/plslices.py
sfa/rspecs/baseversion.py
sfa/rspecs/elements/login.py
sfa/rspecs/elements/versions/pgv2Node.py
sfa/rspecs/elements/versions/pgv2SliverType.py
sfa/rspecs/elements/versions/sfav1Node.py
sfa/rspecs/pg_rspec_converter.py
sfa/rspecs/rspec.py
sfa/rspecs/version_manager.py
sfa/rspecs/versions/pgv2.py
sfa/rspecs/versions/pgv3.py
sfa/rspecs/versions/sfav1.py
sfa/server/sfaapi.py
sfa/storage/record.py
sfa/storage/row.py
sfa/trust/certificate.py
sfa/trust/credential.py
sfa/util/sfatime.py
sfa/util/xml.py

index 4228020..a21ce8f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -138,31 +138,36 @@ BINS =    ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
        ./sfa/importer/sfa-import-plc.py ./sfa/importer/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
        $(CLIENTS)
 
-sync:
+synccheck: 
 ifeq (,$(SSHURL))
-       @echo "sync: You must define, either PLC, or PLCHOST & GUEST, on the command line"
+       @echo "*sync: You must define, either PLC, or PLCHOST & GUEST, on the command line"
        @echo "  e.g. make sync PLC=private.one-lab.org"
        @echo "  or   make sync PLCHOST=testbox1.inria.fr GUEST=vplc03.inria.fr"
        @exit 1
-else
-       +$(RSYNC) ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/sfa/
+endif
+
+sync: synccheck
+       +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
        +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
        +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin/
        +$(RSYNC) ./init.d/sfa  $(SSHURL)/etc/init.d/
        +$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/
        +$(RSYNC) ./sfa/storage/sfa.sql $(SSHURL)/usr/share/sfa/
        $(SSHCOMMAND) exec service sfa restart
-endif
 
 # 99% of the time this is enough
-fastsync:
-       +$(RSYNC) ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/sfa/
+fastsync: synccheck
+       +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
        $(SSHCOMMAND) exec service sfa restart
 
-clientsync:
+clientsync: synccheck
        +$(RSYNC)  $(BINS) $(SSHURL)/usr/bin/
 
-.PHONY: sync fastsync clientsync
+ricasync: synccheck
+       +$(RSYNC) --relative ./sfa/fd ./sfa/generic/fd.py ./sfa/rspecs/versions/federica.py $(SSHURL)/usr/lib\*/python2.\*/site-packages/
+       $(SSHCOMMAND) exec service sfa restart
+
+.PHONY: synccheck sync fastsync clientsync ricasync
 
 ##########
 CLIENTLIBFILES= \
index 3e733ee..32cc1a0 100755 (executable)
@@ -126,7 +126,7 @@ function db_start () {
        ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig 
            echo "PGDATA=$PGDATA"
            echo "PGLOG=/var/log/pgsql"
-           echo "PGPORT=$PLC_DB_PORT"
+           echo "PGPORT=$SFA_DB_PORT"
        ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
 
        ######## /var/lib/pgsql/data 
@@ -167,6 +167,10 @@ function db_start () {
            echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
            [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
        ) >>$pghba_conf
+
+    if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
+        [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pghba_conf
+    fi   
        
        # Fix ownership (sed -i changes it)
        chown postgres:postgres $postgresql_conf $pghba_conf
index a8740fc..41331de 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -26,6 +26,7 @@ scripts = glob("sfa/clientbin/*.py") + \
 
 packages = [
     'sfa', 
+    'sfa/openstack',
     'sfa/trust',
     'sfa/storage',
     'sfa/util', 
index 6a59992..90f7677 100644 (file)
--- a/sfa.spec
+++ b/sfa.spec
@@ -1,6 +1,6 @@
 %define name sfa
 %define version 2.0
-%define taglevel 6
+%define taglevel 9
 
 %define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
 %global python_sitearch        %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
@@ -224,6 +224,31 @@ fi
 [ "$1" -ge "1" ] && service sfa-cm restart || :
 
 %changelog
+* Sat Jan 7 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-9
+- bugfix: 'geni_api' should be in the top level struct, not the code struct
+- bugfix: Display the correct host and port in 'geni_api_versions' field of the GetVersion
+          output returned by the Aggregate Manager.
+- bugfix: sfa.util.sfatime now handles numeric string inputs correctly.
+- bugfix: sfa.util.sfatime.datetime_to_epoch() returns integers instead of doubles.
+- bugfix: Fixed bug that prevented the rspec parser from identifying an rspec's schema when
+          there is extra whitespace in the schemaLocation field.
+- bugfix: Fixed bug that caused PlanetLab initscripts to show up in the PGv2 and GENIv3
+          advertisement rspecs.
+- bugfix: <login> RSpec element should contain the 'username' attribute.
+- bugfix: Use sfa.util.plxrn.PlXrn to parse the login_base (authority) out of a urn.      
+* Wed Jan 4 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-8
+- bugfix: Fixed a bug in the sfa-import-plc.py script that caused the script to 
+  exit when it encountered a user with an invalid public key.
+- server: improved logging in sfa-import-plc.py
+* Tue Jan 3 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-7
+- bugfix: Fixed appending public keys in CreateSliver
+- bugfix: Fixed various bugs in the PGv2/GENIv3 request, advertisement and manifest rspecs.
+- client: -c --current option allows users to request the current/uncached rspec.
+- server: Added 'geni_api_versions' field to GetVersion() output.
+- server: Moved PLC specific code from sfa.importer.sfaImport to sfa.importer.sfa-import-plc.
+   
 * Fri Dec 16 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.0-6
 - bugfix: sfi was not sending call_id with ListResources to v2 servers
 - SFA_API_DEBUG replaced with SFA_API_LOGLEVEL
index 1253267..3f6f6bc 100644 (file)
@@ -3,6 +3,11 @@
 # a minimal library for writing "lightweight" SFA clients
 #
 
+# xxx todo
+# this library should probably check for the expiration date of the various
+# certificates and automatically retrieve fresh ones when expired
+
+import sys
 import os,os.path
 
 import sfa.util.sfalogging
@@ -276,9 +281,12 @@ class SfaClientBootstrap:
                     return filename
                 except IOError:
                     raise 
-                except:
-                    self.logger.log_exc("Could not produce/retrieve %s"%filename)
-                    raise Exception, "Could not produce/retrieve %s"%filename
+                except :
+                    error = sys.exc_info()[:2]
+                    message="Could not produce/retrieve %s (%s -- %s)"%\
+                        (filename,error[0],error[1])
+                    self.logger.log_exc(message)
+                    raise Exception, message
             return wrapped
         return wrap
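For reference, the error-reporting wrapper touched above boils down to the standalone sketch below; the stdlib logging module and the single filename argument are simplifying assumptions (the real decorator is a method on SfaClientBootstrap and uses sfa.util.sfalogging):

    import sys
    import logging

    logger = logging.getLogger("sfaclientlib-sketch")

    def report_failures(fn):
        # wrap a retrieval step so that any unexpected failure is logged
        # together with the offending filename before being re-raised
        def wrapped(filename, *args, **kwargs):
            try:
                fn(filename, *args, **kwargs)
                return filename
            except IOError:
                raise
            except Exception:
                error = sys.exc_info()[:2]
                message = "Could not produce/retrieve %s (%s -- %s)" % (filename, error[0], error[1])
                logger.exception(message)
                raise Exception(message)
        return wrapped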
 
index fdfa580..f7f5dda 100644 (file)
@@ -12,6 +12,7 @@ except:
 
 from optparse import OptionParser
 
+from sfa.client.return_value import ReturnValue
 from sfa.client.sfi import Sfi
 from sfa.util.sfalogging import logger, DEBUG
 from sfa.client.sfaserverproxy import SfaServerProxy
@@ -36,6 +37,7 @@ def url_hostname_port (url):
 ### assuming everything is sequential, as simple as it gets
 ### { url -> (timestamp,version)}
 class VersionCache:
+    # default expiration period is 1h
     def __init__ (self, filename=None, expires=60*60):
         # default is to store cache in the same dir as argv[0]
         if filename is None:
@@ -102,23 +104,43 @@ class VersionCache:
             return None
 
 ###
+# non-existing hostnames happen...
+# for better performance we cache the result of gethostbyname too
 class Interface:
 
-    def __init__ (self,url,verbose=False):
+    def __init__ (self,url,mentioned_in=None,verbose=False):
         self._url=url
         self.verbose=verbose
+        cache=VersionCache()
+        key="interface:%s"%url
         try:
             (self._url,self.hostname,self.port)=url_hostname_port(url)
-            self.ip=socket.gethostbyname(self.hostname)
-            self.probed=False
+            # look for ip in the cache
+            tuple=cache.get(key)
+            if tuple:
+                (self.hostname, self.ip, self.port) = tuple
+            else:
+                self.ip=socket.gethostbyname(self.hostname)
         except:
+            msg="can't resolve hostname %s\n\tfound in url %s"%(self.hostname,self._url)
+            if mentioned_in:
+                msg += "\n\t(mentioned at %s)"%mentioned_in
+            logger.warning (msg)
             self.hostname="unknown"
             self.ip='0.0.0.0'
             self.port="???"
+
+        cache.set(key, (self.hostname, self.ip, self.port,) )
+        cache.save()
+        self.probed=False
+
+        # mark unknown interfaces as probed to avoid unnecessary attempts
+        if self.hostname=='unknown':
             # don't really try it
             self.probed=True
             self._version={}
 
+
     def url(self):
         return self._url
 
@@ -135,7 +157,7 @@ class Interface:
         logger.debug("searching in version cache %s"%self.url())
         cached_version = VersionCache().get(self.url())
         if cached_version is not None:
-            logger.info("Retrieved version info from cache")
+            logger.info("Retrieved version info from cache %s"%self.url())
             return cached_version
         ### otherwise let's do the hard work
         # dummy to meet Sfi's expectations for its 'options' field
@@ -147,15 +169,16 @@ class Interface:
         try:
             client=Sfi(options)
             client.read_config()
-            key_file = client.get_key_file()
-            cert_file = client.get_cert_file(key_file)
+            client.bootstrap()
+            key_file = client.private_key
+            cert_file = client.my_gid
             logger.debug("using key %s & cert %s"%(key_file,cert_file))
             url=self.url()
             logger.info('issuing GetVersion at %s'%url)
             # setting timeout here seems to get the call to fail - even though the response time is fast
             #server=SfaServerProxy(url, key_file, cert_file, verbose=self.verbose, timeout=options.timeout)
             server=SfaServerProxy(url, key_file, cert_file, verbose=self.verbose)
-            self._version=server.GetVersion()
+            self._version=ReturnValue.get_value(server.GetVersion())
         except:
             logger.log_exc("failed to get version")
             self._version={}
@@ -209,8 +232,10 @@ class Interface:
         try: shape=Interface.shapes[version['interface']]
         except: shape=Interface.shapes['default']
         layout['shape']=shape
-        ### fill color to outline wrongly configured bodies
-        if 'geni_api' not in version and 'sfa' not in version:
+        ### fill color to outline wrongly configured or unreachable bodies
+        # as of sfa-2.0 the registry has neither 'sfa' nor 'geni_api', but it does have 'peers'
+        # slicemgr and aggregate have 'geni_api' and 'sfa'
+        if 'geni_api' not in version and 'peers' not in version:
             layout['style']='filled'
             layout['fillcolor']='gray'
         return layout
@@ -259,12 +284,10 @@ class Scanner:
                             logger.debug(k)
                             for (k1,v1) in v.iteritems():
                                 logger.debug("\r\t\t%s:%s"%(k1,v1))
-                # 'geni_api' is expected if the call succeeded at all
-                # 'peers' is needed as well as AMs typically don't have peers
-                if 'geni_api' in version and 'peers' in version: 
-                    # proceed with neighbours
+                # proceed with neighbours
+                if 'peers' in version: 
                     for (next_name,next_url) in version['peers'].iteritems():
-                        next_interface=Interface(next_url)
+                        next_interface=Interface(next_url,mentioned_in=interface.url())
                         # locate or create node in graph
                         try:
                             # if found, we're good with this one
@@ -331,7 +354,7 @@ class SfaScan:
         if not options.outfiles:
             options.outfiles=SfaScan.default_outfiles
         scanner=Scanner(left_to_right=options.left_to_right, verbose=bool_verbose)
-        entries = [ Interface(entry) for entry in args ]
+        entries = [ Interface(entry,mentioned_in="command line") for entry in args ]
         try:
             g=scanner.graph(entries)
             logger.info("creating layout")
index 9ed3c91..ca54c93 100644 (file)
@@ -165,7 +165,8 @@ def unique_call_id(): return uuid.uuid4().urn
 
 class Sfi:
     
-    required_options=['verbose',  'debug',  'registry',  'sm',  'auth',  'user']
+    # dirty hack to make this class usable from the outside
+    required_options=['verbose',  'debug',  'registry',  'sm',  'auth',  'user', 'user_private_key']
 
     @staticmethod
     def default_sfi_dir ():
@@ -255,8 +256,6 @@ class Sfi:
         # user specifies remote aggregate/sm/component                          
         if command in ("resources", "slices", "create", "delete", "start", "stop", 
                        "restart", "shutdown",  "get_ticket", "renew", "status"):
-            parser.add_option("-c", "--component", dest="component", default=None,
-                             help="component hrn")
             parser.add_option("-d", "--delegate", dest="delegate", default=None, 
                              action="store_true",
                              help="Include a credential delegated to the user's root"+\
@@ -268,10 +267,15 @@ class Sfi:
                             help="type filter ([all]|user|slice|authority|node|aggregate)",
                             choices=("all", "user", "slice", "authority", "node", "aggregate"),
                             default="all")
-        # display formats
         if command in ("resources"):
+            # rspec version
             parser.add_option("-r", "--rspec-version", dest="rspec_version", default="SFA 1",
                               help="schema type and version of resulting RSpec")
+            # disable/enable cached rspecs
+            parser.add_option("-c", "--current", dest="current", default=False,
+                              action="store_true",  
+                              help="Request the current rspec bypassing the cache. Cached rspecs are returned by default")
+            # display formats
             parser.add_option("-f", "--format", dest="format", type="choice",
                              help="display format ([xml]|dns|ip)", default="xml",
                              choices=("xml", "dns", "ip"))
@@ -551,6 +555,8 @@ class Sfi:
                 self.sliceapi_proxy=SfaServerProxy(cm_url, self.private_key, self.my_gid)
             else:
                 # otherwise use what was provided as --sliceapi, or SFI_SM in the config
+                if not self.sm_url.startswith(('http://', 'https://')):
+                    self.sm_url = 'http://' + self.sm_url
                 self.logger.info("Contacting Slice Manager at: %s"%self.sm_url)
                 self.sliceapi_proxy = SfaServerProxy(self.sm_url, self.private_key, self.my_gid, 
                                                      timeout=self.options.timeout, verbose=self.options.debug)  
@@ -815,42 +821,35 @@ or with an slice hrn, shows currently provisioned resources
             creds.append(self.my_credential_string)
         if options.delegate:
             creds.append(self.delegate_cred(cred, get_authority(self.authority)))
-        
-        # V2 API
-        if self.server_supports_options_arg(server):
-            # with v2 everything goes in options inclusing the subject slice
-            api_options = {}
-            if args:
-                hrn = args[0]
-                api_options['geni_slice_urn'] = hrn_to_urn(hrn, 'slice')
-            if options.info:
-                api_options['info'] = options.info
-            if options.rspec_version:
-                version_manager = VersionManager()
-                server_version = self.get_cached_server_version(server)
-                if 'sfa' in server_version:
-                    # just request the version the client wants
-                    api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
-                else:
-                    # this must be a protogeni aggregate. We should request a v2 ad rspec
-                    # regardless of what the client user requested
-                    api_options['geni_rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict() 
+       
+        # no need to check whether the server accepts the options argument, since options
+        # has been a required argument since the v1 API
+        api_options = {}
+        # always send call_id to v2 servers
+        api_options ['call_id'] = unique_call_id()
+        # ask for cached value if available
+        api_options ['cached'] = True
+        if args:
+            hrn = args[0]
+            api_options['geni_slice_urn'] = hrn_to_urn(hrn, 'slice')
+        if options.info:
+            api_options['info'] = options.info
+        if options.current:
+            api_options['cached'] = False
+        if options.rspec_version:
+            version_manager = VersionManager()
+            server_version = self.get_cached_server_version(server)
+            if 'sfa' in server_version:
+                # just request the version the client wants
+                api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
             else:
                 api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}    
-            # always send call_id to v2 servers
-            api_options ['call_id'] = unique_call_id()
-            # the V2 form
-            result = server.ListResources (creds, api_options)
-        # V1
         else:
-            # with an argument
-            if args:
-                hrn = args[0]
-                # xxx looks like we can pass a hrn and not a urn here ??
-                # last arg. is a raw call_id when supported
-                result = server.ListResources (creds, hrn, *self.cis(server))
-            else:
-                result = server.ListResources (creds, *self.cis(server))
+            api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}    
+        result = server.ListResources (creds, api_options)
         value = ReturnValue.get_value(result)
         if options.file is None:
             display_rspec(value, options.format)
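To make the new calling convention concrete, every ListResources call now sends an options dictionary along the lines of the sketch below; the slice URN is a hypothetical example and the rspec-version dict mirrors the ProtoGENI fallback shown in the hunk:

    import uuid

    api_options = {
        'call_id': uuid.uuid4().urn,   # always sent to v2 servers
        'cached': False,               # -c/--current requests a fresh, uncached rspec
        'geni_slice_urn': 'urn:publicid:IDN+plc.example+slice+demo',   # hypothetical slice
        'geni_rspec_version': {'type': 'geni', 'version': '3.0'},
    }
    # result = server.ListResources(creds, api_options)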
similarity index 100%
rename from sfa/client/Makefile
rename to sfa/clientbin/Makefile
index 2c03619..92b7266 100644 (file)
@@ -1,27 +1,27 @@
 from sfa.generic import Generic
 
-import sfa.server.sfaapi
-import sfa.plc.pldriver
-import sfa.managers.registry_manager
-import sfa.managers.slice_manager
-import sfa.managers.aggregate_manager
 
 class pl (Generic):
     
     # use the standard api class
     def api_class (self):
+        import sfa.server.sfaapi
         return sfa.server.sfaapi.SfaApi
 
     # the manager classes for the server-side services
     def registry_manager_class (self) : 
+        import sfa.managers.registry_manager
         return sfa.managers.registry_manager.RegistryManager
     def slicemgr_manager_class (self) : 
+        import sfa.managers.slice_manager
         return sfa.managers.slice_manager.SliceManager
     def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
         return sfa.managers.aggregate_manager.AggregateManager
 
     # driver class for server-side services, talk to the whole testbed
     def driver_class (self):
+        import sfa.plc.pldriver
         return sfa.plc.pldriver.PlDriver
 
     # for the component mode, to be run on board planetlab nodes
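The rationale for moving the imports into the accessors is that importing the flavour module itself stays cheap, and only the service actually instantiated pulls in its dependencies. A minimal sketch of the pattern, reusing module names from the hunk above (the class name is made up):

    class LazyFlavour(object):
        # hypothetical flavour class illustrating the call-time import pattern
        def registry_manager_class(self):
            import sfa.managers.registry_manager
            return sfa.managers.registry_manager.RegistryManager
        def driver_class(self):
            import sfa.plc.pldriver
            return sfa.plc.pldriver.PlDriver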
diff --git a/sfa/importer/sfa-import-openstack.py b/sfa/importer/sfa-import-openstack.py
new file mode 100755 (executable)
index 0000000..ec785e8
--- /dev/null
@@ -0,0 +1,161 @@
+#!/usr/bin/python
+#
+##
+# Import PLC records into the SFA database. It is intended that this tool be
+# run once to create SFA records that reflect the current state of the
+# planetlab database.
+#
+# The import tool assumes that the existing PLC hierarchy should all be part
+# of "planetlab.us" (see the root_auth and level1_auth variables below).
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+import os
+import getopt
+import sys
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+from sfa.storage.table import SfaTable
+from sfa.storage.record import SfaRecord
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.gid import create_uuid
+from sfa.importer.sfaImport import sfaImport, _cleanup_string
+from sfa.util.sfalogging import logger
+from sfa.openstack.openstack_shell import OpenstackShell    
+
+def process_options():
+
+   (options, args) = getopt.getopt(sys.argv[1:], '', [])
+   for opt in options:
+       name = opt[0]
+       val = opt[1]
+
+
+def load_keys(filename):
+    keys = {}
+    tmp_dict = {}
+    try:
+        execfile(filename, tmp_dict)
+        if 'keys' in tmp_dict:
+            keys = tmp_dict['keys']
+        return keys
+    except:
+        return keys
+
+def save_keys(filename, keys):
+    f = open(filename, 'w')
+    f.write("keys = %s" % str(keys))
+    f.close()
+
+def main():
+
+    process_options()
+    config = Config()
+    sfaImporter = sfaImport()
+    logger=sfaImporter.logger
+    logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
+    if not config.SFA_REGISTRY_ENABLED:
+        sys.exit(0)
+    root_auth = config.SFA_REGISTRY_ROOT_AUTH
+    interface_hrn = config.SFA_INTERFACE_HRN
+    shell = OpenstackShell(config)
+    sfaImporter.create_top_level_records()
+    
+    # create dict of all existing sfa records
+    existing_records = {}
+    existing_hrns = []
+    key_ids = []
+    table = SfaTable()
+    results = table.find()
+    for result in results:
+        existing_records[(result['hrn'], result['type'])] = result
+        existing_hrns.append(result['hrn']) 
+            
+        
+    # Get all users
+    persons = shell.user_get_all()
+    persons_dict = {}
+    keys_filename = config.config_path + os.sep + 'person_keys.py' 
+    old_person_keys = load_keys(keys_filename)    
+    person_keys = {} 
+    for person in persons:
+        hrn = config.SFA_INTERFACE_HRN + "." + person.id
+        persons_dict[hrn] = person
+        old_keys = old_person_keys.get(person.id, [])
+        keys = [k.public_key for k in shell.key_pair_get_all_by_user(person.id)]
+        person_keys[person.id] = keys
+        update_record = False
+        if old_keys != keys:
+            update_record = True
+        if hrn not in existing_hrns or \
+               (hrn, 'user') not in existing_records or update_record:    
+            urn = hrn_to_urn(hrn, 'user')
+            
+            if keys:
+                try:
+                    pkey = convert_public_key(keys[0])
+                except:
+                    logger.log_exc('unable to convert public key for %s' % hrn)
+                    pkey = Keypair(create=True)
+            else:
+                logger.warn("Import: person %s does not have a PL public key"%hrn)
+                pkey = Keypair(create=True) 
+            person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+            person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
+                                          authority=get_authority(hrn))
+            logger.info("Import: importing %s " % person_record.summary_string())
+            person_record.sync()
+
+    # Get all projects
+    projects = shell.project_get_all()
+    projects_dict = {}
+    for project in projects:
+        hrn = config.SFA_INTERFACE_HRN + '.' + project.id
+        projects_dict[hrn] = project
+        if hrn not in existing_hrns or \
+        (hrn, 'slice') not in existing_records:
+            pkey = Keypair(create=True)
+            urn = hrn_to_urn(hrn, 'slice')
+            project_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+            project_record = SfaRecord(hrn=hrn, gid=project_gid, type="slice",
+                                       authority=get_authority(hrn))
+            projects_dict[project_record['hrn']] = project_record
+            logger.info("Import: importing %s " % project_record.summary_string())
+            project_record.sync() 
+    
+    # remove stale records    
+    system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+    for (record_hrn, type) in existing_records.keys():
+        if record_hrn in system_records:
+            continue
+        
+        record = existing_records[(record_hrn, type)]
+        if record['peer_authority']:
+            continue
+
+        if type == 'user':
+            if record_hrn in persons_dict:
+                continue  
+        elif type == 'slice':
+            if record_hrn in projects_dict:
+                continue
+        else:
+            continue 
+        
+        record_object = existing_records[(record_hrn, type)]
+        record = SfaRecord(dict=record_object)
+        logger.info("Import: removing %s " % record.summary_string())
+        record.delete()
+                                   
+    # save pub keys
+    logger.info('Import: saving current pub keys')
+    save_keys(keys_filename, person_keys)                
+        
+if __name__ == "__main__":
+    main()
index 2371536..723f473 100755 (executable)
@@ -19,12 +19,14 @@ import getopt
 import sys
 
 from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
 from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
-
 from sfa.storage.table import SfaTable
-
-from sfa.importer.sfaImport import sfaImport
+from sfa.storage.record import SfaRecord
+from sfa.trust.gid import create_uuid    
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.importer.sfaImport import sfaImport, _cleanup_string
+from sfa.plc.plshell import PlShell    
 
 def process_options():
 
@@ -70,40 +72,17 @@ def main():
     interface_hrn = config.SFA_INTERFACE_HRN
     keys_filename = config.config_path + os.sep + 'person_keys.py' 
     sfaImporter = sfaImport()
+    sfaImporter.create_top_level_records()
     logger=sfaImporter.logger
     logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
-    shell = sfaImporter.shell
+    shell = PlShell (config)
     
-    # initialize registry db table
-    table = SfaTable()
-
-    # create root authority 
-    sfaImporter.create_top_level_auth_records(interface_hrn)
-
-    # create s user record for the slice manager
-    sfaImporter.create_sm_client_record()
-
-    # create interface records
-    logger.info("Import: creating interface records")
-    sfaImporter.create_interface_records()
-
-    # add local root authority's cert  to trusted list
-    logger.info("Import: adding " + interface_hrn + " to trusted list")
-    authority = sfaImporter.AuthHierarchy.get_auth_info(interface_hrn)
-    sfaImporter.TrustedRoots.add_gid(authority.get_gid_object())
-
-    # special case for vini
-    if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
-        # create a fake internet2 site first
-        i2site = {'name': 'Internet2', 'abbreviated_name': 'I2',
-                    'login_base': 'internet2', 'site_id': -1}
-        sfaImporter.import_site(interface_hrn, i2site)
-   
     # create dict of all existing sfa records
     existing_records = {}
     existing_hrns = []
     key_ids = []
     person_keys = {} 
+    table = SfaTable()
     results = table.find()
     for result in results:
         existing_records[(result['hrn'], result['type'])] = result
@@ -148,6 +127,28 @@ def main():
     slices_dict = {}
     for slice in slices:
         slices_dict[slice['slice_id']] = slice
+
+    # special case for vini
+    if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
+        # create a fake internet2 site first
+        i2site = {'name': 'Internet2', 'abbreviated_name': 'I2',
+                    'login_base': 'internet2', 'site_id': -1}
+        site_hrn = _get_site_hrn(interface_hrn, i2site)
+        logger.info("Importing site: %s" % site_hrn)
+        # import if hrn is not in list of existing hrns or if the hrn exists
+        # but its not a site record
+        if site_hrn not in existing_hrns or \
+           (site_hrn, 'authority') not in existing_records:
+            logger.info("Import: site %s " % site_hrn)
+            urn = hrn_to_urn(site_hrn, 'authority')
+            if not sfaImporter.AuthHierarchy.auth_exists(urn):
+                sfaImporter.AuthHierarchy.create_auth(urn)
+            auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
+            auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
+                                    type="authority", pointer=site['site_id'], 
+                                    authority=get_authority(site_hrn))
+            auth_record.sync(verbose=True)
+
     # start importing 
     for site in sites:
         site_hrn = _get_site_hrn(interface_hrn, site)
@@ -157,7 +158,22 @@ def main():
         # but its not a site record
         if site_hrn not in existing_hrns or \
            (site_hrn, 'authority') not in existing_records:
-            sfaImporter.import_site(site_hrn, site)
+            try:
+                logger.info("Import: site %s " % site_hrn)
+                urn = hrn_to_urn(site_hrn, 'authority')
+                if not sfaImporter.AuthHierarchy.auth_exists(urn):
+                    sfaImporter.AuthHierarchy.create_auth(urn)
+                auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
+                auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
+                                        type="authority", pointer=site['site_id'], 
+                                        authority=get_authority(site_hrn))
+                logger.info("Import: importing site: %s" % auth_record.summary_string())  
+                auth_record.sync()
+            except:
+                # if the site import fails then there is no point in trying to import the
+                # site's child records (node, slices, persons), so skip them.
+                logger.log_exc("Import: failed to import site. Skipping child records") 
+                continue 
              
         # import node records
         for node_id in site['node_ids']:
@@ -167,9 +183,20 @@ def main():
             site_auth = get_authority(site_hrn)
             site_name = get_leaf(site_hrn)
             hrn =  hostname_to_hrn(site_auth, site_name, node['hostname'])
+            if len(hrn) > 64:
+                hrn = hrn[:64]
             if hrn not in existing_hrns or \
                (hrn, 'node') not in existing_records:
-                sfaImporter.import_node(hrn, node)
+                try:
+                    pkey = Keypair(create=True)
+                    urn = hrn_to_urn(hrn, 'node')
+                    node_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+                    node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'], authority=get_authority(hrn))    
+                    logger.info("Import: importing node: %s" % node_record.summary_string())  
+                    node_record.sync()
+                except:
+                    logger.log_exc("Import: failed to import node") 
+                    
 
         # import slices
         for slice_id in site['slice_ids']:
@@ -177,9 +204,20 @@ def main():
                 continue 
             slice = slices_dict[slice_id]
             hrn = slicename_to_hrn(interface_hrn, slice['name'])
+            #slicename = slice['name'].split("_",1)[-1]
+            #slicename = _cleanup_string(slicename)
             if hrn not in existing_hrns or \
                (hrn, 'slice') not in existing_records:
-                sfaImporter.import_slice(site_hrn, slice)      
+                try:
+                    pkey = Keypair(create=True)
+                    urn = hrn_to_urn(hrn, 'slice')
+                    slice_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+                    slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'],
+                                             authority=get_authority(hrn))
+                    logger.info("Import: importing slice: %s" % slice_record.summary_string())  
+                    slice_record.sync()
+                except:
+                    logger.log_exc("Import: failed to  import slice")
 
         # import persons
         for person_id in site['person_ids']:
@@ -187,6 +225,11 @@ def main():
                 continue 
             person = persons_dict[person_id]
             hrn = email_to_hrn(site_hrn, person['email'])
+            if len(hrn) > 64:
+                hrn = hrn[:64]
+
+            # if the user's primary key has changed then we need to update the
+            # user's gid by forcing an update here
             old_keys = []
             new_keys = []
             if person_id in old_person_keys:
@@ -200,8 +243,26 @@ def main():
 
             if hrn not in existing_hrns or \
                (hrn, 'user') not in existing_records or update_record:
-                sfaImporter.import_person(site_hrn, person)
-
+                try:
+                    if 'key_ids' in person and person['key_ids']:
+                        key = new_keys[0]
+                        try:
+                            pkey = convert_public_key(key)
+                        except:
+                            logger.warn('unable to convert public key for %s' % hrn)
+                            pkey = Keypair(create=True)
+                    else:
+                        # the user has no keys. Creating a random keypair for the user's gid
+                        logger.warn("Import: person %s does not have a PL public key"%hrn)
+                        pkey = Keypair(create=True) 
+                    urn = hrn_to_urn(hrn, 'user')
+                    person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+                    person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
+                                              pointer=person['person_id'], authority=get_authority(hrn))
+                    logger.info("Import: importing person: %s" % person_record.summary_string())  
+                    person_record.sync()
+                except:
+                    logger.log_exc("Import: failed to import person.") 
     
     # remove stale records    
     system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
@@ -266,9 +327,13 @@ def main():
             continue 
         
         if not found:
-            record_object = existing_records[(record_hrn, type)]
-            sfaImporter.delete_record(record_hrn, type) 
-                                   
+            try:
+                record_object = existing_records[(record_hrn, type)]
+                record = SfaRecord(dict=record_object)
+                logger.info("Import: deleting record: %s" % record.summary_string())
+                record.delete()
+            except:
+                logger.log_exc("Import: failded to delete record")                    
     # save pub keys
     logger.info('Import: saving current pub keys')
     save_keys(keys_filename, person_keys)                
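The person_keys.py bookkeeping exists so that a change in a user's uploaded keys forces their GID to be regenerated on the next run. In essence (a minimal sketch; the function name is made up):

    def person_needs_refresh(person_id, old_person_keys, current_keys):
        # a user's GID embeds their public key, so the record must be
        # re-imported whenever the key list stored from the previous run
        # differs from what the testbed reports now
        return old_person_keys.get(person_id, []) != current_keys

The freshly collected keys are then written back with save_keys(), as in the hunk above.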
index 3f054ba..ca0bff2 100644 (file)
@@ -12,14 +12,12 @@ from sfa.util.sfalogging import _SfaLogger
 from sfa.util.xrn import get_authority, hrn_to_urn
 from sfa.util.plxrn import email_to_hrn
 from sfa.util.config import Config
-
 from sfa.trust.certificate import convert_public_key, Keypair
 from sfa.trust.trustedroots import TrustedRoots
 from sfa.trust.hierarchy import Hierarchy
 from sfa.trust.gid import create_uuid
-
-from sfa.storage.record import SfaRecord
 from sfa.storage.table import SfaTable
+from sfa.storage.record import SfaRecord
 
 
 def _un_unicode(str):
@@ -51,14 +49,30 @@ class sfaImport:
     def __init__(self):
        self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
        self.AuthHierarchy = Hierarchy()
+#       self.table = SfaTable()     
        self.config = Config()
        self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config))
        self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
-        
-       # should use a driver instead...
-       from sfa.plc.plshell import PlShell
-       # how to connect to planetlab 
-       self.shell = PlShell (self.config)
+
+    def create_top_level_records(self):
+        """
+        Create top level and interface records
+        """
+        # create root authority
+        interface_hrn = self.config.SFA_INTERFACE_HRN
+        self.create_top_level_auth_records(interface_hrn)
+
+        # create a user record for the slice manager
+        self.create_sm_client_record()
+
+        # create interface records
+        self.logger.info("Import: creating interface records")
+        self.create_interface_records()
+
+        # add local root authority's cert  to trusted list
+        self.logger.info("Import: adding " + interface_hrn + " to trusted list")
+        authority = self.AuthHierarchy.get_auth_info(interface_hrn)
+        self.TrustedRoots.add_gid(authority.get_gid_object())
 
     def create_top_level_auth_records(self, hrn):
         """
@@ -75,14 +89,9 @@ class sfaImport:
         self.AuthHierarchy.create_top_level_auth(hrn)    
         # create the db record if it doesnt already exist    
         auth_info = self.AuthHierarchy.get_auth_info(hrn)
-        table = SfaTable()
-        auth_record = table.find({'type': 'authority', 'hrn': hrn})
-
-        if not auth_record:
-            auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1)
-            auth_record['authority'] = get_authority(auth_record['hrn'])
-            self.logger.info("Import: inserting authority record for %s"%hrn)
-            table.insert(auth_record)
+        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1, authority=get_authority(hrn))
+        self.logger.info("Import: importing %s " % auth_record.summary_string())
+        auth_record.sync()
 
     def create_sm_client_record(self):
         """
@@ -95,12 +104,10 @@ class sfaImport:
             self.AuthHierarchy.create_auth(urn)
 
         auth_info = self.AuthHierarchy.get_auth_info(hrn)
-        table = SfaTable()
-        sm_user_record = table.find({'type': 'user', 'hrn': hrn})
-        if not sm_user_record:
-            record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="user", pointer=-1)
-            record['authority'] = get_authority(record['hrn'])
-            table.insert(record)    
+        record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), \
+                           type="user", pointer=-1, authority=get_authority(hrn))
+        self.logger.info("Import: importing %s " % record.summary_string())
+        record.sync()
 
     def create_interface_records(self):
         """
@@ -108,144 +115,19 @@ class sfaImport:
         """
         # just create certs for all sfa interfaces even if they
         # arent enabled
-        interface_hrn = self.config.SFA_INTERFACE_HRN
+        hrn = self.config.SFA_INTERFACE_HRN
         interfaces = ['authority+sa', 'authority+am', 'authority+sm']
         table = SfaTable()
-        auth_info = self.AuthHierarchy.get_auth_info(interface_hrn)
+        auth_info = self.AuthHierarchy.get_auth_info(hrn)
         pkey = auth_info.get_pkey_object()
         for interface in interfaces:
-            interface_record = table.find({'type': interface, 'hrn': interface_hrn})
-            if not interface_record:
-                self.logger.info("Import: interface %s %s " % (interface_hrn, interface))
-                urn = hrn_to_urn(interface_hrn, interface)
-                gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-                record = SfaRecord(hrn=interface_hrn, gid=gid, type=interface, pointer=-1)  
-                record['authority'] = get_authority(interface_hrn)
-                table.insert(record) 
-                                
-
-    
-    def import_person(self, parent_hrn, person):
-        """
-        Register a user record 
-        """
-        hrn = email_to_hrn(parent_hrn, person['email'])
-
-        # ASN.1 will have problems with hrn's longer than 64 characters
-        if len(hrn) > 64:
-            hrn = hrn[:64]
-
-        self.logger.info("Import: person %s"%hrn)
-        key_ids = []
-        if 'key_ids' in person and person['key_ids']:
-            key_ids = person["key_ids"]
-            # get the user's private key from the SSH keys they have uploaded
-            # to planetlab
-            keys = self.shell.GetKeys(key_ids)
-            key = keys[0]['key']
-            pkey = None
-            try:
-                pkey = convert_public_key(key)
-            except:
-                self.logger.warn('unable to convert public key for %s' % hrn) 
-            if not pkey:
-                pkey = Keypair(create=True)
-        else:
-            # the user has no keys
-            self.logger.warn("Import: person %s does not have a PL public key"%hrn)
-            # if a key is unavailable, then we still need to put something in the
-            # user's GID. So make one up.
-            pkey = Keypair(create=True)
-
-        # create the gid
-        urn = hrn_to_urn(hrn, 'user')
-        person_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-        table = SfaTable()
-        person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", pointer=person['person_id'])
-        person_record['authority'] = get_authority(person_record['hrn'])
-        existing_records = table.find({'hrn': hrn, 'type': 'user', 'pointer': person['person_id']})
-        if not existing_records:
-            table.insert(person_record)
-        else:
-            self.logger.info("Import: %s exists, updating " % hrn)
-            existing_record = existing_records[0]
-            person_record['record_id'] = existing_record['record_id']
-            table.update(person_record)
-
-    def import_slice(self, parent_hrn, slice):
-        slicename = slice['name'].split("_",1)[-1]
-        slicename = _cleanup_string(slicename)
-
-        if not slicename:
-            self.logger.error("Import: failed to parse slice name %s" %slice['name'])
-            return
-
-        hrn = parent_hrn + "." + slicename
-        self.logger.info("Import: slice %s"%hrn)
-
-        pkey = Keypair(create=True)
-        urn = hrn_to_urn(hrn, 'slice')
-        slice_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-        slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'])
-        slice_record['authority'] = get_authority(slice_record['hrn'])
-        table = SfaTable()
-        existing_records = table.find({'hrn': hrn, 'type': 'slice', 'pointer': slice['slice_id']})
-        if not existing_records:
-            table.insert(slice_record)
-        else:
-            self.logger.info("Import: %s exists, updating " % hrn)
-            existing_record = existing_records[0]
-            slice_record['record_id'] = existing_record['record_id']
-            table.update(slice_record)
-
-    def import_node(self, hrn, node):
-        self.logger.info("Import: node %s" % hrn)
-        # ASN.1 will have problems with hrn's longer than 64 characters
-        if len(hrn) > 64:
-            hrn = hrn[:64]
-
-        table = SfaTable()
-        node_record = table.find({'type': 'node', 'hrn': hrn})
-        pkey = Keypair(create=True)
-        urn = hrn_to_urn(hrn, 'node')
-        node_gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
-        node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'])
-        node_record['authority'] = get_authority(node_record['hrn'])
-        existing_records = table.find({'hrn': hrn, 'type': 'node', 'pointer': node['node_id']})
-        if not existing_records:
-            table.insert(node_record)
-        else:
-            self.logger.info("Import: %s exists, updating " % hrn)
-            existing_record = existing_records[0]
-            node_record['record_id'] = existing_record['record_id']
-            table.update(node_record)
-
-    
-    def import_site(self, hrn, site):
-        urn = hrn_to_urn(hrn, 'authority')
-        self.logger.info("Import: site %s"%hrn)
-
-        # create the authority
-        if not self.AuthHierarchy.auth_exists(urn):
-            self.AuthHierarchy.create_auth(urn)
-
-        auth_info = self.AuthHierarchy.get_auth_info(urn)
-
-        table = SfaTable()
-        auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=site['site_id'])
-        auth_record['authority'] = get_authority(auth_record['hrn'])
-        existing_records = table.find({'hrn': hrn, 'type': 'authority', 'pointer': site['site_id']})
-        if not existing_records:
-            table.insert(auth_record)
-        else:
-            self.logger.info("Import: %s exists, updating " % hrn)
-            existing_record = existing_records[0]
-            auth_record['record_id'] = existing_record['record_id']
-            table.update(auth_record)
-
-        return hrn
-
-
+            urn = hrn_to_urn(hrn, interface)
+            gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
+            interface_record = SfaRecord(hrn=hrn, type=interface, pointer=-1,
+                                         gid = gid, authority=get_authority(hrn))
+            self.logger.info("Import: importing %s " % interface_record.summary_string())
+            interface_record.sync()
+             
     def delete_record(self, hrn, type):
         # delete the record
         table = SfaTable()
index 8955920..fab2af8 100644 (file)
@@ -12,12 +12,14 @@ class AggregateManager:
     
         xrn=Xrn(api.hrn)
         version = version_core()
-        version_generic = {'interface':'aggregate',
-                           'sfa': 2,
-                           'geni_api': 2,
-                           'hrn':xrn.get_hrn(),
-                           'urn':xrn.get_urn(),
-                           }
+        version_generic = {
+            'interface':'aggregate',
+            'sfa': 2,
+            'geni_api': 2,
+            'geni_api_versions': {'2': 'http://%s:%s' % (api.config.SFA_AGGREGATE_HOST, api.config.SFA_AGGREGATE_PORT)}, 
+            'hrn':xrn.get_hrn(),
+            'urn':xrn.get_urn(),
+            }
         version.update(version_generic)
         testbed_version = self.driver.aggregate_version()
         version.update(testbed_version)
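For orientation, an aggregate's GetVersion reply now carries roughly the structure below; the host, port, hrn and urn values are illustrative placeholders and the version_core() fields are omitted:

    version = {
        # version_core() fields omitted; the generic aggregate part now includes:
        'interface': 'aggregate',
        'sfa': 2,
        'geni_api': 2,
        'geni_api_versions': {'2': 'http://am.example.org:12346'},
        'hrn': 'example.am',
        'urn': 'urn:publicid:IDN+example+authority+am',
    }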
diff --git a/sfa/managers/aggregate_manager_openstack.py b/sfa/managers/aggregate_manager_openstack.py
new file mode 100644 (file)
index 0000000..3ed0bba
--- /dev/null
@@ -0,0 +1,117 @@
+from sfa.util.version import version_core
+from sfa.util.xrn import Xrn
+from sfa.util.callids import Callids
+from sfa.managers import aggregate_manager 
+
+class AggregateManager(aggregate_manager.AggregateManager):
+
+    def __init__ (self, config): pass
+    
+    # essentially a union of the core version, the generic version (this code) and
+    # whatever the driver needs to expose
+    def GetVersion(self, api, options):
+    
+        xrn=Xrn(api.hrn)
+        version = version_core()
+        version_generic = {
+            'interface':'aggregate',
+            'sfa': 2,
+            'geni_api': 2,
+            'geni_api_versions': {'2': 'http://%s:%s' % (api.config.SFA_AGGREGATE_HOST, api.config.SFA_AGGREGATE_PORT)}, 
+            'hrn':xrn.get_hrn(),
+            'urn':xrn.get_urn(),
+            }
+        version.update(version_generic)
+        testbed_version = self.driver.aggregate_version()
+        version.update(testbed_version)
+        return version
+    
+    def ListSlices(self, api, creds, options):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return []
+        return self.driver.list_slices (creds, options)
+
+    def ListResources(self, api, creds, options):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return ""
+
+        # get slice's hrn from options
+        slice_xrn = options.get('geni_slice_urn', None)
+        # pass None if no slice is specified
+        if not slice_xrn:
+            slice_hrn, slice_urn = None, None
+        else:
+            xrn = Xrn(slice_xrn)
+            slice_urn=xrn.get_urn()
+            slice_hrn=xrn.get_hrn()
+
+        return self.driver.list_resources (slice_urn, slice_hrn, creds, options)
+    
+    def SliverStatus (self, api, xrn, creds, options):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return {}
+    
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.sliver_status (slice_urn, slice_hrn)
+    
+    def CreateSliver(self, api, xrn, creds, rspec_string, users, options):
+        """
+        Create the sliver[s] (slice) at this aggregate.    
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return ""
+    
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+
+        return self.driver.create_sliver (slice_urn, slice_hrn, creds, rspec_string, users, options)
+    
+    def DeleteSliver(self, api, xrn, creds, options):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return True
+
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.delete_sliver (slice_urn, slice_hrn, creds, options)
+
+    def RenewSliver(self, api, xrn, creds, expiration_time, options):
+        call_id = options.get('call_id')
+        if Callids().already_handled(call_id): return True
+        
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.renew_sliver (slice_urn, slice_hrn, creds, expiration_time, options)
+    
+    ### these methods could use an options extension for at least call_id
+    def start_slice(self, api, xrn, creds):
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.start_slice (slice_urn, slice_hrn, creds)
+     
+    def stop_slice(self, api, xrn, creds):
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.stop_slice (slice_urn, slice_hrn, creds)
+
+    def reset_slice(self, api, xrn):
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+        return self.driver.reset_slice (slice_urn, slice_hrn)
+
+    def GetTicket(self, api, xrn, creds, rspec, users, options):
+    
+        xrn = Xrn(xrn)
+        slice_urn=xrn.get_urn()
+        slice_hrn=xrn.get_hrn()
+
+        return self.driver.get_ticket (slice_urn, slice_hrn, creds, rspec, options)
+
index 8a3ed20..b5be45f 100644 (file)
@@ -7,6 +7,7 @@ import commands
 
 from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
     UnknownSfaType, ExistingRecord, NonExistingRecord
+from sfa.util.sfatime import utcparse, datetime_to_epoch
 from sfa.util.prefixTree import prefixTree
 from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
 from sfa.util.plxrn import hrn_to_pl_login_base
@@ -30,8 +31,9 @@ class RegistryManager:
         peers = dict ( [ (hrn,interface.get_url()) for (hrn,interface) in api.registries.iteritems() 
                        if hrn != api.hrn])
         xrn=Xrn(api.hrn)
-        return version_core({'interface':'registry', 
+        return version_core({'interface':'registry',
                              'sfa': 2,
+                             'geni_api': 2,
                              'hrn':xrn.get_hrn(),
                              'urn':xrn.get_urn(),
                              'peers':peers})
@@ -85,7 +87,9 @@ class RegistryManager:
         new_cred.set_privileges(rights)
         new_cred.get_privileges().delegate_all_privileges(True)
         if 'expires' in record:
-            new_cred.set_expiration(int(record['expires']))
+            date = utcparse(record['expires'])
+            expires = datetime_to_epoch(date)
+            new_cred.set_expiration(int(expires))
         auth_kind = "authority,ma,sa"
         # Parent not necessary, verify with certs
         #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
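The expiration handling amounts to the small conversion sketched here; utcparse and datetime_to_epoch are approximated with stdlib calls, and the accepted date format is an assumption:

    import calendar
    from datetime import datetime

    def expires_to_epoch(expires):
        # convert a record's 'expires' field into the integer epoch value
        # that Credential.set_expiration() expects
        if isinstance(expires, (int, float)):
            return int(expires)
        date = datetime.strptime(expires, "%Y-%m-%d %H:%M:%S")
        return int(calendar.timegm(date.timetuple()))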
diff --git a/sfa/managers/registry_manager_openstack.py b/sfa/managers/registry_manager_openstack.py
new file mode 100644 (file)
index 0000000..51c4472
--- /dev/null
@@ -0,0 +1,452 @@
+import types
+import time 
+# for get_key_from_incoming_ip
+import tempfile
+import os
+import commands
+
+from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
+    UnknownSfaType, ExistingRecord, NonExistingRecord
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.prefixTree import prefixTree
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
+from sfa.util.plxrn import hrn_to_pl_login_base
+from sfa.util.version import version_core
+from sfa.util.sfalogging import logger
+from sfa.trust.gid import GID 
+from sfa.trust.credential import Credential
+from sfa.trust.certificate import Certificate, Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+from sfa.storage.record import SfaRecord
+from sfa.storage.table import SfaTable
+from sfa.managers import registry_manager
+
+class RegistryManager(registry_manager.RegistryManager):
+
+    def __init__ (self, config): pass
+
+    # The GENI GetVersion call
+    def GetVersion(self, api, options):
+        peers = dict ( [ (hrn,interface.get_url()) for (hrn,interface) in api.registries.iteritems() 
+                       if hrn != api.hrn])
+        xrn=Xrn(api.hrn)
+        return version_core({'interface':'registry',
+                             'hrn':xrn.get_hrn(),
+                             'urn':xrn.get_urn(),
+                             'peers':peers})
+    
+    def GetCredential(self, api, xrn, type, is_self=False):
+        # convert xrn to hrn     
+        if type:
+            hrn = urn_to_hrn(xrn)[0]
+        else:
+            hrn, type = urn_to_hrn(xrn)
+            
+        # Is this a root or sub authority
+        auth_hrn = api.auth.get_authority(hrn)
+        if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
+            auth_hrn = hrn
+        # get record info
+        auth_info = api.auth.get_auth_info(auth_hrn)
+        table = SfaTable()
+        records = table.findObjects({'type': type, 'hrn': hrn})
+        if not records:
+            raise RecordNotFound(hrn)
+        record = records[0]
+    
+        # verify_cancreate_credential requires that the member lists
+        # (researchers, pis, etc) be filled in
+        self.driver.augment_records_with_testbed_info (record)
+        if not self.driver.is_enabled (record):
+            raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+    
+        # get the caller's gid
+        # if this is a self cred the record's gid is the caller's gid
+        if is_self:
+            caller_hrn = hrn
+            caller_gid = record.get_gid_object()
+        else:
+            caller_gid = api.auth.client_cred.get_gid_caller() 
+            caller_hrn = caller_gid.get_hrn()
+        
+        object_hrn = record.get_gid_object().get_hrn()
+        rights = api.auth.determine_user_rights(caller_hrn, record)
+        # make sure caller has rights to this object
+        if rights.is_empty():
+            raise PermissionError(caller_hrn + " has no rights to " + record['name'])
+    
+        object_gid = GID(string=record['gid'])
+        new_cred = Credential(subject = object_gid.get_subject())
+        new_cred.set_gid_caller(caller_gid)
+        new_cred.set_gid_object(object_gid)
+        new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
+        #new_cred.set_pubkey(object_gid.get_pubkey())
+        new_cred.set_privileges(rights)
+        new_cred.get_privileges().delegate_all_privileges(True)
+        if 'expires' in record:
+            date = utcparse(record['expires'])
+            expires = datetime_to_epoch(date)
+            new_cred.set_expiration(int(expires))
+        auth_kind = "authority,ma,sa"
+        # Parent not necessary, verify with certs
+        #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
+        new_cred.encode()
+        new_cred.sign()
+    
+        return new_cred.save_to_string(save_parents=True)
+    
+    
+    def Resolve(self, api, xrns, type=None, full=True):
+    
+        if not isinstance(xrns, types.ListType):
+            xrns = [xrns]
+            # try to infer type if not set and we get a single input
+            if not type:
+                type = Xrn(xrns[0]).get_type()
+        hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] 
+        # load all known registry names into a prefix tree and attempt to find
+        # the longest matching prefix
+        # create a dict where the key is a registry hrn and the value is the
+        # list of hrns handled by that registry (determined by the known prefix tree).
+        xrn_dict = {}
+        registries = api.registries
+        tree = prefixTree()
+        registry_hrns = registries.keys()
+        tree.load(registry_hrns)
+        for xrn in xrns:
+            registry_hrn = tree.best_match(urn_to_hrn(xrn)[0])
+            if registry_hrn not in xrn_dict:
+                xrn_dict[registry_hrn] = []
+            xrn_dict[registry_hrn].append(xrn)
+            
+        records = [] 
+        for registry_hrn in xrn_dict:
+            # skip hrns that did not match any registry
+            # XX should we let the user know the authority is unknown?       
+            if not registry_hrn:
+                continue
+    
+            # if the best match (longest matching hrn) is not the local registry,
+            # forward the request
+            xrns = xrn_dict[registry_hrn]
+            if registry_hrn != api.hrn:
+                credential = api.getCredential()
+                interface = api.registries[registry_hrn]
+                server_proxy = api.server_proxy(interface, credential)
+                peer_records = server_proxy.Resolve(xrns, credential)
+                records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+    
+        # try resolving the remaining unfound records at the local registry
+        local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) )
+        # 
+        table = SfaTable()
+        local_records = table.findObjects({'hrn': local_hrns})
+        
+        if full:
+            # in full mode we get as much info as we can, which involves contacting the 
+            # testbed for getting implementation details about the record
+            self.driver.augment_records_with_testbed_info(local_records)
+            # also we fill the 'url' field for known authorities
+            # used to be in the driver code; sounds like a poor man's approach though
+            def solve_neighbour_url (record):
+                if not record['type'].startswith('authority'): return 
+                hrn=record['hrn']
+                for neighbour_dict in [ api.aggregates, api.registries ]:
+                    if hrn in neighbour_dict:
+                        record['url']=neighbour_dict[hrn].get_url()
+                        return 
+            [ solve_neighbour_url (record) for record in local_records ]
+                    
+        
+        
+        # convert local record objects to dicts
+        records.extend([dict(record) for record in local_records])
+        if type:
+            records = filter(lambda rec: rec['type'] in [type], records)
+    
+        if not records:
+            raise RecordNotFound(str(hrns))
+    
+        return records
+    
+    def List(self, api, xrn, origin_hrn=None):
+        hrn, type = urn_to_hrn(xrn)
+        # load all known registry names into a prefix tree and attempt to find
+        # the longest matching prefix
+        records = []
+        registries = api.registries
+        registry_hrns = registries.keys()
+        tree = prefixTree()
+        tree.load(registry_hrns)
+        registry_hrn = tree.best_match(hrn)
+       
+        # if there was no match then this record belongs to an unknown registry
+        if not registry_hrn:
+            raise MissingAuthority(xrn)
+        # if the best match (longest matching hrn) is not the local registry,
+        # forward the request
+        records = []    
+        if registry_hrn != api.hrn:
+            credential = api.getCredential()
+            interface = api.registries[registry_hrn]
+            server_proxy = api.server_proxy(interface, credential)
+            record_list = server_proxy.List(xrn, credential)
+            records = [SfaRecord(dict=record).as_dict() for record in record_list]
+        
+        # if we still have not found the record, try the local registry
+        if not records:
+            if not api.auth.hierarchy.auth_exists(hrn):
+                raise MissingAuthority(hrn)
+    
+            table = SfaTable()
+            records = table.find({'authority': hrn})
+    
+        return records
+    
+    
+    def CreateGid(self, api, xrn, cert):
+        # get the authority
+        authority = Xrn(xrn=xrn).get_authority_hrn()
+        auth_info = api.auth.get_auth_info(authority)
+        if not cert:
+            pkey = Keypair(create=True)
+        else:
+            certificate = Certificate(string=cert)
+            pkey = certificate.get_pubkey()    
+        gid = api.auth.hierarchy.create_gid(xrn, create_uuid(), pkey) 
+        return gid.save_to_string(save_parents=True)
+    
+    ####################
+    # utility for handling relationships among the SFA objects 
+    # given that the SFA db does not handle this sort of relationship
+    # it will rely on side-effects in the testbed to keep this persistent
+    
+    # subject_record describes the subject of the relationships
+    # ref_record contains the target values for the various relationships we need to manage
+    # (to begin with, this is just the slice x person relationship)
+    def update_relations (self, subject_record, ref_record):
+        type=subject_record['type']
+        if type=='slice':
+            self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')
+        
+    # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
+    # hrns is the list of hrns that should be linked to the subject from now on
+    # target_type would be e.g. 'user' in the 'slice' x 'researcher' example
+    def update_relation (self, sfa_record, field_key, hrns, target_type):
+        # locate the linked objects in our db
+        subject_type=sfa_record['type']
+        subject_id=sfa_record['pointer']
+        table = SfaTable()
+        link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
+        link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
+        self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
+        
+
+    def Register(self, api, record):
+    
+        hrn, type = record['hrn'], record['type']
+        urn = hrn_to_urn(hrn,type)
+        # validate the type
+        if type not in ['authority', 'slice', 'node', 'user']:
+            raise UnknownSfaType(type) 
+        
+        # check if record already exists
+        table = SfaTable()
+        existing_records = table.find({'type': type, 'hrn': hrn})
+        if existing_records:
+            raise ExistingRecord(hrn)
+           
+        record = SfaRecord(dict = record)
+        record['authority'] = get_authority(record['hrn'])
+        auth_info = api.auth.get_auth_info(record['authority'])
+        pub_key = None
+        # make sure record has a gid
+        if 'gid' not in record:
+            uuid = create_uuid()
+            pkey = Keypair(create=True)
+            if 'keys' in record and record['keys']:
+                pub_key=record['keys']
+                # use only first key in record
+                if isinstance(record['keys'], types.ListType):
+                    pub_key = record['keys'][0]
+                pkey = convert_public_key(pub_key)
+    
+            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+            gid = gid_object.save_to_string(save_parents=True)
+            record['gid'] = gid
+            record.set_gid(gid)
+    
+        if type in ["authority"]:
+            # update the tree
+            if not api.auth.hierarchy.auth_exists(hrn):
+                api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
+    
+            # get the GID from the newly created authority
+            gid = auth_info.get_gid_object()
+            record.set_gid(gid.save_to_string(save_parents=True))
+
+        # update testbed-specific data if needed
+        pointer = self.driver.register (record, hrn, pub_key)
+
+        record.set_pointer(pointer)
+        record_id = table.insert(record)
+        record['record_id'] = record_id
+    
+        # update membership for researchers, pis, owners, operators
+        self.update_relations (record, record)
+        
+        return record.get_gid_object().save_to_string(save_parents=True)
+    
+    def Update(self, api, record_dict):
+        new_record = SfaRecord(dict = record_dict)
+        type = new_record['type']
+        hrn = new_record['hrn']
+        urn = hrn_to_urn(hrn,type)
+        table = SfaTable()
+        # make sure the record exists
+        records = table.findObjects({'type': type, 'hrn': hrn})
+        if not records:
+            raise RecordNotFound(hrn)
+        record = records[0]
+        record['last_updated'] = time.gmtime()
+    
+        # validate the type
+        if type not in ['authority', 'slice', 'node', 'user']:
+            raise UnknownSfaType(type) 
+
+        # Use the pointer from the existing record, not the one that the user
+        # gave us. This prevents the user from inserting a forged pointer
+        pointer = record['pointer']
+    
+        # is there a change in keys ?
+        new_key=None
+        if type=='user':
+            if 'keys' in new_record and new_record['keys']:
+                new_key=new_record['keys']
+                if isinstance (new_key,types.ListType):
+                    new_key=new_key[0]
+
+        # update the PLC information that was specified with the record
+        if not self.driver.update (record, new_record, hrn, new_key):
+            logger.warning("driver.update failed")
+    
+        # take new_key into account
+        if new_key:
+            # update the openssl key and gid
+            pkey = convert_public_key(new_key)
+            uuid = create_uuid()
+            gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+            gid = gid_object.save_to_string(save_parents=True)
+            record['gid'] = gid
+            record = SfaRecord(dict=record)
+            table.update(record)
+        
+        # update membership for researchers, pis, owners, operators
+        self.update_relations (record, new_record)
+        
+        return 1 
+    
+    # expecting an Xrn instance
+    def Remove(self, api, xrn, origin_hrn=None):
+    
+        table = SfaTable()
+        filter = {'hrn': xrn.get_hrn()}
+        hrn=xrn.get_hrn()
+        type=xrn.get_type()
+        if type and type not in ['all', '*']:
+            filter['type'] = type
+    
+        records = table.find(filter)
+        if not records: raise RecordNotFound(hrn)
+        record = records[0]
+        type = record['type']
+        
+        if type not in ['slice', 'user', 'node', 'authority'] :
+            raise UnknownSfaType(type)
+
+        credential = api.getCredential()
+        registries = api.registries
+    
+        # Try to remove the object from the PLCDB of federated agg.
+        # This is attempted before removing the object from the local agg's PLCDB and sfa table
+        if hrn.startswith(api.hrn) and type in ['user', 'slice', 'authority']:
+            for registry in registries:
+                if registry not in [api.hrn]:
+                    try:
+                        result=registries[registry].remove_peer_object(credential, record, origin_hrn)
+                    except:
+                        pass
+
+        # call testbed callback first
+        # IIUC this is done on the local testbed TOO because of the refreshpeer link
+        if not self.driver.remove(record):
+            logger.warning("driver.remove failed")
+
+        # delete from sfa db
+        table.remove(record)
+    
+        return 1
+
+    # This is a PLC-specific thing...
+    def get_key_from_incoming_ip (self, api):
+        # verify that the callers's ip address exist in the db and is an interface
+        # for a node in the db
+        (ip, port) = api.remote_addr
+        interfaces = self.driver.shell.GetInterfaces({'ip': ip}, ['node_id'])
+        if not interfaces:
+            raise NonExistingRecord("no such ip %(ip)s" % locals())
+        nodes = self.driver.shell.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
+        if not nodes:
+            raise NonExistingRecord("no such node using ip %(ip)s" % locals())
+        node = nodes[0]
+       
+        # look up the sfa record
+        table = SfaTable()
+        records = table.findObjects({'type': 'node', 'pointer': node['node_id']})
+        if not records:
+            raise RecordNotFound("pointer:" + str(node['node_id']))  
+        record = records[0]
+        
+        # generate a new keypair and gid
+        uuid = create_uuid()
+        pkey = Keypair(create=True)
+        urn = hrn_to_urn(record['hrn'], record['type'])
+        gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
+        gid = gid_object.save_to_string(save_parents=True)
+        record['gid'] = gid
+        record.set_gid(gid)
+
+        # update the record
+        table.update(record)
+  
+        # attempt to scp the key
+        # and gid onto the node
+        # this will only work for planetlab-based components
+        (kfd, key_filename) = tempfile.mkstemp() 
+        (gfd, gid_filename) = tempfile.mkstemp() 
+        pkey.save_to_file(key_filename)
+        gid_object.save_to_file(gid_filename, save_parents=True)
+        host = node['hostname']
+        key_dest="/etc/sfa/node.key"
+        gid_dest="/etc/sfa/node.gid" 
+        scp = "/usr/bin/scp" 
+        #identity = "/etc/planetlab/root_ssh_key.rsa"
+        identity = "/etc/sfa/root_ssh_key"
+        scp_options=" -i %(identity)s " % locals()
+        scp_options+="-o StrictHostKeyChecking=no "
+        scp_key_command="%(scp)s %(scp_options)s %(key_filename)s root@%(host)s:%(key_dest)s" %\
+                         locals()
+        scp_gid_command="%(scp)s %(scp_options)s %(gid_filename)s root@%(host)s:%(gid_dest)s" %\
+                         locals()    
+
+        all_commands = [scp_key_command, scp_gid_command]
+        
+        for command in all_commands:
+            (status, output) = commands.getstatusoutput(command)
+            if status:
+                raise Exception, output
+
+        for filename in [key_filename, gid_filename]:
+            os.unlink(filename)
+
+        return 1 
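Both Resolve and List above dispatch requests by loading the known registry hrns into a prefix tree and picking the longest matching prefix for each xrn. A small illustration of that mechanism, with invented registry names:

    # illustrative only: 'plc', 'plc.inria' and 'ple' are made-up registry hrns
    tree = prefixTree()
    tree.load(['plc', 'plc.inria', 'ple'])
    tree.best_match('plc.inria.alice')    # expected to yield 'plc.inria', the longest matching registry
    tree.best_match('ple.upmc.slice1')    # expected to yield 'ple'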
index be6cc7e..e9e446a 100644 (file)
@@ -48,15 +48,17 @@ class SliceManager:
             if rspec_version.content_type in ['*', 'request']:
                 request_rspec_versions.append(rspec_version.to_dict())
         xrn=Xrn(api.hrn, 'authority+sa')
-        version_more = {'interface':'slicemgr',
-                        'sfa': 2,
-                        'geni_api': 2,
-                        'hrn' : xrn.get_hrn(),
-                        'urn' : xrn.get_urn(),
-                        'peers': peers,
-                        'geni_request_rspec_versions': request_rspec_versions,
-                        'geni_ad_rspec_versions': ad_rspec_versions,
-                    }
+        version_more = {
+            'interface':'slicemgr',
+            'sfa': 2,
+            'geni_api': 2,
+            'geni_api_versions': {'2': 'http://%s:%s' % (api.config.SFA_SM_HOST, api.config.SFA_SM_PORT)},
+            'hrn' : xrn.get_hrn(),
+            'urn' : xrn.get_urn(),
+            'peers': peers,
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
         sm_version=version_core(version_more)
         # local aggregate if present needs to have localhost resolved
         if api.hrn in api.aggregates:
@@ -114,6 +116,7 @@ class SliceManager:
                     forward_options['rspec_version'] = version_manager.get_version('SFA 1').to_dict()
                 else:
                     forward_options['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+                    forward_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
                 rspec = server.ListResources(credential, forward_options)
                 return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
             except Exception, e:
@@ -131,7 +134,8 @@ class SliceManager:
         version_string = "rspec_%s" % (rspec_version)
     
         # look in cache first
-        if self.cache and not xrn:
+        cached_requested = options.get('cached', True)
+        if not xrn and self.cache and cached_requested:
             rspec =  self.cache.get(version_string)
             if rspec:
                 api.logger.debug("SliceManager.ListResources returns cached advertisement")
@@ -257,18 +261,26 @@ class SliceManager:
         call_id = options.get('call_id')
         if Callids().already_handled(call_id): return True
 
-        def _RenewSliver(server, xrn, creds, expiration_time, options):
-            return server.RenewSliver(xrn, creds, expiration_time, options)
-    
-        (hrn, type) = urn_to_hrn(xrn)
+        def _RenewSliver(aggregate, server, xrn, creds, expiration_time, options):
+            try:
+                result=server.RenewSliver(xrn, creds, expiration_time, options)
+                if type(result)!=dict:
+                    result = {"code": {"geni_code": 0}, "value": result}
+                result["aggregate"] = aggregate
+                return result
+            except:
+                logger.log_exc('Something wrong in _RenewSliver with URL %s'%server.url)
+                return {"aggregate": aggregate, "exc_info": traceback.format_exc(), "code": {"geni_code": -1}, "value": False, "output": ""}
+
+        (hrn, urn_type) = urn_to_hrn(xrn)
         # get the callers hrn
         valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
         caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-    
+
         # attempt to use delegated credential first
         cred = api.getDelegatedCredential(creds)
         if not cred:
-            cred = api.getCredential()
+            cred = api.getCredential(minimumExpiration=31*86400)
         threads = ThreadManager()
         for aggregate in api.aggregates:
             # prevent infinite loop. Dont send request back to caller
@@ -277,13 +289,24 @@ class SliceManager:
                 continue
             interface = api.aggregates[aggregate]
             server = api.server_proxy(interface, cred)
-            threads.run(_RenewSliver, server, xrn, [cred], expiration_time, options)
-        # 'and' the results
-        results = [ReturnValue.get_value(result) for result in threads.get_results()]
-        return reduce (lambda x,y: x and y, results , True)
-    
+            threads.run(_RenewSliver, aggregate, server, xrn, [cred], expiration_time, options)
+
+        results = threads.get_results()
+
+        geni_code = 0
+        geni_output = ",".join([x.get("output","") for x in results])
+        geni_value = reduce (lambda x,y: x and y, [result.get("value",False) for result in results], True)
+        for agg_result in results:
+            agg_geni_code = agg_result["code"].get("geni_code",0)
+            if agg_geni_code:
+                geni_code = agg_geni_code
+
+        results = {"aggregates": results, "code": {"geni_code": geni_code}, "value": geni_value, "output": geni_output}
+
+        return results
+
     def DeleteSliver(self, api, xrn, creds, options):
-        call_id = options.get('call_id') 
+        call_id = options.get('call_id')
         if Callids().already_handled(call_id): return ""
 
         def _DeleteSliver(server, xrn, creds, options):
@@ -293,7 +316,7 @@ class SliceManager:
         # get the callers hrn
         valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
         caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-    
+
         # attempt to use delegated credential first
         cred = api.getDelegatedCredential(creds)
         if not cred:
diff --git a/sfa/openstack/__init__.py b/sfa/openstack/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/openstack/openstack_driver.py b/sfa/openstack/openstack_driver.py
new file mode 100644 (file)
index 0000000..67c5fed
--- /dev/null
@@ -0,0 +1,466 @@
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+    RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
+from sfa.util.cache import Cache
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.table import SfaTable
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+from sfa.openstack.openstack_shell import OpenstackShell
+import sfa.plc.peers as peers
+from sfa.plc.plaggregate import PlAggregate
+from sfa.plc.plslices import PlSlices
+from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+
+
+def list_to_dict(recs, key):
+    """
+    convert a list of dictionaries into a dictionary keyed on the 
+    specified dictionary key 
+    """
+    return dict ( [ (rec[key],rec) for rec in recs ] )
+
+#
+# OpenstackShell is a thin proxy to the local nova library; methods
+# can be sent as-is and it takes care of authentication
+# from the global config
+# 
+class OpenstackDriver (Driver):
+
+    # the cache instance is a class member so it survives across incoming requests
+    cache = None
+
+    def __init__ (self, config):
+        Driver.__init__ (self, config)
+        self.shell = OpenstackShell (config)
+        self.cache=None
+        if config.SFA_AGGREGATE_CACHING:
+            if OpenstackDriver.cache is None:
+                OpenstackDriver.cache = Cache()
+            self.cache = OpenstackDriver.cache
+    ########################################
+    ########## registry oriented
+    ########################################
+
+    ########## disabled users 
+    def is_enabled (self, record):
+        # all records are enabled
+        return True
+
+    def augment_records_with_testbed_info (self, sfa_records):
+        return self.fill_record_info (sfa_records)
+
+    ########## 
+    def register (self, sfa_record, hrn, pub_key):
+        type = sfa_record['type']
+        pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
+
+        if type == 'slice':
+            acceptable_fields=['url', 'instantiation', 'name', 'description']
+            # add slice description, name, researchers, PI 
+            pass
+
+        elif type == 'user':
+            # add person roles, projects and keys
+            pass
+        return pointer  # xxx 'pointer' is never assigned above; this registration path is still a stub
+        
+    ##########
+    # xxx actually old_sfa_record comes filled with plc stuff as well in the original code
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        pointer = old_sfa_record['pointer']
+        type = old_sfa_record['type']
+
+        # new_key implemented for users only
+        if new_key and type not in [ 'user' ]:
+            raise UnknownSfaType(type)
+
+        elif type == "slice":
+            # can update description, researchers and PI
+            pass 
+        elif type == "user":
+            # can update  slices, keys and roles
+            pass
+        return True
+        
+
+    ##########
+    def remove (self, sfa_record):
+        type=sfa_record['type']
+        name = Xrn(sfa_record['hrn']).get_leaf()     
+        if type == 'user':
+            if self.shell.user_get(name):
+                self.shell.user_delete(name)
+        elif type == 'slice':
+            if self.shell.project_get(name):
+                self.shell.project_delete(name)
+        return True
+
+
+    ####################
+    def fill_record_info(self, records):
+        """
+        Given a (list of) SFA record, fill in the PLC specific 
+        and SFA specific fields in the record. 
+        """
+        if not isinstance(records, list):
+            records = [records]
+
+        for record in records:
+            name = Xrn(record['hrn']).get_leaf()
+            os_record = None
+            if record['type'] == 'user':
+                os_record = self.shell.user_get(name)
+                record['slices'] = [self.hrn + "." + proj.name for \
+                                    proj in os_record.projects]
+                record['roles'] = [role for role in os_record.roles]
+                keys = self.shell.key_pair_get_all_by_user(name)
+                record['keys'] = [key.public_key for key in keys]     
+            elif record['type'] == 'slice': 
+                os_record = self.shell.project_get(name)
+                record['description'] = os_record.description
+                record['PI'] = self.hrn + "." + os_record.project_manager
+                record['geni_creator'] = record['PI'] 
+                record['researcher'] = [self.hrn + "." + user.name for \
+                                         user in os_record.members]
+            else:
+                continue
+            record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
+            record['geni_certificate'] = record['gid'] 
+            record['name'] = os_record.name
+            if os_record.created_at is not None:    
+                record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
+            if os_record.updated_at is not None:
+                record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
+        return records
+
+
+    ####################
+    # plcapi works by changes, compute what needs to be added/deleted
+    def update_relation (self, subject_type, target_type, subject_id, target_ids):
+        # hard-wire the code for slice/user for now, could be smarter if needed
+        if subject_type =='slice' and target_type == 'user':
+            subject=self.shell.project_get(subject_id)[0]
+            current_target_ids = [user.name for user in subject.members]
+            add_target_ids = list ( set (target_ids).difference(current_target_ids))
+            del_target_ids = list ( set (current_target_ids).difference(target_ids))
+            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+            for target_id in add_target_ids:
+                self.shell.project_add_member(target_id,subject_id)
+                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+            for target_id in del_target_ids:
+                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+                self.shell.project_remove_member(target_id, subject_id)
+        else:
+            logger.info('unexpected relation to maintain, %s -> %s'%(subject_type,target_type))
+
+        
+    ########################################
+    ########## aggregate oriented
+    ########################################
+
+    def testbed_name (self): return "openstack"
+
+    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
+
+    def list_slices (self, creds, options):
+        # look in cache first
+        if self.cache:
+            slices = self.cache.get('slices')
+            if slices:
+                logger.debug("OpenstackDriver.list_slices returns from cache")
+                return slices
+    
+        # get data from db 
+        slices = self.shell.GetSlices({'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+    
+        # cache the result
+        if self.cache:
+            logger.debug ("OpenstackDriver.list_slices stores value in cache")
+            self.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+        
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        cached_requested = options.get('cached', True) 
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        version_string = "rspec_%s" % (rspec_version)
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        if cached_requested and self.cache and not slice_hrn:
+            rspec = self.cache.get(version_string)
+            if rspec:
+                logger.debug("OpenstackDriver.ListResources: returning cached advertisement")
+                return rspec 
+    
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = PlAggregate(self)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, 
+                                     options=options)
+    
+        # cache the result
+        if self.cache and not slice_hrn:
+            logger.debug("OpenstackDriver.ListResources: stores advertisement in cache")
+            self.cache.add(version_string, rspec)
+    
+        return rspec
+    
+    def sliver_status (self, slice_urn, slice_hrn):
+        # find out where this slice is currently running
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        
+        slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
+        if len(slices) == 0:        
+            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+        slice = slices[0]
+        
+        # report about the local nodes only
+        nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
+                              ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+
+        if len(nodes) == 0:
+            raise SliverDoesNotExist("You have not allocated any slivers here") 
+
+        site_ids = [node['site_id'] for node in nodes]
+    
+        result = {}
+        top_level_status = 'unknown'
+        if nodes:
+            top_level_status = 'ready'
+        result['geni_urn'] = slice_urn
+        result['pl_login'] = slice['name']
+        result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+        
+        resources = []
+        for node in nodes:
+            res = {}
+            res['pl_hostname'] = node['hostname']
+            res['pl_boot_state'] = node['boot_state']
+            res['pl_last_contact'] = node['last_contact']
+            if node['last_contact'] is not None:
+                
+                res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
+            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            res['geni_urn'] = sliver_id
+            if node['boot_state'] == 'boot':
+                res['geni_status'] = 'ready'
+            else:
+                res['geni_status'] = 'failed'
+                top_level_status = 'failed' 
+                
+            res['geni_error'] = ''
+    
+            resources.append(res)
+            
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+        return result
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+
+        aggregate = PlAggregate(self)
+        slices = PlSlices(self)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
+        
+        # ensure site record exists
+        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+        # ensure person records exists
+        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+        # ensure slice attributes exists
+        slices.verify_slice_attributes(slice, requested_attributes, options=options)
+        
+        # add/remove slice from nodes
+        requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer) 
+   
+        # add/remove links links 
+        slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
+    
+        # handle MyPLC peer association.
+        # only used by plc and ple.
+        slices.handle_peer(site, slice, persons, peer)
+        
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+    
+        # determine if this is a peer slice
+        # xxx I wonder if this would not need to use PlSlices.get_peer instead 
+        # in which case plc.peers could be deprecated, as this is
+        # the only remaining call to that method in plc.peers
+        peer = peers.get_peer(self, slice_hrn)
+        try:
+            if peer:
+                self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
+        finally:
+            if peer:
+                self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+    
+    def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        record = {'expires': int(datetime_to_epoch(requested_time))}
+        try:
+            self.shell.UpdateSlice(slice['slice_id'], record)
+            return True
+        except:
+            return False
+
+    # remove the 'enabled' tag 
+    def start_slice (self, slice_urn, slice_hrn, creds):
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+        # just remove the tag if it exists
+        if slice_tags:
+            self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
+        return 1
+
+    # set the 'enabled' tag to 0
+    def stop_slice (self, slice_urn, slice_hrn, creds):
+        slicename = hrn_to_pl_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
+        if not slice_tags:
+            self.shell.AddSliceTag(slice_id, 'enabled', '0')
+        elif slice_tags[0]['value'] != "0":
+            tag_id = slice_tags[0]['slice_tag_id']
+            self.shell.UpdateSliceTag(tag_id, '0')
+        return 1
+    
+    def reset_slice (self, slice_urn, slice_hrn, creds):
+        raise SfaNotImplemented ("reset_slice not available at this interface")
+    
+    # xxx this code is quite old and has not run for ages
+    # it is obviously totally broken and needs a rewrite
+    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+        raise SfaNotImplemented,"OpenstackDriver.get_ticket needs a rewrite"
+# please keep this code for future reference
+#        slices = PlSlices(self)
+#        peer = slices.get_peer(slice_hrn)
+#        sfa_peer = slices.get_sfa_peer(slice_hrn)
+#    
+#        # get the slice record
+#        credential = api.getCredential()
+#        interface = api.registries[api.hrn]
+#        registry = api.server_proxy(interface, credential)
+#        records = registry.Resolve(xrn, credential)
+#    
+#        # make sure we get a local slice record
+#        record = None
+#        for tmp_record in records:
+#            if tmp_record['type'] == 'slice' and \
+#               not tmp_record['peer_authority']:
+#    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+#                slice_record = SliceRecord(dict=tmp_record)
+#        if not record:
+#            raise RecordNotFound(slice_hrn)
+#        
+#        # similar to CreateSliver, we must verify that the required records exist
+#        # at this aggregate before we can issue a ticket
+#        # parse rspec
+#        rspec = RSpec(rspec_string)
+#        requested_attributes = rspec.version.get_slice_attributes()
+#    
+#        # ensure site record exists
+#        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
+#        # ensure slice record exists
+#        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
+#        # ensure person records exists
+#    # xxx users is undefined in this context
+#        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+#        # ensure slice attributes exists
+#        slices.verify_slice_attributes(slice, requested_attributes)
+#        
+#        # get sliver info
+#        slivers = slices.get_slivers(slice_hrn)
+#    
+#        if not slivers:
+#            raise SliverDoesNotExist(slice_hrn)
+#    
+#        # get initscripts
+#        initscripts = []
+#        data = {
+#            'timestamp': int(time.time()),
+#            'initscripts': initscripts,
+#            'slivers': slivers
+#        }
+#    
+#        # create the ticket
+#        object_gid = record.get_gid_object()
+#        new_ticket = SfaTicket(subject = object_gid.get_subject())
+#        new_ticket.set_gid_caller(api.auth.client_gid)
+#        new_ticket.set_gid_object(object_gid)
+#        new_ticket.set_issuer(key=api.key, subject=self.hrn)
+#        new_ticket.set_pubkey(object_gid.get_pubkey())
+#        new_ticket.set_attributes(data)
+#        new_ticket.set_rspec(rspec)
+#        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+#        new_ticket.encode()
+#        new_ticket.sign()
+#    
+#        return new_ticket.save_to_string(save_parents=True)
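One detail worth calling out in the new driver: the advertisement cache is a class attribute, so it is shared by every OpenstackDriver instance created for incoming requests, while self.cache simply points at it when SFA_AGGREGATE_CACHING is on. A minimal sketch of the pattern (not part of the diff):

    # illustrative only
    class ExampleDriver(object):
        cache = None                              # shared across all instances
        def __init__(self, config):
            self.cache = None
            if config.SFA_AGGREGATE_CACHING:
                if ExampleDriver.cache is None:
                    ExampleDriver.cache = Cache() # Cache as in sfa.util.cache
                self.cache = ExampleDriver.cache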
diff --git a/sfa/openstack/openstack_shell.py b/sfa/openstack/openstack_shell.py
new file mode 100644 (file)
index 0000000..d2c19fb
--- /dev/null
@@ -0,0 +1,69 @@
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+from sfa.util.faults import SfaNotImplemented
+from sfa.util.sfalogging import logger
+
+class OpenstackShell:
+    """
+    A simple shell to an openstack (nova) instance
+    This class can receive all Openstack calls to the underlying testbed
+    """
+    
+    # don't care about limiting calls yet
+    direct_calls = []
+    alias_calls = {}
+
+
+    # use the native nova library for higher performance when the openstack db is local
+    def __init__ ( self, config ) :
+        url = config.SFA_PLC_URL
+        # try to figure if the url is local
+        hostname=urlparse(url).hostname
+        is_local=False
+        if hostname == 'localhost': is_local=True
+        # otherwise compare IP addresses; 
+        # this might fail for any number of reasons, so let's harden that
+        try:
+            # xxx todo this seems to result in a DNS request for each incoming request to the AM
+            # should be cached or improved
+            url_ip=socket.gethostbyname(hostname)
+            local_ip=socket.gethostbyname(socket.gethostname())
+            if url_ip==local_ip: is_local=True
+        except:
+            pass
+
+
+        # Openstack provides a RESTful api but it is very limited, so we will
+        # ignore it for now and always use the native openstack (nova) library.
+        # This of course will not work if sfa is not installed on the same machine
+        # as the openstack-compute package.   
+        if is_local:
+            try:
+                from nova.auth.manager import AuthManager, db, context
+                direct_access=True
+            except:
+                direct_access=False
+        if is_local and direct_access:
+            
+            logger.debug('openstack access - native')
+            self.auth = context.get_admin_context()
+            # AuthManager isn't really useful for much yet but it's
+            # more convenient to use than the db reference which requires
+            # a context. Let's hold onto the AuthManager reference for now.
+            #self.proxy = AuthManager()
+            self.auth_manager = AuthManager()
+            self.proxy = db
+
+        else:
+            self.auth = None
+            self.proxy = None
+            logger.debug('openstack access - REST')
+            raise SfaNotImplemented('openstack access - Rest')
+
+    def __getattr__(self, name):
+        def func(*args, **kwds):
+            result=getattr(self.proxy, name)(self.auth, *args, **kwds)
+            return result
+        return func
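Since OpenstackShell defines no methods of its own, every attribute lookup on it is turned into a call on the nova db proxy with the admin context prepended. A hypothetical use, assuming a local nova install so the native path is taken (the method name comes from the nova db layer, not from this class):

    # hypothetical sketch
    shell = OpenstackShell(config)           # config is an sfa Config-like object
    user = shell.user_get('alice')           # forwarded as self.proxy.user_get(self.auth, 'alice')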
index 3919f7c..1cace7a 100644 (file)
@@ -1,6 +1,6 @@
 #!/usr/bin/python
 from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn, urn_to_sliver_id
-from sfa.util.sfatime import epochparse
+from sfa.util.sfatime import utcparse, datetime_to_string
 from sfa.util.sfalogging import logger
 
 from sfa.rspecs.rspec import RSpec
@@ -127,7 +127,8 @@ class PlAggregate:
             # most likely a default/global sliver attribute (node_id == None)
             if tag['node_id'] not in slivers:
                 sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
-                                 'name': 'plab-vserver',
+                                 'name': slice['name'],
+                                 'type': 'plab-vserver',
                                  'tags': []})
                 slivers[tag['node_id']] = sliver
             slivers[tag['node_id']]['tags'].append(tag)
@@ -183,10 +184,12 @@ class PlAggregate:
             rspec_node['component_name'] = node['hostname']
             rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
             rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.hrn, site['login_base']), 'authority+sa')
-            rspec_node['boot_state'] = node['boot_state']
-            rspec_node['exclusive'] = 'False'
-            rspec_node['hardware_types']= [HardwareType({'name': 'plab-pc'}),
-                                           HardwareType({'name': 'pc'})]
+            # do not include boot state (<available> element) in the manifest rspec
+            if not slice:     
+                rspec_node['boot_state'] = node['boot_state']
+            rspec_node['exclusive'] = 'false'
+            rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
+                                            HardwareType({'name': 'pc'})]
             # only doing this because protogeni rspec needs
             # to advertise available initscripts 
             rspec_node['pl_initscripts'] = pl_initscripts.values()
@@ -194,7 +197,7 @@ class PlAggregate:
             # assumes that sites, interfaces and tags have already been prepared.
             site = sites_dict[node['site_id']]
             if site['longitude'] and site['latitude']:  
-                location = Location({'longitude': site['longitude'], 'latitude': site['latitude']})
+                location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
                 rspec_node['location'] = location
             rspec_node['interfaces'] = []
             if_count=0
@@ -219,7 +222,7 @@ class PlAggregate:
                 rspec_node['slivers'] = [sliver]
                 
                 # slivers always provide the ssh service
-                login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22'})
+                login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
                 service = Services({'login': login})
                 rspec_node['services'] = [service]
             rspec_nodes.append(rspec_node)
@@ -238,7 +241,7 @@ class PlAggregate:
         slice, slivers = self.get_slice_and_slivers(slice_xrn)
         rspec = RSpec(version=rspec_version, user_options=options)
         if slice and 'expires' in slice:
-            rspec.xml.set('expires',  epochparse(slice['expires']))
+            rspec.xml.set('expires',  datetime_to_string(utcparse(slice['expires'])))
 
         nodes, links = self.get_nodes_and_links(slice, slivers)
         rspec.version.add_nodes(nodes)
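Taken together, the plaggregate changes mean a node with a sliver now advertises exclusive='false' in lowercase, a 'plab-vserver' sliver whose name is the slice name, and a login service carrying that same name as the ssh username. A hedged sketch of the resulting node dictionary before serialization, with invented hostname and slice name:

    # illustrative only: field names follow the element classes used in this file
    rspec_node = {
        'component_name': 'node1.example.org',
        'exclusive': 'false',
        'slivers': [Sliver({'name': 'onelab_myslice', 'type': 'plab-vserver', 'tags': []})],
        'services': [Services({'login': Login({'authentication': 'ssh-keys',
                                               'hostname': 'node1.example.org',
                                               'port': '22',
                                               'username': 'onelab_myslice'})})],
    }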
index fa97183..3f01e7f 100644 (file)
@@ -6,7 +6,7 @@ from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
 
 from sfa.util.sfalogging import logger
 from sfa.util.defaultdict import defaultdict
-from sfa.util.sfatime import utcparse
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
 from sfa.util.xrn import hrn_to_urn, get_leaf, urn_to_sliver_id
 from sfa.util.cache import Cache
 
@@ -224,8 +224,10 @@ class PlDriver (Driver):
                pl_record["url"] = sfa_record["url"]
            if "description" in sfa_record:
                pl_record["description"] = sfa_record["description"]
-           if "expires" in sfa_record:
-               pl_record["expires"] = int(sfa_record["expires"])
+        if "expires" in sfa_record:
+            date = utcparse(sfa_record['expires'])
+            expires = datetime_to_epoch(date)
+            pl_record["expires"] = expires
 
         elif type == "node":
             if not "hostname" in pl_record:
@@ -400,6 +402,11 @@ class PlDriver (Driver):
                                if site_id in sites]
                 site_hrns = [".".join([auth_hrn, lbase]) for lbase in login_bases]
                 record['sites'] = site_hrns
+
+            if 'expires' in record:
+                date = utcparse(record['expires'])
+                datestring = datetime_to_string(date)
+                record['expires'] = datestring 
             
         return records   
 
@@ -618,6 +625,10 @@ class PlDriver (Driver):
         # report about the local nodes only
         nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
                               ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+
+        if len(nodes) == 0:
+            raise SliverDoesNotExist("You have not allocated any slivers here") 
+
         site_ids = [node['site_id'] for node in nodes]
     
         result = {}
@@ -626,7 +637,7 @@ class PlDriver (Driver):
             top_level_status = 'ready'
         result['geni_urn'] = slice_urn
         result['pl_login'] = slice['name']
-        result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+        result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
         
         resources = []
         for node in nodes:
@@ -635,7 +646,8 @@ class PlDriver (Driver):
             res['pl_boot_state'] = node['boot_state']
             res['pl_last_contact'] = node['last_contact']
             if node['last_contact'] is not None:
-                res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+                
+                res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
             sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
             res['geni_urn'] = sliver_id
             if node['boot_state'] == 'boot':
@@ -716,7 +728,7 @@ class PlDriver (Driver):
             raise RecordNotFound(slice_hrn)
         slice = slices[0]
         requested_time = utcparse(expiration_time)
-        record = {'expires': int(time.mktime(requested_time.timetuple()))}
+        record = {'expires': int(datetime_to_epoch(requested_time))}
         try:
             self.shell.UpdateSlice(slice['slice_id'], record)
             return True
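The repeated replacement of datetime.datetime.fromtimestamp(...).ctime() with datetime_to_string(utcparse(...)) trades a local-time ctime() string for a stable UTC rendering. A rough sketch of the difference, with an assumed epoch value:

    # illustrative only; the exact output of datetime_to_string is defined in sfa.util.sfatime
    import datetime
    epoch = 1326722268                                            # assumed sample value
    old_style = datetime.datetime.fromtimestamp(epoch).ctime()    # e.g. 'Mon Jan 16 15:17:48 2012', local timezone
    # datetime_to_string(utcparse(epoch)) is expected to express the same instant in UTC,
    # roughly '2012-01-16 14:17:48'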
index d40e0b2..d2cd9cd 100644 (file)
@@ -81,6 +81,6 @@ class PlShell:
             if not actual_name:
                 raise Exception, "Illegal method call %s for PL driver"%(name)
             result=getattr(self.proxy, actual_name)(self.plauth, *args, **kwds)
-            logger.debug('%s (%s) returned ... '%(name,actual_name))
+            logger.debug('PlShell %s (%s) returned ... '%(name,actual_name))
             return result
         return func
index 4a88e22..2937d72 100644 (file)
@@ -2,13 +2,12 @@ from types import StringTypes
 from collections import defaultdict
 import sys
 
+from sfa.util.sfatime import utcparse, datetime_to_epoch
 from sfa.util.sfalogging import logger
 from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
 #from sfa.util.policy import Policy
-from sfa.util.xrn import Xrn
-
+from sfa.util.plxrn import PlXrn
 from sfa.rspecs.rspec import RSpec
-
 from sfa.plc.vlink import VLink
 from sfa.util.plxrn import hrn_to_pl_slicename
 
@@ -344,8 +343,10 @@ class PlSlices:
                 # unbind from peer so we can modify if necessary. Will bind back later
                 self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
                #Update the existing record (e.g. the expires field) with the latest info.
-            if slice_record and slice['expires'] != slice_record['expires']:
-                self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
+            if slice_record.get('expires'):
+                requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
+                if requested_expires and slice['expires'] != requested_expires:
+                    self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : requested_expires})
        
         return slice
 
@@ -357,7 +358,7 @@ class PlSlices:
         for user in users:
             hrn, type = urn_to_hrn(user['urn'])
             username = get_leaf(hrn)
-            login_base = get_leaf(get_authority(user['urn']))
+            login_base = PlXrn(xrn=user['urn']).pl_login_base()
             user['username'] = username
             user['site'] = login_base
 
@@ -409,6 +410,7 @@ class PlSlices:
                                 if login_base == site['login_base'] and \
                                    existing_user['email'].startswith(requested_user['username']+'@'):
                                     existing_user_ids.append(existing_user['email'])
+                                    requested_user['email'] = existing_user['email']
                                     users_dict[existing_user['email']] = requested_user
                                     user_found = True
                                     break
@@ -417,6 +419,7 @@ class PlSlices:
       
                     if user_found == False:
                         fake_email = requested_user['username'] + '@geni.net'
+                        requested_user['email'] = fake_email
                         users_dict[fake_email] = requested_user
                 
         # requested slice users        
@@ -440,7 +443,7 @@ class PlSlices:
             for removed_user_id in removed_user_ids:
                 self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
         # update_existing users
-        updated_users_list = [user for user in existing_slice_users if user['email'] in \
+        updated_users_list = [user for user in users_dict.values() if user['email'] in \
           updated_user_ids]
         self.verify_keys(existing_slice_users, updated_users_list, peer, options)
 
@@ -593,7 +596,7 @@ class PlSlices:
                 self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
             except Exception, e:
                 logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
-                                % (name, value,  node_id, str(e)))
+                                % (slice['name'], attribute['value'],  attribute.get('node_id'), str(e)))
 
         # add requested_attributes
         for attribute in added_slice_attributes:
@@ -601,5 +604,5 @@ class PlSlices:
                 self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
             except Exception, e:
                 logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
-                                % (name, value,  node_id, str(e)))
+                                % (slice['name'], attribute['value'],  attribute.get('node_id'), str(e)))
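For reference, a minimal sketch of the new expires handling, assuming the utcparse and datetime_to_epoch helpers from sfa.util.sfatime (see the sfatime hunk further down); the literal values are hypothetical:

    # normalize the requested expiration to an integer epoch before comparing with PLC
    from sfa.util.sfatime import utcparse, datetime_to_epoch

    slice_record = {'expires': '2012-02-01T00:00:00Z'}    # as sent by the caller
    plc_slice = {'expires': 1327000000}                    # hypothetical value already stored in PLC
    requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
    if requested_expires and plc_slice['expires'] != requested_expires:
        print("would call UpdateSlice(slice_id, {'expires': %d})" % requested_expires)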
 
index 4e38729..0883d39 100644 (file)
@@ -21,7 +21,7 @@ class BaseVersion:
             'version': self.version,
             'schema': self.schema,
             'namespace': self.namespace,
-            'extensions': self.extensions
+            'extensions': self.extensions.values()
         }
 
     def __str__(self):
index ae42641..99dc5c3 100644 (file)
@@ -4,5 +4,6 @@ class Login(Element):
     fields = [
         'authentication',
         'hostname',
-        'port'
+        'port',
+        'username'
     ]
index c37275a..cae644d 100644 (file)
@@ -34,12 +34,13 @@ class PGv2Node:
             # set interfaces
             if node.get('interfaces'):
                 for interface in  node.get('interfaces', []):
-                    node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4'])
+                    node_elem.add_instance('interface', interface, ['component_id', 'client_id'])
             # set available element
-            if node.get('boot_state') and node.get('boot_state').lower() == 'boot':
-                available_elem = node_elem.add_element('available', now='True')
-            else:
-                available_elem = node_elem.add_element('available', now='False')
+            if node.get('boot_state'):
+                if node.get('boot_state').lower() == 'boot':
+                    available_elem = node_elem.add_element('available', now='true')
+                else:
+                    available_elem = node_elem.add_element('available', now='false')
             # add services
             PGv2Services.add_services(node_elem, node.get('services', [])) 
             # add slivers
@@ -78,8 +79,8 @@ class PGv2Node:
                 node['authority_id'] = Xrn(node_elem.attrib['component_id']).get_authority_urn()
             
             # get hardware types
-            hardware_type_elems = node_elem.xpath('./default:hardwate_type | ./hardware_type')
-            node['hardware_types'] = [hw_type.get_instnace(HardwareType) for hw_type in hardware_type_elems]
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
             
             # get location
             location_elems = node_elem.xpath('./default:location | ./location')
index 94a9f63..ffa4b41 100644 (file)
@@ -15,18 +15,19 @@ class PGv2SliverType:
                 sliver_elem.set('name', sliver['type'])
             if sliver.get('client_id'):
                 sliver_elem.set('client_id', sliver['client_id'])  
-            PGv2SliverType.add_sliver_attributes(sliver_elem, sliver.get('pl_tags', []))
+            PGv2SliverType.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
     
     @staticmethod
     def add_sliver_attributes(xml, attributes):
-        for attribute in attributes:
-            if attribute['name'] == 'initscript':
-                xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
-            elif tag['tagname'] == 'flack_info':
-                attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
-                attrib_dict = eval(tag['value'])
-                for (key, value) in attrib_dict.items():
-                    attrib_elem.set(key, value)                
+        if attributes: 
+            for attribute in attributes:
+                if attribute['name'] == 'initscript':
+                    xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
+                elif attribute['name'] == 'flack_info':
+                    attrib_elem = xml.add_element('{%s}info' % xml.namespaces['flack'])
+                    attrib_dict = eval(attribute['value'])
+                    for (key, value) in attrib_dict.items():
+                        attrib_elem.set(key, value)
     @staticmethod
     def get_slivers(xml, filter={}):
         xpath = './default:sliver_type | ./sliver_type'
index 753192d..fdf1eb2 100644 (file)
@@ -24,7 +24,7 @@ class SFAv1Node:
             network_elem = network_elems[0]
         elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
             network_urn = nodes[0]['component_manager_id']
-            network_elem = xml.add_element('network', name = Xrn(network_urn).get_hrn())     
+            network_elem = xml.add_element('network', name = Xrn(network_urn).get_hrn())
         else:
             network_elem = xml
 
@@ -135,7 +135,10 @@ class SFAv1Node:
             # get slivers
             node['slivers'] = SFAv1Sliver.get_slivers(node_elem)
             # get tags
-            node['tags'] =  SFAv1PLTag.get_pl_tags(node_elem, ignore=Node.fields)
+            node['tags'] =  SFAv1PLTag.get_pl_tags(node_elem, ignore=Node.fields+["hardware_type"])
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
 
             # temporary... play nice with old slice manager rspec
             if not node['component_name']:
index 5cccf97..b39184a 100755 (executable)
@@ -51,32 +51,39 @@ class PGRSpecConverter:
 
         # get network
         networks = pg_rspec.version.get_networks()
-        network_hrn = networks[0]
+        network_hrn = networks[0]["name"]
         network_element = sfa_rspec.xml.add_element('network', name=network_hrn, id=network_hrn)
         
         # get nodes
-        pg_nodes_elements = pg_rspec.version.get_node_elements()
+        pg_nodes_elements = pg_rspec.version.get_nodes()
         nodes_with_slivers = pg_rspec.version.get_nodes_with_slivers()
         i = 1
-        for pg_node_element in pg_nodes_elements:
-            attribs = dict(pg_node_element.attrib.items()) 
+        for pg_node in pg_nodes_elements:
+            attribs = dict(pg_node.items())
             attribs['id'] = 'n'+str(i)
             
             node_element = network_element.add_element('node')
             for attrib in attribs:
-                node_element.set(attrib, attribs[attrib])
-            urn = pg_node_element.xpath('@component_id', namespaces=pg_rspec.namespaces)
+                if type(attribs[attrib]) == str:
+                    node_element.set(attrib, attribs[attrib])
+            urn = pg_node["component_id"]
             if urn:
-                urn = urn[0]
+                if type(urn)==list:
+                    # legacy code, not sure if urn is ever a list...
+                    urn = urn[0]
                 hostname = Xrn.urn_split(urn)[-1]
                 hostname_element = node_element.add_element('hostname')
                 hostname_element.set_text(hostname)
                 if hostname in nodes_with_slivers:
-                    node_element.add_element('sliver')  
+                    node_element.add_element('sliver')
+
+            for hardware_type in pg_node["hardware_types"]:
+                if "name" in hardware_type:
+                    node_element.add_element("hardware_type", name=hardware_type["name"])
                      
             # just copy over remaining child elements  
-            for child in pg_node_element.getchildren():
-                node_element.append(transform(child).getroot())
+            #for child in pg_node_element.getchildren():
+            #    node_element.append(transform(child).getroot())
             i = i+1
  
         return sfa_rspec.toxml()
index f8bbe80..b957e51 100755 (executable)
@@ -22,7 +22,7 @@ class RSpec:
         elif version:
             self.create(version)
         else:
-            raise InvalidRSpec("No RSpec or version sepcified. Must specify a valid rspec string or a valid version") 
+            raise InvalidRSpec("No RSpec or version specified. Must specify a valid rspec string or a valid version") 
     def create(self, version=None):
         """
         Create root element
index 3e7500c..5d7358f 100644 (file)
@@ -13,7 +13,7 @@ class VersionManager:
         versions_path = path + os.sep + 'versions'
         versions_module_path = 'sfa.rspecs.versions'
         valid_module = lambda x: os.path.isfile(os.sep.join([versions_path, x])) \
-                        and not x.endswith('.pyc') and x not in ['__init__.py']
+                        and x.endswith('.py') and x !=  '__init__.py'
         files = [f for f in os.listdir(versions_path) if valid_module(f)]
         for filename in files:
             basename = filename.split('.')[0]
@@ -69,11 +69,23 @@ class VersionManager:
             raise InvalidRSpec("Unkwnown RSpec schema: %s" % schema)
         return retval
 
+def show_by_string(string):
+    try:
+        print v.get_version(string)
+    except Exception,e:
+        print e
+def show_by_schema(string):
+    try:
+        print v.get_version_by_schema(string)
+    except Exception,e:
+        print e
+
 if __name__ == '__main__':
     v = VersionManager()
     print v.versions
-    print v.get_version('sfa 1') 
-    print v.get_version('protogeni 2') 
-    print v.get_version('protogeni 2 advertisement') 
-    print v.get_version_by_schema('http://www.protogeni.net/resources/rspec/2/ad.xsd') 
+    show_by_string('sfa 1') 
+    show_by_string('protogeni 2') 
+    show_by_string('protogeni 2 advertisement') 
+    show_by_schema('http://www.protogeni.net/resources/rspec/2/ad.xsd') 
+    show_by_schema('http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd')
 
index b52b5f0..186e101 100644 (file)
@@ -27,7 +27,7 @@ class PGv2(BaseVersion):
             if 'component_manager_id' in node.attrib:
                 network_urn = node.get('component_manager_id')
                 if network_urn.startswith("urn:"):
-                    network_hrn = Xrn(network_urn).get_hrn()[0]
+                    network_hrn = Xrn(network_urn).get_hrn()
                 else:
                     # some component_manager_ids are hrns instead of urns??
                     network_hrn = network_urn
@@ -120,10 +120,18 @@ class PGv2(BaseVersion):
             
             if not requested_sliver_type:
                 continue
-            sliver = {'name': requested_sliver_type,
+            sliver = {'type': requested_sliver_type,
                      'pl_tags': attributes}
 
-            # remove existing sliver_type tags
+            # remove available element
+            for available_elem in node_elem.xpath('./default:available | ./available'):
+                node_elem.remove(available_elem)
+            
+            # remove interface elements
+            for interface_elem in node_elem.xpath('./default:interface | ./interface'):
+                node_elem.remove(interface_elem)
+        
+            # remove existing sliver_type elements
             for sliver_type in node_elem.get('slivers', []):
                 node_elem.element.remove(sliver_type.element)
 
index 3fe60e5..6bedec6 100644 (file)
@@ -1,6 +1,6 @@
 from sfa.rspecs.versions.pgv2 import PGv2
 
-class PGv3(PGv2):
+class GENIv3(PGv2):
     type = 'GENI'
     content_type = 'ad'
     version = '3'
@@ -14,21 +14,21 @@ class PGv3(PGv2):
     elements = []
 
 
-class PGv3Ad(PGv3):
+class GENIv3Ad(GENIv3):
     enabled = True
     content_type = 'ad'
     schema = 'http://www.geni.net/resources/rspec/3/ad.xsd'
     template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
 
-class PGv3Request(PGv3):
+class GENIv3Request(GENIv3):
     enabled = True
     content_type = 'request'
     schema = 'http://www.geni.net/resources/rspec/3/request.xsd'
     template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
 
-class PGv2Manifest(PGv3):
+class GENIv2Manifest(GENIv3):
     enabled = True
     content_type = 'manifest'
     schema = 'http://www.geni.net/resources/rspec/3/manifest.xsd'
-    template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+    template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
      
index ec27971..39bbac5 100644 (file)
@@ -93,6 +93,7 @@ class SFAv1(BaseVersion):
         attributes = []
         nodes_with_slivers = self.get_nodes_with_slivers()
         for default_attribute in self.get_default_sliver_attributes(network):
+            attribute = default_attribute.copy()
             attribute['node_id'] = None
             attributes.append(attribute)
         for node in nodes_with_slivers:
@@ -194,6 +195,9 @@ class SFAv1(BaseVersion):
         Merge contents for specified rspec with current rspec
         """
 
+        if not in_rspec:
+            return
+
         from sfa.rspecs.rspec import RSpec
         if isinstance(in_rspec, RSpec):
             rspec = in_rspec
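For reference, a minimal sketch of why the copy() added above matters; the attribute values are hypothetical:

    # without copy(), resetting node_id would mutate the shared default attribute in place
    default_attribute = {'name': 'initscript', 'value': 'myscript', 'node_id': 7}
    attribute = default_attribute.copy()
    attribute['node_id'] = None
    print(default_attribute['node_id'])    # still 7: the default is left untouched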
index c18452b..ad5cbe1 100644 (file)
@@ -15,8 +15,6 @@ from sfa.server.xmlrpcapi import XmlrpcApi
 
 from sfa.client.return_value import ReturnValue
 
-# thgen xxx fixme this is wrong all right, but temporary, will use generic
-from sfa.storage.table import SfaTable
 
 ####################
 class SfaApi (XmlrpcApi): 
@@ -93,9 +91,9 @@ class SfaApi (XmlrpcApi):
         return server
                
         
-    def getCredential(self):
+    def getCredential(self, minimumExpiration=0):
         """
-        Return a valid credential for this interface. 
+        Return a valid credential for this interface.
         """
         type = 'authority'
         path = self.config.SFA_DATA_DIR
@@ -106,7 +104,7 @@ class SfaApi (XmlrpcApi):
             cred = Credential(filename = cred_filename)
             # make sure cred isnt expired
             if not cred.get_expiration or \
-               datetime.datetime.utcnow() < cred.get_expiration():    
+               datetime.datetime.utcnow() + datetime.timedelta(seconds=minimumExpiration) < cred.get_expiration():
                 return cred.save_to_string(save_parents=True)
 
         # get a new credential
@@ -163,7 +161,8 @@ class SfaApi (XmlrpcApi):
             auth_hrn = hrn
         auth_info = self.auth.get_auth_info(auth_hrn)
         # xxx thgen fixme - use SfaTable hardwired for now 
-        #table = self.SfaTable()
+        # thgen xxx fixme this is wrong all right, but temporary, will use generic
+        from sfa.storage.table import SfaTable
         table = SfaTable()
         records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
         if not records:
@@ -238,6 +237,7 @@ class SfaApi (XmlrpcApi):
 
     def prepare_response_v2_am(self, result):
         response = {
+            'geni_api': 2,             
             'code': self.get_geni_code(result),
             'value': self.get_geni_value(result),
             'output': self.get_geni_output(result),
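For reference, a sketch of how the new minimumExpiration argument is meant to be used when deciding whether the cached credential can be reused; names and values are illustrative:

    import datetime

    def cached_credential_usable(expiration, minimumExpiration=0):
        # reuse the cached credential only if it stays valid for at least
        # minimumExpiration more seconds, mirroring the check in getCredential
        return datetime.datetime.utcnow() + datetime.timedelta(seconds=minimumExpiration) < expiration

    two_hours_left = datetime.datetime.utcnow() + datetime.timedelta(hours=2)
    print(cached_credential_usable(two_hours_left, minimumExpiration=3600))   # True: one hour of margin is enough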
index 46c6699..f9f807e 100644 (file)
@@ -5,13 +5,12 @@
 ##
 
 from types import StringTypes
-
 from sfa.trust.gid import GID
-
 from sfa.storage.parameter import Parameter
 from sfa.util.xrn import get_authority
 from sfa.storage.row import Row
 from sfa.util.xml import XML 
+from sfa.util.sfalogging import logger
 
 class SfaRecord(Row):
     """ 
@@ -33,14 +32,13 @@ class SfaRecord(Row):
     of different types.
     """
 
-    table_name = 'sfa'
-    
-    primary_key = 'record_id'
+#    table_name = 'sfa'
+#    primary_key = 'record_id'
 
     ### the wsdl generator assumes this is named 'fields'
     internal_fields = {
-        'record_id': Parameter(int, 'An id that uniquely identifies this record', ro=True),
-        'pointer': Parameter(int, 'An id that uniquely identifies this record in an external database ')
+        'record_id': Parameter(int, "An id that uniquely identifies this record", ro=True),
+        'pointer': Parameter(int, "An id that uniquely identifies this record in an external database")
     }
 
     fields = {
@@ -49,8 +47,8 @@ class SfaRecord(Row):
         'hrn': Parameter(str, "Human readable name of object"),
         'gid': Parameter(str, "GID of the object"),
         'type': Parameter(str, "Record type"),
-        'last_updated': Parameter(int, 'Date and time of last update', ro=True),
-        'date_created': Parameter(int, 'Date and time this record was created', ro=True),
+        'last_updated': Parameter(int, "Date and time of last update", ro=True),
+        'date_created': Parameter(int, "Date and time this record was created", ro=True),
     }
     all_fields = dict(fields.items() + internal_fields.items())
     ##
@@ -62,13 +60,15 @@ class SfaRecord(Row):
     # @param pointer is a pointer to a PLC record
     # @param dict if !=None, then fill in this record from the dictionary
 
-    def __init__(self, hrn=None, gid=None, type=None, pointer=None, peer_authority=None, dict=None, string=None):
+    def __init__(self, hrn=None, gid=None, type=None, pointer=None, authority=None, 
+                 peer_authority=None, dict=None, string=None):
         self.dirty = True
         self.hrn = None
         self.gid = None
         self.type = None
         self.pointer = None
         self.set_peer_auth(peer_authority)
+        self.set_authority(authority)
         if hrn:
             self.set_name(hrn)
         if gid:
@@ -112,6 +112,17 @@ class SfaRecord(Row):
         self['hrn'] = hrn
         self.dirty = True
 
+    def set_authority(self, authority):
+        """
+        Set the authority
+        """
+        if not authority:
+            authority = ""
+        self.authority = authority
+        self['authority'] = authority
+        self.dirty = True    
+        
+
     ##
     # Set the GID of the record
     #
@@ -302,9 +313,9 @@ class SfaRecord(Row):
         """
         recorddict = self.as_dict()
         filteredDict = dict([(key, val) for (key, val) in recorddict.iteritems() if key in self.fields.keys()])
-        record = XML('<record/>')
-        record.parse_dict(filteredDict)
-        str = record.toxml()
+        xml_record = XML('<record/>')
+        xml_record.parse_dict(filteredDict)
+        str = xml_record.toxml()
         return str
 
     ##
@@ -318,8 +329,8 @@ class SfaRecord(Row):
         """
         #dict = xmlrpclib.loads(str)[0][0]
 
-        record = XML(str)
-        self.load_from_dict(record.todict())
+        xml_record = XML(str)
+        self.load_from_dict(xml_record.todict())
 
     ##
     # Dump the record to stdout
@@ -353,9 +364,54 @@ class SfaRecord(Row):
                 else:    
                     print "     %s: %s" % (key, self[key])
     
+    def summary_string(self):
+        return "Record(record_id=%s, hrn=%s, type=%s, authority=%s, pointer=%s)" % \
+                (self.get('record_id'), self.get('hrn'), self.get('type'), self.get('authority'), \
+                 self.get('pointer'))
+
     def getdict(self):
         return dict(self)
-    
+   
+    def sync(self):
+        """ 
+        Sync this record with the database.
+        """ 
+        from sfa.storage.table import SfaTable
+        table = SfaTable()
+        filter = {}
+        if self.get('record_id'):
+            filter['record_id'] = self.get('record_id')
+        if self.get('hrn') and self.get('type'):
+            filter['hrn'] = self.get('hrn') 
+            filter['type'] = self.get('type')
+            if self.get('pointer'):
+                filter['pointer'] = self.get('pointer')
+        existing_records = table.find(filter)
+        if not existing_records:
+            table.insert(self)
+        else:
+            existing_record = existing_records[0]
+            self['record_id'] = existing_record['record_id']
+            table.update(self) 
+
+    def delete(self):
+        """
+        Remove record from the database.
+        """
+        from sfa.storage.table import SfaTable
+        table = SfaTable()
+        filter = {}
+        if self.get('record_id'):
+            filter['record_id'] = self.get('record_id')
+        if self.get('hrn') and self.get('type'):
+            filter['hrn'] = self.get('hrn')
+            filter['type'] = self.get('type')
+            if self.get('pointer'):
+                filter['pointer'] = self.get('pointer')
+        if filter:
+            existing_records = table.find(filter)
+            for record in existing_records:
+                table.remove(record)
 
 class UserRecord(SfaRecord):
 
@@ -385,7 +441,6 @@ class NodeRecord(SfaRecord):
     fields = {
         'hostname': Parameter(str, 'This nodes dns name'),
         'node_type': Parameter(str, 'Type of node this is'),
-        'node_type': Parameter(str, 'Type of node this is'),
         'latitude': Parameter(str, 'latitude'),
         'longitude': Parameter(str, 'longitude'),
         }
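For reference, a usage sketch for the new sync() and delete() helpers, assuming the SfaTable backend they import internally is available; the hrn and pointer are hypothetical:

    # create-or-update, inspect, then remove a record
    record = SfaRecord(hrn='plc.site.my_slice', type='slice', pointer=42)
    record.sync()                      # insert if no matching row exists, otherwise update it
    print(record.summary_string())
    record.delete()                    # remove rows matching record_id or (hrn, type, pointer)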
index 129f514..ef66844 100644 (file)
@@ -55,3 +55,4 @@ class Row(dict):
             raise SfaInvalidArgument, "'%s' not in the future" % human
 
         return human
+
index f0a2d71..595812b 100644 (file)
@@ -89,7 +89,7 @@ def convert_public_key(key):
 
     # we can only convert rsa keys
     if "ssh-dss" in key:
-        return None
+        raise Exception, "keyconvert: dss keys are not supported"  
 
     (ssh_f, ssh_fn) = tempfile.mkstemp()
     ssl_fn = tempfile.mktemp()
@@ -103,20 +103,22 @@ def convert_public_key(key):
     # that it can be expected to see why it failed.
     # TODO: for production, cleanup the temporary files
     if not os.path.exists(ssl_fn):
-        return None
+        raise Exception, "keyconvert: generated certificate not found. keyconvert may have failed." 
 
     k = Keypair()
     try:
         k.load_pubkey_from_file(ssl_fn)
+        return k
     except:
         logger.log_exc("convert_public_key caught exception")
-        k = None
+        raise
+    finally:
+        # remove the temporary files
+        if os.path.exists(ssh_fn):
+            os.remove(ssh_fn)
+        if os.path.exists(ssl_fn):
+            os.remove(ssl_fn)
 
-    # remove the temporary files
-    os.remove(ssh_fn)
-    os.remove(ssl_fn)
-
-    return k
 
 ##
 # Public-private key pairs are implemented by the Keypair class.
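For reference, the error handling contract changes here: convert_public_key now raises on failure instead of returning None, and the temporary files are removed in a finally block. A sketch of the new calling convention, assuming the function lives in sfa.trust.certificate; the key material is a placeholder:

    from sfa.trust.certificate import convert_public_key

    ssh_pubkey = "ssh-rsa AAAAB3NzaC1yc2E... user@host"   # hypothetical public key
    try:
        keypair = convert_public_key(ssh_pubkey)
    except Exception, e:
        print("keyconvert failed: %s" % e)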
index 7f34757..733884e 100644 (file)
@@ -51,7 +51,7 @@ from sfa.trust.gid import GID
 from sfa.util.xrn import urn_to_hrn, hrn_authfor_hrn
 
-# 2 weeks, in seconds 
-DEFAULT_CREDENTIAL_LIFETIME = 86400 * 14
+# 31 days, in seconds
+DEFAULT_CREDENTIAL_LIFETIME = 86400 * 31
 
 
 # TODO:
index c5c6a55..34f4dce 100644 (file)
@@ -5,13 +5,22 @@ import time
 
 from sfa.util.sfalogging import logger
 
+DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ"
+
 def utcparse(input):
     """ Translate a string into a time using dateutil.parser.parse but make sure it's in UTC time and strip
 the timezone, so that it's compatible with normal datetime.datetime objects.
 
 For safety this can also handle inputs that are either timestamps, or datetimes
 """
-    
+    # prepare the input for the checks below by
+    # casting strings ('1327098335') to ints
+    if isinstance(input, StringTypes):
+        try:
+            input = int(input)
+        except ValueError:
+            pass
+          
     if isinstance (input, datetime.datetime):
         logger.warn ("argument to utcparse already a datetime - doing nothing")
         return input
@@ -20,10 +29,19 @@ For safety this can also handle inputs that are either timestamps, or datetimes
         if t.utcoffset() is not None:
             t = t.utcoffset() + t.replace(tzinfo=None)
         return t
-    elif isinstance (input, (int,float)):
+    elif isinstance (input, (int,float,long)):
         return datetime.datetime.fromtimestamp(input)
     else:
         logger.error("Unexpected type in utcparse [%s]"%type(input))
 
-def epochparse(input):
-    return time.strftime("%Y-%d-%m-T%H:%M:%SZ", time.localtime(input)) 
+def datetime_to_string(input):
+    return datetime.datetime.strftime(input, DATEFORMAT)
+
+def datetime_to_utc(input):
+    return time.gmtime(datetime_to_epoch(input))    
+
+def datetime_to_epoch(input):
+    return int(time.mktime(input.timetuple()))
+
+
+
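For reference, a round-trip sketch for the helpers added above, assuming they are importable as sfa.util.sfatime:

    from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch

    dt = utcparse('2012-01-16T14:17:48Z')   # ISO string -> naive UTC datetime
    dt2 = utcparse('1326723468')            # numeric strings are cast to int, then treated as timestamps
    print(datetime_to_string(dt))           # back to the '%Y-%m-%dT%H:%M:%SZ' format
    print(datetime_to_epoch(dt))            # seconds since the epoch (uses time.mktime, hence local-time based)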
index 90693f2..d438e24 100755 (executable)
@@ -198,8 +198,9 @@ class XML:
         # set schema
         for key in self.root.attrib.keys():
             if key.endswith('schemaLocation'):
-                # schema location should be at the end of the list
-                schema_parts  = self.root.attrib[key].split(' ')
+                # schemaLocation should be at the end of the list.
+                # Use list comprehension to filter out empty strings 
+                schema_parts  = [x for x in self.root.attrib[key].split(' ') if x]
                 self.schema = schema_parts[1]    
                 namespace, schema  = schema_parts[0], schema_parts[1]
                 break
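For reference, a sketch of why empty tokens are filtered out of schemaLocation; the attribute value below has a hypothetical doubled space:

    schema_location = "http://www.geni.net/resources/rspec/3  http://www.geni.net/resources/rspec/3/ad.xsd"
    schema_parts = [x for x in schema_location.split(' ') if x]
    namespace, schema = schema_parts[0], schema_parts[1]
    print(schema)    # the .xsd URL, even with the stray extra space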