fix merge problem
author	smbaker <smbaker@fc8clean.lan>
	Tue, 8 Nov 2011 03:13:04 +0000 (19:13 -0800)
committer	smbaker <smbaker@fc8clean.lan>
	Tue, 8 Nov 2011 03:13:04 +0000 (19:13 -0800)
108 files changed:
Makefile
config/topology [new file with mode: 0644]
setup.py
sfa.spec
sfa/client/Makefile
sfa/client/sfascan.py
sfa/client/sfi.py
sfa/client/xmlrpcprotocol.py [moved from sfa/util/xmlrpcprotocol.py with 98% similarity]
sfa/generic/__init__.py
sfa/generic/architecture.txt [new file with mode: 0644]
sfa/generic/pl.py
sfa/generic/plcm.py [deleted file]
sfa/importer/__init__.py [new file with mode: 0644]
sfa/importer/sfa-import-plc.py [moved from sfa/plc/sfa-import-plc.py with 99% similarity]
sfa/importer/sfa-nuke-plc.py [moved from sfa/plc/sfa-nuke-plc.py with 100% similarity]
sfa/importer/sfaImport.py [moved from sfa/plc/sfaImport.py with 100% similarity]
sfa/managers/aggregate_manager.py
sfa/managers/aggregate_manager_eucalyptus.py
sfa/managers/aggregate_manager_max.py
sfa/managers/aggregate_manager_openflow.py [deleted file]
sfa/managers/component_manager_pl.py
sfa/managers/managerwrapper.py
sfa/managers/registry_manager.py
sfa/managers/slice_manager.py
sfa/managers/vini/utils.py
sfa/managers/vini/vini_network.py
sfa/methods/CreateGid.py
sfa/methods/CreateSliver.py
sfa/methods/DeleteSliver.py
sfa/methods/GetCredential.py
sfa/methods/GetGids.py
sfa/methods/GetSelfCredential.py
sfa/methods/GetTicket.py
sfa/methods/GetVersion.py
sfa/methods/List.py
sfa/methods/ListResources.py
sfa/methods/ListSlices.py
sfa/methods/RedeemTicket.py
sfa/methods/Register.py
sfa/methods/Remove.py
sfa/methods/RemovePeerObject.py
sfa/methods/RenewSliver.py
sfa/methods/Resolve.py
sfa/methods/SliverStatus.py
sfa/methods/Start.py
sfa/methods/Stop.py
sfa/methods/Update.py
sfa/methods/get_key.py
sfa/methods/remove_peer_object.py
sfa/methods/reset_slice.py
sfa/plc/aggregate.py
sfa/plc/network.py
sfa/plc/nodemanager.py [moved from sfa/util/nodemanager.py with 100% similarity]
sfa/plc/peers.py
sfa/plc/plcomponentdriver.py [moved from sfa/plc/plccomponentapi.py with 79% similarity]
sfa/plc/pldriver.py [moved from sfa/plc/plcsfaapi.py with 72% similarity]
sfa/plc/plshell.py [new file with mode: 0644]
sfa/plc/slices.py
sfa/rspecs/elements/bwlimit.py [new file with mode: 0644]
sfa/rspecs/elements/component_manager.py [new file with mode: 0644]
sfa/rspecs/elements/disk_image.py [new file with mode: 0644]
sfa/rspecs/elements/element.py
sfa/rspecs/elements/execute.py [new file with mode: 0644]
sfa/rspecs/elements/hardware_type.py [new file with mode: 0644]
sfa/rspecs/elements/install.py [new file with mode: 0644]
sfa/rspecs/elements/interface.py
sfa/rspecs/elements/link.py
sfa/rspecs/elements/link_type.py [new file with mode: 0644]
sfa/rspecs/elements/location.py [new file with mode: 0644]
sfa/rspecs/elements/login.py [new file with mode: 0644]
sfa/rspecs/elements/network.py
sfa/rspecs/elements/node.py
sfa/rspecs/elements/pl_tag.py [new file with mode: 0644]
sfa/rspecs/elements/property.py [new file with mode: 0644]
sfa/rspecs/elements/services.py [new file with mode: 0644]
sfa/rspecs/elements/sliver.py
sfa/rspecs/elements/tag.py [new file with mode: 0644]
sfa/rspecs/elements/versions/element_version.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2Link.py
sfa/rspecs/elements/versions/pgv2Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2Services.py [new file with mode: 0644]
sfa/rspecs/elements/versions/pgv2SliverType.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Network.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Node.py [new file with mode: 0644]
sfa/rspecs/elements/versions/sfav1Sliver.py [new file with mode: 0644]
sfa/rspecs/rspec_elements.py
sfa/server/aggregate.py
sfa/server/component.py
sfa/server/interface.py
sfa/server/modpython/SfaAggregateModPython.py
sfa/server/modpython/SfaRegistryModPython.py
sfa/server/modpython/SfaSliceMgrModPython.py
sfa/server/registry.py
sfa/server/sfa-clean-peer-records.py
sfa/server/sfa-start.py
sfa/server/sfa_component_setup.py
sfa/server/sfaapi.py
sfa/server/threadmanager.py [moved from sfa/util/threadmanager.py with 100% similarity]
sfa/util/config.py
sfa/util/defaultdict.py [new file with mode: 0644]
sfa/util/record.py
sfa/util/rspecHelper.py [deleted file]
sfa/util/sfalogging.py
sfa/util/soapprotocol.py [deleted file]
sfa/util/ssl_socket.py [deleted file]
sfa/util/topology.py [new file with mode: 0644]
sfa/util/xml.py
tests/testInterfaces.py

index 04ed4ba..72fa41a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -15,10 +15,13 @@ uninstall: python-uninstall tests-uninstall
 
 .PHONY: all install clean uninstall
 
-VERSIONTAG=0.0-0-should.be-redefined-by-specfile
+##########
+rpmversion:=$(shell rpm -q --specfile sfa.spec --queryformat="%{version}\n" | head -1)
+# somehow %{taglevel} is empty, turns out %{release} has what we want
+rpmtaglevel:=$(shell rpm -q --specfile sfa.spec --queryformat="%{release}\n" 2> /dev/null | head -1)
+VERSIONTAG=$(rpmversion)-$(rpmtaglevel)
 SCMURL=should-be-redefined-by-specfile
 
-##########
 python: version
 
 version: sfa/util/version.py
@@ -125,7 +128,7 @@ sfiAddAttribute.py sfiAddSliver.py sfiDeleteAttribute.py sfiDeleteSliver.py sfiL
 sfiListSlivers.py sfadump.py
 
 BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
-       ./sfa/plc/sfa-import-plc.py ./sfa/plc/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
+       ./sfa/importer/sfa-import-plc.py ./sfa/importer/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
        $(foreach client,$(CLIENTS),./sfa/client/$(client))
 
 sync:
diff --git a/config/topology b/config/topology
new file mode 100644 (file)
index 0000000..24a8e13
--- /dev/null
@@ -0,0 +1,20 @@
+# Links in the physical topology, gleaned from looking at the Internet2
+# topology map.  Link (a, b) connects sites with IDs a and b.
+#
+# 2 12  # I2 Princeton - New York
+# 11 13 # I2 Chicago - Wash DC
+# 11 15 # I2 Chicago - Atlanta
+# 11 16 # I2 Chicago - CESNET
+# 11 17 # I2 Chicago - Kansas City
+# 12 13 # I2 New York - Wash DC
+# 13 15 # I2 Wash DC - Atlanta
+# 14 15 # Ga Tech - I2 Atlanta
+# 15 19 # I2 Atlanta - Houston
+# 17 19 # I2 Kansas City - Houston
+# 17 22 # I2 Kansas City - Salt Lake City
+# 17 24 # I2 Kansas City - UMKC
+# 19 20 # I2 Houston - Los Angeles
+# 20 21 # I2 Los Angeles - Seattle
+# 20 22 # I2 Los Angeles - Salt Lake City
+# 21 22 # I2 Seattle - Salt Lake City
+
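
The topology file added above is plain text: each non-comment line names one physical link as a pair of site IDs, and '#' starts a comment (every sample line above is still commented out). Below is a minimal parser sketch for that format, assuming whitespace-separated integer IDs; it is an illustration only, not the code from sfa/util/topology.py, which is not shown in this diff:

def parse_topology(filename):
    # returns a list of (site_id_a, site_id_b) tuples
    links = []
    for line in open(filename):
        line = line.split('#')[0].strip()   # drop comments and surrounding whitespace
        if not line:
            continue
        a, b = line.split()[:2]
        links.append((int(a), int(b)))
    return links

# e.g. parse_topology('/etc/sfa/topology') yields [] for the file above,
# since all of its link lines are commented out.
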
index 61c087e..a9735a0 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -12,8 +12,8 @@ from distutils.core import setup
 bins = [ 
     'config/sfa-config-tty',
     'config/gen-sfa-cm-config.py',
-    'sfa/plc/sfa-import-plc.py', 
-    'sfa/plc/sfa-nuke-plc.py', 
+    'sfa/importer/sfa-import-plc.py', 
+    'sfa/importer/sfa-nuke-plc.py', 
     'sfa/server/sfa-ca.py', 
     'sfa/server/sfa-start.py', 
     'sfa/server/sfa-clean-peer-records.py', 
@@ -43,7 +43,7 @@ package_dirs = [
     'sfa/methods',
     'sfa/generic',
     'sfa/managers',
-    'sfa/managers/vini',
+    'sfa/importer',
     'sfa/plc',
     'sfa/rspecs',
     'sfa/rspecs/elements',
@@ -60,6 +60,7 @@ data_files = [('/etc/sfa/', [ 'config/aggregates.xml',
                               'config/registries.xml',
                               'config/default_config.xml',
                               'config/sfi_config',
+                              'config/topology',
                               'sfa/managers/pl/pl.rng',
                               'sfa/trust/credential.xsd',
                               'sfa/trust/top.xsd',
index c470c49..55b6ec3 100644 (file)
--- a/sfa.spec
+++ b/sfa.spec
@@ -1,6 +1,6 @@
 %define name sfa
 %define version 1.1
-%define taglevel 1
+%define taglevel 2
 
 %define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
 %global python_sitearch        %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
@@ -144,6 +144,7 @@ rm -rf $RPM_BUILD_ROOT
 /etc/sfa/sig.xsd
 /etc/sfa/xml.xsd
 /etc/sfa/protogeni-rspec-common.xsd
+/etc/sfa/topology
 %{_bindir}/sfa-config-tty
 %{_bindir}/sfa-import-plc.py*
 %{_bindir}/sfa-clean-peer-records.py*
@@ -196,6 +197,14 @@ fi
 [ "$1" -ge "1" ] && service sfa-cm restart || :
 
 %changelog
+* Mon Nov 07 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.1-2
+- checkpoint tag: use SFA_GENERIC_FLAVOUR instead of SFA_*_TYPE
+- improvements in the pgv2 rspecs
+- driver separated from api
+- code starts moving around where it belongs
+- sfascan caches getversion across invocations
+- vini topology extracted as a config file
+
 * Fri Oct 28 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.1-1
 - first support for protogeni rspecs is working
 - vini no longer needs a specific manager
index af366fc..061e1b2 100644 (file)
--- a/sfa/client/Makefile
+++ b/sfa/client/Makefile
@@ -1,5 +1,6 @@
 # recompute the SFA graphs from different locations
 
+## use -vv for the full story
 SFASCAN = ./sfascan.py -v
 
 # AMs, at least MyPLC AMs, are boring
@@ -40,8 +41,8 @@ BUNDLES-LR += http://www.planet-lab.jp:12347/@auto-plj-sa
 BUNDLES-LR += http://www.emanicslab.org:12345/@auto-elc-reg 
 BUNDLES-LR += http://www.emanicslab.org:12347/@auto-elc-sa
 
-#EXTENSIONS := png svg
-EXTENSIONS := png
+EXTENSIONS := png svg
+#EXTENSIONS := png
 
 ####################
 ALL += $(foreach bundle,$(BUNDLES),$(word 2,$(subst @, ,$(bundle))))
@@ -89,8 +90,9 @@ $(foreach bundle,$(BUNDLES),$(eval $(call bundle_version_target,$(bundle))))
 
 ####################
 clean:
-       rm -f auto-*.{out,version}
+       rm -f .auto*
        $(foreach extension,$(EXTENSIONS),rm -rf auto-*.$(extension);)
+       $(SFASCAN) -c
 
 DATE=$(shell date '+%Y-%m-%d')
 PUBEXTENSIONS=png
index 494a727..f252378 100755 (executable)
--- a/sfa/client/sfascan.py
+++ b/sfa/client/sfascan.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python
 
-import sys
+import sys, os.path
+import pickle
+import time
 import socket
 import traceback
 from urlparse import urlparse
@@ -11,7 +13,7 @@ from optparse import OptionParser
 
 from sfa.client.sfi import Sfi
 from sfa.util.sfalogging import logger, DEBUG
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 
 def url_hostname_port (url):
     if url.find("://")<0:
@@ -28,11 +30,82 @@ def url_hostname_port (url):
     else:
         return (url,parts[0],parts[1])
 
+### a very simple cache mechanism so that successive runs (see make) 
+### will go *much* faster
+### assuming everything is sequential, as simple as it gets
+### { url -> (timestamp,version)}
+class VersionCache:
+    def __init__ (self, filename=None, expires=60*60):
+        # default is to store cache in the same dir as argv[0]
+        if filename is None:
+            filename=os.path.join(os.path.dirname(sys.argv[0]),"sfascan-version-cache.pickle")
+        self.filename=filename
+        self.expires=expires
+        self.url2version={}
+        self.load()
+
+    def load (self):
+        try:
+            infile=file(self.filename,'r')
+            self.url2version=pickle.load(infile)
+            infile.close()
+        except:
+            logger.debug("Cannot load version cache, restarting from scratch")
+            self.url2version = {}
+        logger.debug("loaded version cache with %d entries %s"%(len(self.url2version),self.url2version.keys()))
+
+    def save (self):
+        try:
+            outfile=file(self.filename,'w')
+            pickle.dump(self.url2version,outfile)
+            outfile.close()
+        except:
+            logger.log_exc ("Cannot save version cache into %s"%self.filename)
+    def clean (self):
+        try:
+            retcod=os.unlink(self.filename)
+            logger.info("Cleaned up version cache %s, retcod=%d"%(self.filename,retcod))
+        except:
+            logger.info ("Could not unlink version cache %s"%self.filename)
+
+    def show (self):
+        entries=len(self.url2version)
+        print "version cache from file %s has %d entries"%(self.filename,entries)
+        key_values=self.url2version.items()
+        def old_first (kv1,kv2): return int(kv1[1][0]-kv2[1][0])
+        key_values.sort(old_first)
+        for key_value in key_values:
+            (url,tuple) = key_value
+            (timestamp,version) = tuple
+            how_old = time.time()-timestamp
+            if how_old<=self.expires:
+                print url,"-- %d seconds ago"%how_old
+            else:
+                print "OUTDATED",url,"(%d seconds ago, expires=%d)"%(how_old,self.expires)
+    
+    # turns out we might have trailing slashes or not
+    def normalize (self, url):
+        return url.strip("/")
+        
+    def set (self,url,version):
+        url=self.normalize(url)
+        self.url2version[url]=( time.time(), version)
+    def get (self,url):
+        url=self.normalize(url)
+        try:
+            (timestamp,version)=self.url2version[url]
+            how_old = time.time()-timestamp
+            if how_old<=self.expires: return version
+            else: return None
+        except:
+            return None
+
 ###
 class Interface:
 
-    def __init__ (self,url):
+    def __init__ (self,url,verbose=False):
         self._url=url
+        self.verbose=verbose
         try:
             (self._url,self.hostname,self.port)=url_hostname_port(url)
             self.ip=socket.gethostbyname(self.hostname)
@@ -54,13 +127,21 @@ class Interface:
 
     # connect to server and trigger GetVersion
     def get_version(self):
+        ### if we already know the answer:
         if self.probed:
             return self._version
+        ### otherwise let's look in the cache file
+        logger.debug("searching in version cache %s"%self.url())
+        cached_version = VersionCache().get(self.url())
+        if cached_version is not None:
+            logger.info("Retrieved version info from cache")
+            return cached_version
+        ### otherwise let's do the hard work
         # dummy to meet Sfi's expectations for its 'options' field
         class DummyOptions:
             pass
         options=DummyOptions()
-        options.verbose=False
+        options.verbose=self.verbose
         options.timeout=10
         try:
             client=Sfi(options)
@@ -68,13 +149,22 @@ class Interface:
             key_file = client.get_key_file()
             cert_file = client.get_cert_file(key_file)
             url=self.url()
-            logger.info('issuing get version at %s'%url)
-            logger.debug("GetVersion, using timeout=%d"%options.timeout)
-            server=xmlrpcprotocol.get_server(url, key_file, cert_file, timeout=options.timeout, verbose=options.verbose)
+            logger.info('issuing GetVersion at %s'%url)
+            # setting timeout here seems to get the call to fail - even though the response time is fast
+            #server=xmlrpcprotocol.server_proxy(url, key_file, cert_file, verbose=self.verbose, timeout=options.timeout)
+            server=xmlrpcprotocol.server_proxy(url, key_file, cert_file, verbose=self.verbose)
             self._version=server.GetVersion()
         except:
+            logger.log_exc("failed to get version")
             self._version={}
+        # so that next run from this process will find out
         self.probed=True
+        # store in version cache so next processes will remember for an hour
+        cache=VersionCache()
+        cache.set(self.url(),self._version)
+        cache.save()
+        logger.debug("Saved version for url=%s in version cache"%self.url())
+        # that's our result
         return self._version
 
     @staticmethod
@@ -155,19 +245,18 @@ class SfaScan:
         while to_scan:
             for interface in to_scan:
                 # performing xmlrpc call
+                logger.info("retrieving/fetching version at interface %s"%interface.url())
                 version=interface.get_version()
-                if self.verbose:
-                    logger.info("GetVersion at interface %s"%interface.url())
-                    if not version:
-                        logger.info("<EMPTY GetVersion(); offline or cannot authenticate>")
-                    else: 
-                        for (k,v) in version.iteritems(): 
-                            if not isinstance(v,dict):
-                                logger.info("\r\t%s:%s"%(k,v))
-                            else:
-                                logger.info(k)
-                                for (k1,v1) in v.iteritems():
-                                    logger.info("\r\t\t%s:%s"%(k1,v1))
+                if not version:
+                    logger.info("<EMPTY GetVersion(); offline or cannot authenticate>")
+                else: 
+                    for (k,v) in version.iteritems(): 
+                        if not isinstance(v,dict):
+                            logger.debug("\r\t%s:%s"%(k,v))
+                        else:
+                            logger.debug(k)
+                            for (k1,v1) in v.iteritems():
+                                logger.debug("\r\t\t%s:%s"%(k1,v1))
                 # 'geni_api' is expected if the call succeeded at all
                 # 'peers' is needed as well as AMs typically don't have peers
                 if 'geni_api' in version and 'peers' in version: 
@@ -206,21 +295,35 @@ def main():
                       help="output filenames (cumulative) - defaults are %r"%default_outfiles)
     parser.add_option("-l","--left-to-right",action="store_true",dest="left_to_right",default=False,
                       help="instead of top-to-bottom")
-    parser.add_option("-v","--verbose",action='store_true',dest='verbose',default=False,
-                      help="verbose")
-    parser.add_option("-d","--debug",action='store_true',dest='debug',default=False,
-                      help="debug")
+    parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0,
+                      help="verbose - can be repeated for more verbosity")
+    parser.add_option("-c", "--clean-cache",action='store_true',
+                      dest='clean_cache',default=False,
+                      help='clean/trash version cache and exit')
+    parser.add_option("-s","--show-cache",action='store_true',
+                      dest='show_cache',default=False,
+                      help='show/display version cache')
+    
     (options,args)=parser.parse_args()
+    logger.enable_console()
+    # apply current verbosity to logger
+    logger.setLevelFromOptVerbose(options.verbose)
+    # figure if we need to be verbose for these local classes that only have a bool flag
+    bool_verbose=logger.getBoolVerboseFromOpt(options.verbose)
+
+    if options.show_cache: 
+        VersionCache().show()
+        sys.exit(0)
+    if options.clean_cache:
+        VersionCache().clean()
+        sys.exit(0)
     if not args:
         parser.print_help()
         sys.exit(1)
+        
     if not options.outfiles:
         options.outfiles=default_outfiles
-    logger.enable_console()
-    if options.debug:
-        options.verbose=True
-        logger.setLevel(DEBUG)
-    scanner=SfaScan(left_to_right=options.left_to_right, verbose=options.verbose)
+    scanner=SfaScan(left_to_right=options.left_to_right, verbose=bool_verbose)
     entries = [ Interface(entry) for entry in args ]
     g=scanner.graph(entries)
     logger.info("creating layout")
index 90fb233..fc6a7b4 100755 (executable)
--- a/sfa/client/sfi.py
+++ b/sfa/client/sfi.py
@@ -23,7 +23,7 @@ from sfa.util.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, Auth
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.rspec_converter import RSpecConverter
 from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 from sfa.util.config import Config
 from sfa.util.version import version_core
 from sfa.util.cache import Cache
@@ -398,9 +398,9 @@ class Sfi:
        self.cert_file = cert_file
        self.cert = GID(filename=cert_file)
        self.logger.info("Contacting Registry at: %s"%self.reg_url)
-       self.registry = xmlrpcprotocol.get_server(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)  
+       self.registry = xmlrpcprotocol.server_proxy(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)  
        self.logger.info("Contacting Slice Manager at: %s"%self.sm_url)
-       self.slicemgr = xmlrpcprotocol.get_server(self.sm_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
+       self.slicemgr = xmlrpcprotocol.server_proxy(self.sm_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
        return
 
     def get_cached_server_version(self, server):
@@ -493,7 +493,7 @@ class Sfi:
             self.logger.info("Getting Registry issued cert")
             self.read_config()
             # *hack.  need to set registyr before _get_gid() is called 
-            self.registry = xmlrpcprotocol.get_server(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
+            self.registry = xmlrpcprotocol.server_proxy(self.reg_url, key_file, cert_file, timeout=self.options.timeout, verbose=self.options.debug)
             gid = self._get_gid(type='user')
             self.registry = None 
             self.logger.info("Writing certificate to %s"%cert_file)
@@ -521,7 +521,7 @@ class Sfi:
         if args:
             hrn = args[0]
         gid = self._get_gid(hrn)
-        self.logger.debug("Sfi.get_gid-> %s",gid.save_to_string(save_parents=True))
+        self.logger.debug("Sfi.get_gid-> %s" % gid.save_to_string(save_parents=True))
         return gid
 
     def _get_gid(self, hrn=None, type=None):
@@ -646,7 +646,7 @@ class Sfi:
        return key_string
 
     # xxx opts undefined
-    def get_component_server_from_hrn(self, hrn):
+    def get_component_proxy_from_hrn(self, hrn):
         # direct connection to the nodes component manager interface
         user_cred = self.get_user_cred().save_to_string(save_parents=True)
         records = self.registry.Resolve(hrn, user_cred)
@@ -655,9 +655,9 @@ class Sfi:
             self.logger.warning("No such component:%r"% opts.component)
         record = records[0]
   
-        return self.get_server(record['hostname'], CM_PORT, self.key_file, self.cert_file)
+        return self.server_proxy(record['hostname'], CM_PORT, self.key_file, self.cert_file)
  
-    def get_server(self, host, port, keyfile, certfile):
+    def server_proxy(self, host, port, keyfile, certfile):
         """
         Return an instance of an xmlrpc server connection    
         """
@@ -666,10 +666,10 @@ class Sfi:
         host_parts = host.split('/')
         host_parts[0] = host_parts[0] + ":" + str(port)
         url =  "http://%s" %  "/".join(host_parts)    
-        return xmlrpcprotocol.get_server(url, keyfile, certfile, timeout=self.options.timeout, verbose=self.options.debug)
+        return xmlrpcprotocol.server_proxy(url, keyfile, certfile, timeout=self.options.timeout, verbose=self.options.debug)
 
     # xxx opts could be retrieved in self.options
-    def get_server_from_opts(self, opts):
+    def server_proxy_from_opts(self, opts):
         """
         Return instance of an xmlrpc connection to a slice manager, aggregate
         or component server depending on the specified opts
@@ -677,10 +677,10 @@ class Sfi:
         server = self.slicemgr
         # direct connection to an aggregate
         if hasattr(opts, 'aggregate') and opts.aggregate:
-            server = self.get_server(opts.aggregate, opts.port, self.key_file, self.cert_file)
+            server = self.server_proxy(opts.aggregate, opts.port, self.key_file, self.cert_file)
         # direct connection to the nodes component manager interface
         if hasattr(opts, 'component') and opts.component:
-            server = self.get_component_server_from_hrn(opts.component)    
+            server = self.get_component_proxy_from_hrn(opts.component)    
  
         return server
     #==========================================================================
@@ -911,7 +911,7 @@ class Sfi:
             if opts.version_registry:
                 server=self.registry
             else:
-                server = self.get_server_from_opts(opts)
+                server = self.server_proxy_from_opts(opts)
             version=server.GetVersion()
         for (k,v) in version.iteritems():
             print "%-20s: %s"%(k,v)
@@ -928,7 +928,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(user_cred, get_authority(self.authority))
             creds.append(delegated_cred)  
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         #results = server.ListSlices(creds, unique_call_id())
         results = server.ListSlices(creds)
         display_list(results)
@@ -939,7 +939,7 @@ class Sfi:
         user_cred = self.get_user_cred().save_to_string(save_parents=True)
         server = self.slicemgr
         call_options = {}
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         
         if args:
             cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
@@ -979,7 +979,7 @@ class Sfi:
 
     # created named slice with given rspec
     def create(self, opts, args):
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         server_version = self.get_cached_server_version(server)
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice')
@@ -1044,7 +1044,7 @@ class Sfi:
             creds.append(delegated_cred)
         rspec_file = self.get_rspec_file(rspec_path) 
         rspec = open(rspec_file).read()
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         ticket_string = server.GetTicket(slice_urn, creds, rspec, [])
         file = os.path.join(self.options.sfi_dir, get_leaf(slice_hrn) + ".ticket")
         self.logger.info("writing ticket to %s"%file)
@@ -1075,7 +1075,7 @@ class Sfi:
         for hostname in hostnames:
             try:
                 self.logger.info("Calling redeem_ticket at %(hostname)s " % locals())
-                server = self.get_server(hostname, CM_PORT, self.key_file, \
+                server = self.server_proxy(hostname, CM_PORT, self.key_file, \
                                          self.cert_file, self.options.debug)
                 server.RedeemTicket(ticket.save_to_string(save_parents=True), slice_cred)
                 self.logger.info("Success")
@@ -1094,7 +1094,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
 
         call_args = [slice_urn, creds]
         if self.server_supports_call_id_arg(server):
@@ -1110,7 +1110,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Start(slice_urn, creds)
     
     # stop named slice
@@ -1122,14 +1122,14 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Stop(slice_urn, creds)
     
     # reset named slice
     def reset(self, opts, args):
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice') 
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         slice_cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
         creds = [slice_cred]
         if opts.delegate:
@@ -1140,7 +1140,7 @@ class Sfi:
     def renew(self, opts, args):
         slice_hrn = args[0]
         slice_urn = hrn_to_urn(slice_hrn, 'slice') 
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         slice_cred = self.get_slice_cred(args[0]).save_to_string(save_parents=True)
         creds = [slice_cred]
         if opts.delegate:
@@ -1162,7 +1162,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         call_args = [slice_urn, creds]
         if self.server_supports_call_id_arg(server):
             call_args.append(unique_call_id())
@@ -1180,7 +1180,7 @@ class Sfi:
         if opts.delegate:
             delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
             creds.append(delegated_cred)
-        server = self.get_server_from_opts(opts)
+        server = self.server_proxy_from_opts(opts)
         return server.Shutdown(slice_urn, creds)         
     
     def print_help (self):
similarity index 98%
rename from sfa/util/xmlrpcprotocol.py
rename to sfa/client/xmlrpcprotocol.py
index 2263b28..bd741a4 100644 (file)
@@ -87,7 +87,7 @@ class XMLRPCServerProxy(xmlrpclib.ServerProxy):
         logger.debug ("xml-rpc %s method:%s"%(self.url,attr))
         return xmlrpclib.ServerProxy.__getattr__(self, attr)
 
-def get_server(url, key_file, cert_file, timeout=None, verbose=False):
+def server_proxy(url, key_file, cert_file, timeout=None, verbose=False):
     transport = XMLRPCTransport(key_file, cert_file, timeout)
     return XMLRPCServerProxy(url, transport, allow_none=True, verbose=verbose)
 
index 843cd7b..ea6ce05 100644 (file)
--- a/sfa/generic/__init__.py
+++ b/sfa/generic/__init__.py
@@ -58,12 +58,18 @@ class Generic:
     def make_api (self, *args, **kwargs):
         # interface is a required arg
         if not 'interface' in kwargs:
-            logger.fatal("Generic.make_api: no interface found")
+            logger.critical("Generic.make_api: no interface found")
         api = self.api_class()(*args, **kwargs)
-        interface=kwargs['interface']
-        # or simpler, interface=api.interface
-        manager = self.make_manager(interface)
-        api.manager = ManagerWrapper(manager,interface)
+        manager = self.make_manager(api.interface)
+        driver = self.make_driver (api.config, api.interface)
+        ### arrange stuff together
+        # add a manager wrapper
+        manager = ManagerWrapper(manager,api.interface)
+        api.manager=manager
+        # insert driver in manager
+        manager.driver=driver
+        # add it in api as well for convenience
+        api.driver=driver
         return api
 
     def make_manager (self, interface):
@@ -74,31 +80,28 @@ class Generic:
         flavour = self.flavour
         message="Generic.make_manager for interface=%s and flavour=%s"%(interface,flavour)
         
-        classname = "%s_class"%interface
+        classname = "%s_manager_class"%interface
         try:
             module = getattr(self,classname)()
             logger.info("%s : %s"%(message,module))
             return module
         except:
-            logger.log_exc(message)
-            logger.fatal("Aborting")
+            logger.log_exc_critical(message)
+        
+    # need interface to select the right driver
+    def make_driver (self, config, interface):
+        flavour = self.flavour
+        message="Generic.make_driver for flavour=%s and interface=%s"%(flavour,interface)
+        
+        if interface == "component":
+            classname = "component_driver_class"
+        else:
+            classname = "driver_class"
+        try:
+            class_obj = getattr(self,classname)()
+            logger.info("%s : %s"%(message,class_obj))
+            return class_obj(config)
+        except:
+            logger.log_exc_critical(message)
         
-# former logic was
-#        basepath = 'sfa.managers'
-#        qualified = "%s.%s_manager_%s"%(basepath,interface,flavour)
-#        generic = "%s.%s_manager"%(basepath,interface)
-#
-#        try: 
-#            manager = __import__(qualified, fromlist=[basepath])
-#            logger.info ("%s: loaded %s"%(message,qualified))
-#        except:
-#            try:
-#                manager = __import__ (generic, fromlist=[basepath])
-#                if flavour != 'pl' : 
-#                    logger.warn ("%s: using generic with flavour!='pl'"%(message))
-#                logger.info("%s: loaded %s"%(message,generic))
-#            except:
-#                logger.log_exc("%s: unable to import either %s or %s"%(message,qualified,generic))
-#                logger.fatal("Aborted")
-#        return manager
         
diff --git a/sfa/generic/architecture.txt b/sfa/generic/architecture.txt
new file mode 100644 (file)
index 0000000..ff63549
--- /dev/null
@@ -0,0 +1,40 @@
+We identify 3 layers in the server-side aspects:
+
+. api: this object reacts to an incoming SFA request
+
+. manager: this implements a given interface, either registry,
+aggregate, or slicemgr
+
+. driver: this object is in charge of actually talking to the
+underlying testbed
+
+-----
+
+the generic layer is in charge of instantiating these and to link them
+as follows:
+* the classes actually used for creating the 3 elements are
+configurable in a flavour (e.g. sfa.generic.pl.py)
+* which is then configured from sfa-config-tty as SFA_GENERIC_FLAVOUR
+
+* a call to make_api will then create the 3 elements with the
+  following layout:
+
+api.manager 
+manager.driver
+api.driver (for convenience)
+
+------
+example
+
+from sfa.generic import Generic
+generic=Generic.the_flavour()
+-> returns an instance of a Generic object with a flavour from the
+   config; by default it would thus be an instance of sfa.generic.pl
+
+api = generic.make_api (...) returns an instance of the given class
+with the arguments passed as arguments to the constructor
+
+------
+more in sfa/generic/__init__.py
+
+
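
Under this layout a server-side service is put together roughly as sketched below; the interface name and keyword arguments passed to make_api are illustrative, as the real constructor arguments are not shown in this diff:

from sfa.generic import Generic

# the_flavour() returns an instance of the flavour selected by SFA_GENERIC_FLAVOUR
# in the config (sfa.generic.pl by default)
generic = Generic.the_flavour()
api = generic.make_api(interface='aggregate')   # illustrative kwargs
# make_api() has now wired the three layers together:
#   api.manager        -> ManagerWrapper around the flavour's aggregate manager class
#   api.manager.driver -> the flavour's driver (PlDriver for the 'pl' flavour)
#   api.driver         -> the same driver object, kept on the api for convenience
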
index 853053d..098a27a 100644 (file)
--- a/sfa/generic/pl.py
+++ b/sfa/generic/pl.py
@@ -1,19 +1,35 @@
 from sfa.generic import Generic
-import sfa.plc.plcsfaapi
+
+import sfa.server.sfaapi
+import sfa.plc.pldriver
 import sfa.managers.registry_manager
 import sfa.managers.slice_manager
 import sfa.managers.aggregate_manager
 
 class pl (Generic):
     
+    # use the standard api class
     def api_class (self):
-        return sfa.plc.plcsfaapi.PlcSfaApi
+        return sfa.server.sfaapi.SfaApi
 
-    def registry_class (self) : 
+    # the manager classes for the server-side services
+    def registry_manager_class (self) : 
         return sfa.managers.registry_manager
-    def slicemgr_class (self) : 
-        return sfa.managers.slice_manager
-    def aggregate_class (self) :
-        return sfa.managers.aggregate_manager
+    def slicemgr_manager_class (self) : 
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        return sfa.plc.pldriver.PlDriver
+
+    # for the component mode, to be run on board planetlab nodes
+    # manager class
+    def component_manager_class (self):
+        return sfa.managers.component_manager_pl
+    # driver_class
+    def component_driver_class (self):
+        return sfa.plc.plcomponentdriver.PlComponentDriver
 
 
diff --git a/sfa/generic/plcm.py b/sfa/generic/plcm.py
deleted file mode 100644 (file)
index dd24d3c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-from sfa.generic.pl import pl
-import sfa.plc.plccomponentapi
-import sfa.managers.component_manager_pl
-
-class plcm (pl):
-
-    def api_class (self):
-        return sfa.plc.plccomponentapi.PlcComponentApi
-
-    def component_class (self):
-        return sfa.managers.component_manager_pl
diff --git a/sfa/importer/__init__.py b/sfa/importer/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
similarity index 99%
rename from sfa/plc/sfa-import-plc.py
rename to sfa/importer/sfa-import-plc.py
index d57b4b8..6873f48 100755 (executable)
@@ -24,7 +24,7 @@ from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_
 from sfa.util.config import Config
 from sfa.util.xrn import Xrn
 
-from sfa.plc.sfaImport import sfaImport
+from sfa.importer.sfaImport import sfaImport
 
 def process_options():
 
index 827b2d9..fb49cb6 100644 (file)
--- a/sfa/managers/aggregate_manager.py
+++ b/sfa/managers/aggregate_manager.py
@@ -14,399 +14,378 @@ from sfa.trust.credential import Credential
 from sfa.rspecs.version_manager import VersionManager
 from sfa.rspecs.rspec import RSpec
 
+from sfa.server.sfaapi import SfaApi
+
 import sfa.plc.peers as peers
-from sfa.plc.plcsfaapi import PlcSfaApi
 from sfa.plc.aggregate import Aggregate
 from sfa.plc.slices import Slices
 
-def GetVersion(api):
-
-    version_manager = VersionManager()
-    ad_rspec_versions = []
-    request_rspec_versions = []
-    for rspec_version in version_manager.versions:
-        if rspec_version.content_type in ['*', 'ad']:
-            ad_rspec_versions.append(rspec_version.to_dict())
-        if rspec_version.content_type in ['*', 'request']:
-            request_rspec_versions.append(rspec_version.to_dict()) 
-    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
-    xrn=Xrn(api.hrn)
-    version_more = {'interface':'aggregate',
-                    'testbed':'myplc',
-                    'hrn':xrn.get_hrn(),
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': default_rspec_version
-                    }
-    return version_core(version_more)
-
-def __get_registry_objects(slice_xrn, creds, users):
-    """
-
-    """
-    hrn, _ = urn_to_hrn(slice_xrn)
+class AggregateManager:
 
-    hrn_auth = get_authority(hrn)
-
-    # Build up objects that an SFA registry would return if SFA
-    # could contact the slice's registry directly
-    reg_objects = None
-
-    if users:
-        # dont allow special characters in the site login base
-        #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
-        #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
+    def __init__ (self):
+        # xxx Thierry : caching at the aggregate level sounds wrong...
+        #self.caching=True
+        self.caching=False
+    
+    def GetVersion(self, api):
+    
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict()) 
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn)
+        version_more = {'interface':'aggregate',
+                        'testbed':'myplc',
+                        'hrn':xrn.get_hrn(),
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
+                        }
+        return version_core(version_more)
+    
+    def _get_registry_objects(self, slice_xrn, creds, users):
+        """
+    
+        """
+        hrn, _ = urn_to_hrn(slice_xrn)
+    
+        hrn_auth = get_authority(hrn)
+    
+        # Build up objects that an SFA registry would return if SFA
+        # could contact the slice's registry directly
+        reg_objects = None
+    
+        if users:
+            # dont allow special characters in the site login base
+            #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
+            #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
+            slicename = hrn_to_pl_slicename(hrn)
+            login_base = slicename.split('_')[0]
+            reg_objects = {}
+            site = {}
+            site['site_id'] = 0
+            site['name'] = 'geni.%s' % login_base 
+            site['enabled'] = True
+            site['max_slices'] = 100
+    
+            # Note:
+            # Is it okay if this login base is the same as one already at this myplc site?
+            # Do we need uniqueness?  Should use hrn_auth instead of just the leaf perhaps?
+            site['login_base'] = login_base
+            site['abbreviated_name'] = login_base
+            site['max_slivers'] = 1000
+            reg_objects['site'] = site
+    
+            slice = {}
+            
+            # get_expiration always returns a normalized datetime - no need to utcparse
+            extime = Credential(string=creds[0]).get_expiration()
+            # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
+            if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
+                extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
+            slice['expires'] = int(time.mktime(extime.timetuple()))
+            slice['hrn'] = hrn
+            slice['name'] = hrn_to_pl_slicename(hrn)
+            slice['url'] = hrn
+            slice['description'] = hrn
+            slice['pointer'] = 0
+            reg_objects['slice_record'] = slice
+    
+            reg_objects['users'] = {}
+            for user in users:
+                user['key_ids'] = []
+                hrn, _ = urn_to_hrn(user['urn'])
+                user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
+                user['first_name'] = hrn
+                user['last_name'] = hrn
+                reg_objects['users'][user['email']] = user
+    
+            return reg_objects
+    
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        if Callids().already_handled(call_id): return {}
+    
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        # find out where this slice is currently running
         slicename = hrn_to_pl_slicename(hrn)
-        login_base = slicename.split('_')[0]
-        reg_objects = {}
-        site = {}
-        site['site_id'] = 0
-        site['name'] = 'geni.%s' % login_base 
-        site['enabled'] = True
-        site['max_slices'] = 100
-
-        # Note:
-        # Is it okay if this login base is the same as one already at this myplc site?
-        # Do we need uniqueness?  Should use hrn_auth instead of just the leaf perhaps?
-        site['login_base'] = login_base
-        site['abbreviated_name'] = login_base
-        site['max_slivers'] = 1000
-        reg_objects['site'] = site
-
-        slice = {}
         
-        # get_expiration always returns a normalized datetime - no need to utcparse
-        extime = Credential(string=creds[0]).get_expiration()
-        # If the expiration time is > 60 days from now, set the expiration time to 60 days from now
-        if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
-            extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
-        slice['expires'] = int(time.mktime(extime.timetuple()))
-        slice['hrn'] = hrn
-        slice['name'] = hrn_to_pl_slicename(hrn)
-        slice['url'] = hrn
-        slice['description'] = hrn
-        slice['pointer'] = 0
-        reg_objects['slice_record'] = slice
-
-        reg_objects['users'] = {}
-        for user in users:
-            user['key_ids'] = []
-            hrn, _ = urn_to_hrn(user['urn'])
-            user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
-            user['first_name'] = hrn
-            user['last_name'] = hrn
-            reg_objects['users'][user['email']] = user
-
-        return reg_objects
-
-def __get_hostnames(nodes):
-    hostnames = []
-    for node in nodes:
-        hostnames.append(node.hostname)
-    return hostnames
-
-def SliverStatus(api, slice_xrn, creds, call_id):
-    if Callids().already_handled(call_id): return {}
-
-    (hrn, _) = urn_to_hrn(slice_xrn)
-    # find out where this slice is currently running
-    slicename = hrn_to_pl_slicename(hrn)
-    
-    slices = api.plshell.GetSlices(api.plauth, [slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
-    if len(slices) == 0:        
-        raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
-    slice = slices[0]
-    
-    # report about the local nodes only
-    nodes = api.plshell.GetNodes(api.plauth, {'node_id':slice['node_ids'],'peer_id':None},
-                                 ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-    site_ids = [node['site_id'] for node in nodes]
-
-    result = {}
-    top_level_status = 'unknown'
-    if nodes:
-        top_level_status = 'ready'
-    slice_urn = Xrn(slice_xrn, 'slice').get_urn()
-    result['geni_urn'] = slice_urn
-    result['pl_login'] = slice['name']
-    result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
-    
-    resources = []
-    for node in nodes:
-        res = {}
-        res['pl_hostname'] = node['hostname']
-        res['pl_boot_state'] = node['boot_state']
-        res['pl_last_contact'] = node['last_contact']
-        if node['last_contact'] is not None:
-            res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
-        sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
-        res['geni_urn'] = sliver_id
-        if node['boot_state'] == 'boot':
-            res['geni_status'] = 'ready'
-        else:
-            res['geni_status'] = 'failed'
-            top_level_status = 'failed' 
+        slices = api.driver.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
+        if len(slices) == 0:        
+            raise Exception("Slice %s not found (used %s as slicename internally)" % (slice_xrn, slicename))
+        slice = slices[0]
+        
+        # report about the local nodes only
+        nodes = api.driver.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
+                                     ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
+        site_ids = [node['site_id'] for node in nodes]
+    
+        result = {}
+        top_level_status = 'unknown'
+        if nodes:
+            top_level_status = 'ready'
+        slice_urn = Xrn(slice_xrn, 'slice').get_urn()
+        result['geni_urn'] = slice_urn
+        result['pl_login'] = slice['name']
+        result['pl_expires'] = datetime.datetime.fromtimestamp(slice['expires']).ctime()
+        
+        resources = []
+        for node in nodes:
+            res = {}
+            res['pl_hostname'] = node['hostname']
+            res['pl_boot_state'] = node['boot_state']
+            res['pl_last_contact'] = node['last_contact']
+            if node['last_contact'] is not None:
+                res['pl_last_contact'] = datetime.datetime.fromtimestamp(node['last_contact']).ctime()
+            sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id']) 
+            res['geni_urn'] = sliver_id
+            if node['boot_state'] == 'boot':
+                res['geni_status'] = 'ready'
+            else:
+                res['geni_status'] = 'failed'
+                top_level_status = 'failed' 
+                
+            res['geni_error'] = ''
+    
+            resources.append(res)
             
-        res['geni_error'] = ''
-
-        resources.append(res)
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+        return result
+    
+    def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, call_id):
+        """
+        Create the sliver[s] (slice) at this aggregate.    
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+        if Callids().already_handled(call_id): return ""
+    
+        aggregate = Aggregate(api)
+        slices = Slices(api)
+        (hrn, _) = urn_to_hrn(slice_xrn)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None    
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        requested_attributes = rspec.version.get_slice_attributes()
         
-    result['geni_status'] = top_level_status
-    result['geni_resources'] = resources
-    return result
-
-def CreateSliver(api, slice_xrn, creds, rspec_string, users, call_id):
-    """
-    Create the sliver[s] (slice) at this aggregate.    
-    Verify HRN and initialize the slice record in PLC if necessary.
-    """
-    if Callids().already_handled(call_id): return ""
-
-    aggregate = Aggregate(api)
-    slices = Slices(api)
-    (hrn, _) = urn_to_hrn(slice_xrn)
-    peer = slices.get_peer(hrn)
-    sfa_peer = slices.get_sfa_peer(hrn)
-    slice_record=None    
-    if users:
-        slice_record = users[0].get('slice_record', {})
-
-    # parse rspec
-    rspec = RSpec(rspec_string)
-    requested_attributes = rspec.version.get_slice_attributes()
-    
-    # ensure site record exists
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
-    # ensure slice record exists
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
-    # ensure person records exists
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-    # ensure slice attributes exists
-    slices.verify_slice_attributes(slice, requested_attributes)
-    
-    # add/remove slice from nodes
-    requested_slivers = [str(host) for host in rspec.version.get_nodes_with_slivers()]
-    slices.verify_slice_nodes(slice, requested_slivers, peer) 
-
-    aggregate.prepare_nodes({'hostname': requested_slivers})
-    aggregate.prepare_interfaces({'node_id': aggregate.nodes.keys()})    
-    slices.verify_slice_links(slice, rspec.version.get_link_requests(), aggregate)
-
-    # hanlde MyPLC peer association.
-    # only used by plc and ple.
-    slices.handle_peer(site, slice, persons, peer)
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exists
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exists
+        slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # add/remove slice from nodes
+        requested_slivers = [str(host) for host in rspec.version.get_nodes_with_slivers()]
+        slices.verify_slice_nodes(slice, requested_slivers, peer) 
+    
+        aggregate.prepare_nodes({'hostname': requested_slivers})
+        aggregate.prepare_interfaces({'node_id': aggregate.nodes.keys()})    
+        slices.verify_slice_links(slice, rspec.version.get_link_requests(), aggregate)
+    
+        # handle MyPLC peer association.
+        # only used by plc and ple.
+        slices.handle_peer(site, slice, persons, peer)
+        
+        return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
+    
+    
+    def RenewSliver(self, api, xrn, creds, expiration_time, call_id):
+        if Callids().already_handled(call_id): return True
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        record = {'expires': int(time.mktime(requested_time.timetuple()))}
+        try:
+            api.driver.UpdateSlice(slice['slice_id'], record)
+            return True
+        except:
+            return False
+    
+    def start_slice(self, api, xrn, creds):
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
+        # just remove the tag if it exists
+        if slice_tags:
+            api.driver.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
     
-    return aggregate.get_rspec(slice_xrn=slice_xrn, version=rspec.version)
-
-
-def RenewSliver(api, xrn, creds, expiration_time, call_id):
-    if Callids().already_handled(call_id): return True
-    (hrn, _) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice = slices[0]
-    requested_time = utcparse(expiration_time)
-    record = {'expires': int(time.mktime(requested_time.timetuple()))}
-    try:
-        api.plshell.UpdateSlice(api.plauth, slice['slice_id'], record)
-        return True
-    except:
-        return False
-
-def start_slice(api, xrn, creds):
-    (hrn, _) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice_id = slices[0]['slice_id']
-    slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
-    # just remove the tag if it exists
-    if slice_tags:
-        api.plshell.DeleteSliceTag(api.plauth, slice_tags[0]['slice_tag_id'])
-
-    return 1
-def stop_slice(api, xrn, creds):
-    hrn, _ = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename}, ['slice_id'])
-    if not slices:
-        raise RecordNotFound(hrn)
-    slice_id = slices[0]['slice_id']
-    slice_tags = api.plshell.GetSliceTags(api.plauth, {'slice_id': slice_id, 'tagname': 'enabled'})
-    if not slice_tags:
-        api.plshell.AddSliceTag(api.plauth, slice_id, 'enabled', '0')
-    elif slice_tags[0]['value'] != "0":
-        tag_id = slice_tags[0]['slice_tag_id']
-        api.plshell.UpdateSliceTag(api.plauth, tag_id, '0')
-    return 1
-
-def reset_slice(api, xrn):
-    # XX not implemented at this interface
-    return 1
-
-def DeleteSliver(api, xrn, creds, call_id):
-    if Callids().already_handled(call_id): return ""
-    (hrn, _) = urn_to_hrn(xrn)
-    slicename = hrn_to_pl_slicename(hrn)
-    slices = api.plshell.GetSlices(api.plauth, {'name': slicename})
-    if not slices:
         return 1
-    slice = slices[0]
-
-    # determine if this is a peer slice
-    peer = peers.get_peer(api, hrn)
-    try:
-        if peer:
-            api.plshell.UnBindObjectFromPeer(api.plauth, 'slice', slice['slice_id'], peer)
-        api.plshell.DeleteSliceFromNodes(api.plauth, slicename, slice['node_ids'])
-    finally:
-        if peer:
-            api.plshell.BindObjectToPeer(api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
-    return 1
-
-# xxx Thierry : caching at the aggregate level sounds wrong...
-#caching=True
-caching=False
-def ListSlices(api, creds, call_id):
-    if Callids().already_handled(call_id): return []
-    # look in cache first
-    if caching and api.cache:
-        slices = api.cache.get('slices')
-        if slices:
-            return slices
-
-    # get data from db 
-    slices = api.plshell.GetSlices(api.plauth, {'peer_id': None}, ['name'])
-    slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
-    slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
-
-    # cache the result
-    if caching and api.cache:
-        api.cache.add('slices', slice_urns) 
-
-    return slice_urns
+     
+    def stop_slice(self, api, xrn, creds):
+        hrn, _ = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(hrn)
+        slice_id = slices[0]['slice_id']
+        slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
+        if not slice_tags:
+            api.driver.AddSliceTag(slice_id, 'enabled', '0')
+        elif slice_tags[0]['value'] != "0":
+            tag_id = slice_tags[0]['slice_tag_id']
+            api.driver.UpdateSliceTag(tag_id, '0')
+        return 1
     
-def ListResources(api, creds, options, call_id):
-    if Callids().already_handled(call_id): return ""
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', None)
-    (hrn, _) = urn_to_hrn(xrn)
-
-    version_manager = VersionManager()
-    # get the rspec's return format from options
-    rspec_version = version_manager.get_version(options.get('rspec_version'))
-    version_string = "rspec_%s" % (rspec_version.to_string())
-
-    #panos adding the info option to the caching key (can be improved)
-    if options.get('info'):
-        version_string = version_string + "_"+options.get('info', 'default')
-
-    # look in cache first
-    if caching and api.cache and not xrn:
-        rspec = api.cache.get(version_string)
-        if rspec:
-            api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
-            return rspec 
-
-    #panos: passing user-defined options
-    #print "manager options = ",options
-    aggregate = Aggregate(api, options)
-    rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version)
-
-    # cache the result
-    if caching and api.cache and not xrn:
-        api.cache.add(version_string, rspec)
-
-    return rspec
-
-
-def get_ticket(api, xrn, creds, rspec, users):
-
-    (slice_hrn, _) = urn_to_hrn(xrn)
-    slices = Slices(api)
-    peer = slices.get_peer(slice_hrn)
-    sfa_peer = slices.get_sfa_peer(slice_hrn)
-
-    # get the slice record
-    credential = api.getCredential()
-    interface = api.registries[api.hrn]
-    registry = api.get_server(interface, credential)
-    records = registry.Resolve(xrn, credential)
-
-    # make sure we get a local slice record
-    record = None
-    for tmp_record in records:
-        if tmp_record['type'] == 'slice' and \
-           not tmp_record['peer_authority']:
-#Error (E0602, get_ticket): Undefined variable 'SliceRecord'
-            record = SliceRecord(dict=tmp_record)
-    if not record:
-        raise RecordNotFound(slice_hrn)
-    
-    # similar to CreateSliver, we must verify that the required records exist
-    # at this aggregate before we can issue a ticket
-    # parse rspec
-    rspec = RSpec(rspec_string)
-    requested_attributes = rspec.version.get_slice_attributes()
-
-    # ensure site record exists
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
-    # ensure slice record exists
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
-    # ensure person records exists
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-    # ensure slice attributes exists
-    slices.verify_slice_attributes(slice, requested_attributes)
-    
-    # get sliver info
-    slivers = slices.get_slivers(slice_hrn)
-
-    if not slivers:
-        raise SliverDoesNotExist(slice_hrn)
-
-    # get initscripts
-    initscripts = []
-    data = {
-        'timestamp': int(time.time()),
-        'initscripts': initscripts,
-        'slivers': slivers
-    }
-
-    # create the ticket
-    object_gid = record.get_gid_object()
-    new_ticket = SfaTicket(subject = object_gid.get_subject())
-    new_ticket.set_gid_caller(api.auth.client_gid)
-    new_ticket.set_gid_object(object_gid)
-    new_ticket.set_issuer(key=api.key, subject=api.hrn)
-    new_ticket.set_pubkey(object_gid.get_pubkey())
-    new_ticket.set_attributes(data)
-    new_ticket.set_rspec(rspec)
-    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-    new_ticket.encode()
-    new_ticket.sign()
-
-    return new_ticket.save_to_string(save_parents=True)
-
-
-
-def main():
-    """
-    rspec = ListResources(api, "plc.princeton.sapan", None, 'pl_test_sapan')
-    #rspec = ListResources(api, "plc.princeton.coblitz", None, 'pl_test_coblitz')
-    #rspec = ListResources(api, "plc.pl.sirius", None, 'pl_test_sirius')
-    print rspec
-    """
-    api = PlcSfaApi()
-    f = open(sys.argv[1])
-    xml = f.read()
-    f.close()
-#Error (E1120, main): No value passed for parameter 'users' in function call
-#Error (E1120, main): No value passed for parameter 'call_id' in function call
-    CreateSliver(api, "plc.princeton.sapan", xml, 'CreateSliver_sapan')
-
-if __name__ == "__main__":
-    main()
+    def reset_slice(self, api, xrn):
+        # XX not implemented at this interface
+        return 1
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        if Callids().already_handled(call_id): return ""
+        (hrn, _) = urn_to_hrn(xrn)
+        slicename = hrn_to_pl_slicename(hrn)
+        slices = api.driver.GetSlices({'name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+    
+        # determine if this is a peer slice
+        peer = peers.get_peer(api, hrn)
+        try:
+            if peer:
+                api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            api.driver.DeleteSliceFromNodes(slicename, slice['node_ids'])
+        finally:
+            if peer:
+                api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        return 1
+    
+    def ListSlices(self, api, creds, call_id):
+        if Callids().already_handled(call_id): return []
+        # look in cache first
+        if self.caching and api.cache:
+            slices = api.cache.get('slices')
+            if slices:
+                return slices
+    
+        # get data from db 
+        slices = api.driver.GetSlices({'peer_id': None}, ['name'])
+        slice_hrns = [slicename_to_hrn(api.hrn, slice['name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+    
+        # cache the result
+        if self.caching and api.cache:
+            api.cache.add('slices', slice_urns) 
+    
+        return slice_urns
+        
+    def ListResources(self, api, creds, options, call_id):
+        if Callids().already_handled(call_id): return ""
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', None)
+        (hrn, _) = urn_to_hrn(xrn)
+    
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        version_string = "rspec_%s" % (rspec_version.to_string())
+    
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+    
+        # look in cache first
+        if self.caching and api.cache and not xrn:
+            rspec = api.cache.get(version_string)
+            if rspec:
+                api.logger.info("aggregate.ListResources: returning cached value for hrn %s"%hrn)
+                return rspec 
+    
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = Aggregate(api, options)
+        rspec =  aggregate.get_rspec(slice_xrn=xrn, version=rspec_version)
+    
+        # cache the result
+        if self.caching and api.cache and not xrn:
+            api.cache.add(version_string, rspec)
+    
+        return rspec
+    
+    
+    def get_ticket(self, api, xrn, creds, rspec, users):
+    
+        (slice_hrn, _) = urn_to_hrn(xrn)
+        slices = Slices(api)
+        peer = slices.get_peer(slice_hrn)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+    
+        # get the slice record
+        credential = api.getCredential()
+        interface = api.registries[api.hrn]
+        registry = api.server_proxy(interface, credential)
+        records = registry.Resolve(xrn, credential)
+    
+        # make sure we get a local slice record
+        record = None
+        for tmp_record in records:
+            if tmp_record['type'] == 'slice' and \
+               not tmp_record['peer_authority']:
+    #Error (E0602, get_ticket): Undefined variable 'SliceRecord'
+                record = SliceRecord(dict=tmp_record)
+        if not record:
+            raise RecordNotFound(slice_hrn)
+        
+        # similar to CreateSliver, we must verify that the required records exist
+        # at this aggregate before we can issue a ticket
+        # parse rspec
+        rspec = RSpec(rspec)
+        requested_attributes = rspec.version.get_slice_attributes()
+    
+        # ensure site record exists
+        site = slices.verify_site(slice_hrn, record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+        # ensure slice attributes exists
+        slices.verify_slice_attributes(slice, requested_attributes)
+        
+        # get sliver info
+        slivers = slices.get_slivers(slice_hrn)
+    
+        if not slivers:
+            raise SliverDoesNotExist(slice_hrn)
+    
+        # get initscripts
+        initscripts = []
+        data = {
+            'timestamp': int(time.time()),
+            'initscripts': initscripts,
+            'slivers': slivers
+        }
+    
+        # create the ticket
+        object_gid = record.get_gid_object()
+        new_ticket = SfaTicket(subject = object_gid.get_subject())
+        new_ticket.set_gid_caller(api.auth.client_gid)
+        new_ticket.set_gid_object(object_gid)
+        new_ticket.set_issuer(key=api.key, subject=api.hrn)
+        new_ticket.set_pubkey(object_gid.get_pubkey())
+        new_ticket.set_attributes(data)
+        new_ticket.set_rspec(rspec)
+        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+        new_ticket.encode()
+        new_ticket.sign()
+    
+        return new_ticket.save_to_string(save_parents=True)
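The hunk above captures the shape of the refactor: what used to be module-level functions calling api.plshell.X(api.plauth, ...) are now methods on an aggregate-manager class that delegate to api.driver.X(...). The sketch below is illustration only, not SFA code: FakeDriver and FakeApi are invented stand-ins, and stop_slice_sketch reproduces just the control flow of the stop_slice method shown above.

# Sketch only: mimics the api.driver delegation used by stop_slice above.
# FakeDriver and FakeApi are invented for this example; they are not SFA classes.
class FakeDriver:
    def __init__(self):
        self.tags = {}                                  # slice_tag_id -> value
    def GetSlices(self, filters, fields):
        return [{'slice_id': 1}]                        # pretend the slice exists
    def GetSliceTags(self, filters):
        return [{'slice_tag_id': tid, 'value': val}
                for (tid, val) in self.tags.items()]
    def AddSliceTag(self, slice_id, tagname, value):
        self.tags[len(self.tags) + 1] = value
    def UpdateSliceTag(self, tag_id, value):
        self.tags[tag_id] = value

class FakeApi:
    def __init__(self):
        self.driver = FakeDriver()

def stop_slice_sketch(api, slicename):
    # same steps as AggregateManager.stop_slice, minus the xrn handling
    slices = api.driver.GetSlices({'name': slicename}, ['slice_id'])
    if not slices:
        raise Exception("no such slice: %s" % slicename)
    slice_id = slices[0]['slice_id']
    slice_tags = api.driver.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
    if not slice_tags:
        api.driver.AddSliceTag(slice_id, 'enabled', '0')
    elif slice_tags[0]['value'] != "0":
        api.driver.UpdateSliceTag(slice_tags[0]['slice_tag_id'], '0')
    return 1

assert stop_slice_sketch(FakeApi(), 'princeton_test1') == 1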
index 6c68118..a94e0fa 100644 (file)
@@ -4,6 +4,8 @@ import sys
 import os, errno
 import logging
 import datetime
+from multiprocessing import Process
+from time import sleep
 
 import boto
 from boto.ec2.regioninfo import RegionInfo
@@ -13,35 +15,21 @@ from xmlbuilder import XMLBuilder
 from lxml import etree as ET
 from sqlobject import *
 
-from sfa.util.faults import *
+from sfa.util.faults import InvalidRSpec
 from sfa.util.xrn import urn_to_hrn, Xrn
-from sfa.server.registry import Registries
-from sfa.trust.credential import Credential
-from sfa.plc.plcsfaapi import PlcSfaApi
-from sfa.plc.aggregate import Aggregate
-from sfa.plc.slices import *
 from sfa.util.plxrn import hrn_to_pl_slicename, slicename_to_hrn
 from sfa.util.callids import Callids
-from sfa.util.sfalogging import logger
-from sfa.rspecs.sfa_rspec import sfa_rspec_version
+#comes with its own logging
+#from sfa.util.sfalogging import logger
 from sfa.util.version import version_core
-
-from multiprocessing import Process
-from time import sleep
-
-##
-# The data structure used to represent a cloud.
-# It contains the cloud name, its ip address, image information,
-# key pairs, and clusters information.
-#
-cloud = {}
-
-##
-# The location of the RelaxNG schema.
-#
-EUCALYPTUS_RSPEC_SCHEMA='/etc/sfa/eucalyptus.rng'
-
-api = PlcSfaApi()
+from sfa.trust.credential import Credential
+from sfa.server.sfaapi import SfaApi
+from sfa.plc.aggregate import Aggregate
+from sfa.plc.slices import Slice, Slices
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+# not sure what this used to be nor where it is now defined
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
 
 ##
 # Meta data of an instance.
@@ -80,9 +68,6 @@ class EucaInstance(SQLObject):
                     (self.image_id, self.kernel_id, self.ramdisk_id,
                     self.inst_type, self.key_pair))
 
-        # XXX The return statement is for testing. REMOVE in production
-        #return
-
         try:
             reservation = botoConn.run_instances(self.image_id,
                                                  kernel_id = self.kernel_id,
@@ -109,143 +94,6 @@ class Slice(SQLObject):
     #slice_index = DatabaseIndex('slice_hrn')
     instances = MultipleJoin('EucaInstance')
 
-##
-# Initialize the aggregate manager by reading a configuration file.
-#
-def init_server():
-    logger = logging.getLogger('EucaAggregate')
-    fileHandler = logging.FileHandler('/var/log/euca.log')
-    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
-    logger.addHandler(fileHandler)
-    fileHandler.setLevel(logging.DEBUG)
-    logger.setLevel(logging.DEBUG)
-
-    configParser = ConfigParser()
-    configParser.read(['/etc/sfa/eucalyptus_aggregate.conf', 'eucalyptus_aggregate.conf'])
-    if len(configParser.sections()) < 1:
-        logger.error('No cloud defined in the config file')
-        raise Exception('Cannot find cloud definition in configuration file.')
-
-    # Only read the first section.
-    cloudSec = configParser.sections()[0]
-    cloud['name'] = cloudSec
-    cloud['access_key'] = configParser.get(cloudSec, 'access_key')
-    cloud['secret_key'] = configParser.get(cloudSec, 'secret_key')
-    cloud['cloud_url']  = configParser.get(cloudSec, 'cloud_url')
-    cloudURL = cloud['cloud_url']
-    if cloudURL.find('https://') >= 0:
-        cloudURL = cloudURL.replace('https://', '')
-    elif cloudURL.find('http://') >= 0:
-        cloudURL = cloudURL.replace('http://', '')
-    (cloud['ip'], parts) = cloudURL.split(':')
-
-    # Create image bundles
-    images = getEucaConnection().get_all_images()
-    cloud['images'] = images
-    cloud['imageBundles'] = {}
-    for i in images:
-        if i.type != 'machine' or i.kernel_id is None: continue
-        name = os.path.dirname(i.location)
-        detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
-        cloud['imageBundles'][name] = detail
-
-    # Initialize sqlite3 database and tables.
-    dbPath = '/etc/sfa/db'
-    dbName = 'euca_aggregate.db'
-
-    if not os.path.isdir(dbPath):
-        logger.info('%s not found. Creating directory ...' % dbPath)
-        os.mkdir(dbPath)
-
-    conn = connectionForURI('sqlite://%s/%s' % (dbPath, dbName))
-    sqlhub.processConnection = conn
-    Slice.createTable(ifNotExists=True)
-    EucaInstance.createTable(ifNotExists=True)
-    Meta.createTable(ifNotExists=True)
-
-    # Start the update process to keep track of the meta data
-    # about Eucalyptus instance.
-    Process(target=updateMeta).start()
-
-    # Make sure the schema exists.
-    if not os.path.exists(EUCALYPTUS_RSPEC_SCHEMA):
-        err = 'Cannot location schema at %s' % EUCALYPTUS_RSPEC_SCHEMA
-        logger.error(err)
-        raise Exception(err)
-
-##
-# Creates a connection to Eucalytpus. This function is inspired by 
-# the make_connection() in Euca2ools.
-#
-# @return A connection object or None
-#
-def getEucaConnection():
-    global cloud
-    accessKey = cloud['access_key']
-    secretKey = cloud['secret_key']
-    eucaURL   = cloud['cloud_url']
-    useSSL    = False
-    srvPath   = '/'
-    eucaPort  = 8773
-    logger    = logging.getLogger('EucaAggregate')
-
-    if not accessKey or not secretKey or not eucaURL:
-        logger.error('Please set ALL of the required environment ' \
-                     'variables by sourcing the eucarc file.')
-        return None
-    
-    # Split the url into parts
-    if eucaURL.find('https://') >= 0:
-        useSSL  = True
-        eucaURL = eucaURL.replace('https://', '')
-    elif eucaURL.find('http://') >= 0:
-        useSSL  = False
-        eucaURL = eucaURL.replace('http://', '')
-    (eucaHost, parts) = eucaURL.split(':')
-    if len(parts) > 1:
-        parts = parts.split('/')
-        eucaPort = int(parts[0])
-        parts = parts[1:]
-        srvPath = '/'.join(parts)
-
-    return boto.connect_ec2(aws_access_key_id=accessKey,
-                            aws_secret_access_key=secretKey,
-                            is_secure=useSSL,
-                            region=RegionInfo(None, 'eucalyptus', eucaHost), 
-                            port=eucaPort,
-                            path=srvPath)
-
-##
-# Returns a string of keys that belong to the users of the given slice.
-# @param sliceHRN The hunman readable name of the slice.
-# @return sting()
-#
-# This method is no longer needed because the user keys are passed into
-# CreateSliver
-#
-def getKeysForSlice(api, sliceHRN):
-    logger   = logging.getLogger('EucaAggregate')
-    cred     = api.getCredential()
-    registry = api.registries[api.hrn]
-    keys     = []
-
-    # Get the slice record
-    records = registry.Resolve(sliceHRN, cred)
-    if not records:
-        logging.warn('Cannot find any record for slice %s' % sliceHRN)
-        return []
-
-    # Find who can log into this slice
-    persons = records[0]['persons']
-
-    # Extract the keys from persons records
-    for p in persons:
-        sliceUser = registry.Resolve(p, cred)
-        userKeys = sliceUser[0]['keys']
-        keys += userKeys
-
-    return '\n'.join(keys)
-
 ##
 # A class that builds the RSpec for Eucalyptus.
 #
@@ -422,322 +270,430 @@ class ZoneResultParser(object):
 
         return clusterList
 
-def ListResources(api, creds, options, call_id): 
-    if Callids().already_handled(call_id): return ""
-    global cloud
-    # get slice's hrn from options
-    xrn = options.get('geni_slice_urn', '')
-    hrn, type = urn_to_hrn(xrn)
-    logger = logging.getLogger('EucaAggregate')
-
-    # get hrn of the original caller
-    origin_hrn = options.get('origin_hrn', None)
-    if not origin_hrn:
-        origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
-
-    conn = getEucaConnection()
-
-    if not conn:
-        logger.error('Cannot create a connection to Eucalyptus')
-        return 'Cannot create a connection to Eucalyptus'
-
-    try:
-        # Zones
-        zones = conn.get_all_zones(['verbose'])
-        p = ZoneResultParser(zones)
-        clusters = p.parse()
-        cloud['clusters'] = clusters
-        
-        # Images
-        images = conn.get_all_images()
-        cloud['images'] = images
-        cloud['imageBundles'] = {}
+class AggregateManagerEucalyptus:
+
+    # The data structure used to represent a cloud.
+    # It contains the cloud name, its ip address, image information,
+    # key pairs, and clusters information.
+    cloud = {}
+    
+    # The location of the RelaxNG schema.
+    EUCALYPTUS_RSPEC_SCHEMA='/etc/sfa/eucalyptus.rng'
+    
+    _inited=False
+
+    # the init_server mechanism has vanished
+    def __init__ (self):
+        if AggregateManagerEucalyptus._inited: return
+        AggregateManagerEucalyptus._inited=True
+        AggregateManagerEucalyptus.init_server()
+
+    # Initialize the aggregate manager by reading a configuration file.
+    @staticmethod
+    def init_server():
+        logger = logging.getLogger('EucaAggregate')
+        fileHandler = logging.FileHandler('/var/log/euca.log')
+        fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+        logger.addHandler(fileHandler)
+        fileHandler.setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+
+        configParser = ConfigParser()
+        configParser.read(['/etc/sfa/eucalyptus_aggregate.conf', 'eucalyptus_aggregate.conf'])
+        if len(configParser.sections()) < 1:
+            logger.error('No cloud defined in the config file')
+            raise Exception('Cannot find cloud definition in configuration file.')
+    
+        # Only read the first section.
+        cloudSec = configParser.sections()[0]
+        AggregateManagerEucalyptus.cloud['name'] = cloudSec
+        AggregateManagerEucalyptus.cloud['access_key'] = configParser.get(cloudSec, 'access_key')
+        AggregateManagerEucalyptus.cloud['secret_key'] = configParser.get(cloudSec, 'secret_key')
+        AggregateManagerEucalyptus.cloud['cloud_url']  = configParser.get(cloudSec, 'cloud_url')
+        cloudURL = AggregateManagerEucalyptus.cloud['cloud_url']
+        if cloudURL.find('https://') >= 0:
+            cloudURL = cloudURL.replace('https://', '')
+        elif cloudURL.find('http://') >= 0:
+            cloudURL = cloudURL.replace('http://', '')
+        (AggregateManagerEucalyptus.cloud['ip'], parts) = cloudURL.split(':')
+    
+        # Create image bundles
+        images = AggregateManagerEucalyptus.getEucaConnection().get_all_images()
+        AggregateManagerEucalyptus.cloud['images'] = images
+        AggregateManagerEucalyptus.cloud['imageBundles'] = {}
         for i in images:
             if i.type != 'machine' or i.kernel_id is None: continue
             name = os.path.dirname(i.location)
             detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
-            cloud['imageBundles'][name] = detail
-
-        # Key Pairs
-        keyPairs = conn.get_all_key_pairs()
-        cloud['keypairs'] = keyPairs
-
-        if hrn:
-            instanceId = []
-            instances  = []
-
-            # Get the instances that belong to the given slice from sqlite3
-            # XXX use getOne() in production because the slice's hrn is supposed
-            # to be unique. For testing, uniqueness is turned off in the db.
-            # If the slice isn't found in the database, create a record for the 
-            # slice.
-            matchedSlices = list(Slice.select(Slice.q.slice_hrn == hrn))
-            if matchedSlices:
-                theSlice = matchedSlices[-1]
-            else:
-                theSlice = Slice(slice_hrn = hrn)
-            for instance in theSlice.instances:
-                instanceId.append(instance.instance_id)
-
-            # Get the information about those instances using their ids.
-            if len(instanceId) > 0:
-                reservations = conn.get_all_instances(instanceId)
-            else:
-                reservations = []
+            AggregateManagerEucalyptus.cloud['imageBundles'][name] = detail
+    
+        # Initialize sqlite3 database and tables.
+        dbPath = '/etc/sfa/db'
+        dbName = 'euca_aggregate.db'
+    
+        if not os.path.isdir(dbPath):
+            logger.info('%s not found. Creating directory ...' % dbPath)
+            os.mkdir(dbPath)
+    
+        conn = connectionForURI('sqlite://%s/%s' % (dbPath, dbName))
+        sqlhub.processConnection = conn
+        Slice.createTable(ifNotExists=True)
+        EucaInstance.createTable(ifNotExists=True)
+        Meta.createTable(ifNotExists=True)
+    
+        # Start the update process to keep track of the meta data
+        # about Eucalyptus instance.
+        Process(target=AggregateManagerEucalyptus.updateMeta).start()
+    
+        # Make sure the schema exists.
+        if not os.path.exists(AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA):
+            err = 'Cannot locate schema at %s' % AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA
+            logger.error(err)
+            raise Exception(err)
+    
+    #
+    # A separate process that will update the meta data.
+    #
+    @staticmethod    
+    def updateMeta():
+        logger = logging.getLogger('EucaMeta')
+        fileHandler = logging.FileHandler('/var/log/euca_meta.log')
+        fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
+        logger.addHandler(fileHandler)
+        fileHandler.setLevel(logging.DEBUG)
+        logger.setLevel(logging.DEBUG)
+    
+        while True:
+            sleep(30)
+    
+            # Get IDs of the instances that don't have IPs yet.
+            dbResults = Meta.select(
+                          AND(Meta.q.pri_addr == None,
+                              Meta.q.state    != 'deleted')
+                        )
+            dbResults = list(dbResults)
+            logger.debug('[update process] dbResults: %s' % dbResults)
+            instids = []
+            for r in dbResults:
+                if not r.instance:
+                    continue
+                instids.append(r.instance.instance_id)
+            logger.debug('[update process] Instance Id: %s' % ', '.join(instids))
+    
+            # Get instance information from Eucalyptus
+            conn = AggregateManagerEucalyptus.getEucaConnection()
+            vmInstances = []
+            reservations = conn.get_all_instances(instids)
             for reservation in reservations:
-                for instance in reservation.instances:
-                    instances.append(instance)
-
-            # Construct a dictionary for the EucaRSpecBuilder
-            instancesDict = {}
-            for instance in instances:
-                instList = instancesDict.setdefault(instance.instance_type, [])
-                instInfoDict = {} 
-
-                instInfoDict['id'] = instance.id
-                instInfoDict['public_dns'] = instance.public_dns_name
-                instInfoDict['state'] = instance.state
-                instInfoDict['key'] = instance.key_name
-
-                instList.append(instInfoDict)
-            cloud['instances'] = instancesDict
-
-    except EC2ResponseError, ec2RespErr:
-        errTree = ET.fromstring(ec2RespErr.body)
-        errMsgE = errTree.find('.//Message')
-        logger.error(errMsgE.text)
-
-    rspec = EucaRSpecBuilder(cloud).toXML()
-
-    # Remove the instances records so next time they won't 
-    # show up.
-    if 'instances' in cloud:
-        del cloud['instances']
-
-    return rspec
-
-"""
-Hook called via 'sfi.py create'
-"""
-def CreateSliver(api, slice_xrn, creds, xml, users, call_id):
-    if Callids().already_handled(call_id): return ""
-
-    global cloud
-    logger = logging.getLogger('EucaAggregate')
-    logger.debug("In CreateSliver")
-
-    aggregate = Aggregate(api)
-    slices = Slices(api)
-    (hrn, type) = urn_to_hrn(slice_xrn)
-    peer = slices.get_peer(hrn)
-    sfa_peer = slices.get_sfa_peer(hrn)
-    slice_record=None
-    if users:
-        slice_record = users[0].get('slice_record', {})
-
-    conn = getEucaConnection()
-    if not conn:
-        logger.error('Cannot create a connection to Eucalyptus')
-        return ""
-
-    # Validate RSpec
-    schemaXML = ET.parse(EUCALYPTUS_RSPEC_SCHEMA)
-    rspecValidator = ET.RelaxNG(schemaXML)
-    rspecXML = ET.XML(xml)
-    for network in rspecXML.iterfind("./network"):
-        if network.get('name') != cloud['name']:
-            # Throw away everything except my own RSpec
-            # sfa_logger().error("CreateSliver: deleting %s from rspec"%network.get('id'))
-            network.getparent().remove(network)
-    if not rspecValidator(rspecXML):
-        error = rspecValidator.error_log.last_error
-        message = '%s (line %s)' % (error.message, error.line) 
-        raise InvalidRSpec(message)
-
+                vmInstances += reservation.instances
+    
+            # Check the IPs
+            instIPs = [ {'id':i.id, 'pri_addr':i.private_dns_name, 'pub_addr':i.public_dns_name}
+                        for i in vmInstances if i.private_dns_name != '0.0.0.0' ]
+            logger.debug('[update process] IP dict: %s' % str(instIPs))
+    
+            # Update the local DB
+            for ipData in instIPs:
+                dbInst = EucaInstance.select(EucaInstance.q.instance_id == ipData['id']).getOne(None)
+                if not dbInst:
+                    logger.info('[update process] Could not find %s in DB' % ipData['id'])
+                    continue
+                dbInst.meta.pri_addr = ipData['pri_addr']
+                dbInst.meta.pub_addr = ipData['pub_addr']
+                dbInst.meta.state    = 'running'
+    
+            AggregateManagerEucalyptus.dumpInstanceInfo()
+    
+    ##
+    # Creates a connection to Eucalytpus. This function is inspired by 
+    # the make_connection() in Euca2ools.
+    #
+    # @return A connection object or None
+    #
+    @staticmethod
+    def getEucaConnection():
+        accessKey = AggregateManagerEucalyptus.cloud['access_key']
+        secretKey = AggregateManagerEucalyptus.cloud['secret_key']
+        eucaURL   = AggregateManagerEucalyptus.cloud['cloud_url']
+        useSSL    = False
+        srvPath   = '/'
+        eucaPort  = 8773
+        logger    = logging.getLogger('EucaAggregate')
+    
+        if not accessKey or not secretKey or not eucaURL:
+            logger.error('Please set ALL of the required environment ' \
+                         'variables by sourcing the eucarc file.')
+            return None
+        
+        # Split the url into parts
+        if eucaURL.find('https://') >= 0:
+            useSSL  = True
+            eucaURL = eucaURL.replace('https://', '')
+        elif eucaURL.find('http://') >= 0:
+            useSSL  = False
+            eucaURL = eucaURL.replace('http://', '')
+        (eucaHost, parts) = eucaURL.split(':')
+        if len(parts) > 1:
+            parts = parts.split('/')
+            eucaPort = int(parts[0])
+            parts = parts[1:]
+            srvPath = '/'.join(parts)
+    
+        return boto.connect_ec2(aws_access_key_id=accessKey,
+                                aws_secret_access_key=secretKey,
+                                is_secure=useSSL,
+                                region=RegionInfo(None, 'eucalyptus', eucaHost), 
+                                port=eucaPort,
+                                path=srvPath)
+    
+    def ListResources(self, api, creds, options, call_id):
+        if Callids().already_handled(call_id): return ""
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', '')
+        hrn, type = urn_to_hrn(xrn)
+        logger = logging.getLogger('EucaAggregate')
+    
+        # get hrn of the original caller
+        origin_hrn = options.get('origin_hrn', None)
+        if not origin_hrn:
+            origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
+    
+        conn = self.getEucaConnection()
+    
+        if not conn:
+            logger.error('Cannot create a connection to Eucalyptus')
+            return 'Cannot create a connection to Eucalyptus'
+    
+        try:
+            # Zones
+            zones = conn.get_all_zones(['verbose'])
+            p = ZoneResultParser(zones)
+            clusters = p.parse()
+            AggregateManagerEucalyptus.cloud['clusters'] = clusters
+            
+            # Images
+            images = conn.get_all_images()
+            AggregateManagerEucalyptus.cloud['images'] = images
+            AggregateManagerEucalyptus.cloud['imageBundles'] = {}
+            for i in images:
+                if i.type != 'machine' or i.kernel_id is None: continue
+                name = os.path.dirname(i.location)
+                detail = {'imageID' : i.id, 'kernelID' : i.kernel_id, 'ramdiskID' : i.ramdisk_id}
+                AggregateManagerEucalyptus.cloud['imageBundles'][name] = detail
+    
+            # Key Pairs
+            keyPairs = conn.get_all_key_pairs()
+            AggregateManagerEucalyptus.cloud['keypairs'] = keyPairs
+    
+            if hrn:
+                instanceId = []
+                instances  = []
+    
+                # Get the instances that belong to the given slice from sqlite3
+                # XXX use getOne() in production because the slice's hrn is supposed
+                # to be unique. For testing, uniqueness is turned off in the db.
+                # If the slice isn't found in the database, create a record for the 
+                # slice.
+                matchedSlices = list(Slice.select(Slice.q.slice_hrn == hrn))
+                if matchedSlices:
+                    theSlice = matchedSlices[-1]
+                else:
+                    theSlice = Slice(slice_hrn = hrn)
+                for instance in theSlice.instances:
+                    instanceId.append(instance.instance_id)
+    
+                # Get the information about those instances using their ids.
+                if len(instanceId) > 0:
+                    reservations = conn.get_all_instances(instanceId)
+                else:
+                    reservations = []
+                for reservation in reservations:
+                    for instance in reservation.instances:
+                        instances.append(instance)
+    
+                # Construct a dictionary for the EucaRSpecBuilder
+                instancesDict = {}
+                for instance in instances:
+                    instList = instancesDict.setdefault(instance.instance_type, [])
+                    instInfoDict = {} 
+    
+                    instInfoDict['id'] = instance.id
+                    instInfoDict['public_dns'] = instance.public_dns_name
+                    instInfoDict['state'] = instance.state
+                    instInfoDict['key'] = instance.key_name
+    
+                    instList.append(instInfoDict)
+                AggregateManagerEucalyptus.cloud['instances'] = instancesDict
+    
+        except EC2ResponseError, ec2RespErr:
+            errTree = ET.fromstring(ec2RespErr.body)
+            errMsgE = errTree.find('.//Message')
+            logger.error(errMsgE.text)
+    
+        rspec = EucaRSpecBuilder(AggregateManagerEucalyptus.cloud).toXML()
+    
+        # Remove the instances records so next time they won't 
+        # show up.
+        if 'instances' in AggregateManagerEucalyptus.cloud:
+            del AggregateManagerEucalyptus.cloud['instances']
+    
+        return rspec
+    
     """
-    Create the sliver[s] (slice) at this aggregate.
-    Verify HRN and initialize the slice record in PLC if necessary.
+    Hook called via 'sfi.py create'
     """
-
-    # ensure site record exists
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
-    # ensure slice record exists
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
-    # ensure person records exists
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-
-    # Get the slice from db or create one.
-    s = Slice.select(Slice.q.slice_hrn == hrn).getOne(None)
-    if s is None:
-        s = Slice(slice_hrn = hrn)
-
-    # Process any changes in existing instance allocation
-    pendingRmInst = []
-    for sliceInst in s.instances:
-        pendingRmInst.append(sliceInst.instance_id)
-    existingInstGroup = rspecXML.findall(".//euca_instances")
-    for instGroup in existingInstGroup:
-        for existingInst in instGroup:
-            if existingInst.get('id') in pendingRmInst:
-                pendingRmInst.remove(existingInst.get('id'))
-    for inst in pendingRmInst:
-        dbInst = EucaInstance.select(EucaInstance.q.instance_id == inst).getOne(None)
-        if dbInst.meta.state != 'deleted':
-            logger.debug('Instance %s will be terminated' % inst)
-            # Terminate instances one at a time for robustness
-            conn.terminate_instances([inst])
-            # Only change the state but do not remove the entry from the DB.
-            dbInst.meta.state = 'deleted'
-            #dbInst.destroySelf()
-
-    # Process new instance requests
-    requests = rspecXML.findall(".//request")
-    if requests:
-        # Get all the public keys associate with slice.
-        keys = []
-        for user in users:
-            keys += user['keys']
-            logger.debug("Keys: %s" % user['keys'])
-        pubKeys = '\n'.join(keys)
-        logger.debug('Passing the following keys to the instance:\n%s' % pubKeys)
-    for req in requests:
-        vmTypeElement = req.getparent()
-        instType = vmTypeElement.get('name')
-        numInst  = int(req.find('instances').text)
-        
-        bundleName = req.find('bundle').text
-        if not cloud['imageBundles'][bundleName]:
-            logger.error('Cannot find bundle %s' % bundleName)
-        bundleInfo = cloud['imageBundles'][bundleName]
-        instKernel  = bundleInfo['kernelID']
-        instDiskImg = bundleInfo['imageID']
-        instRamDisk = bundleInfo['ramdiskID']
-        instKey     = None
-
-        # Create the instances
-        for i in range(0, numInst):
-            eucaInst = EucaInstance(slice      = s,
-                                    kernel_id  = instKernel,
-                                    image_id   = instDiskImg,
-                                    ramdisk_id = instRamDisk,
-                                    key_pair   = instKey,
-                                    inst_type  = instType,
-                                    meta       = Meta(start_time=datetime.datetime.now()))
-            eucaInst.reserveInstance(conn, pubKeys)
-
-    # xxx - should return altered rspec 
-    # with enough data for the client to understand what's happened
-    return xml
-
-##
-# Return information on the IP addresses bound to each slice's instances
-#
-def dumpInstanceInfo():
-    logger = logging.getLogger('EucaMeta')
-    outdir = "/var/www/html/euca/"
-    outfile = outdir + "instances.txt"
-
-    try:
-        os.makedirs(outdir)
-    except OSError, e:
-        if e.errno != errno.EEXIST:
-            raise
-
-    dbResults = Meta.select(
-        AND(Meta.q.pri_addr != None,
-            Meta.q.state    == 'running')
-        )
-    dbResults = list(dbResults)
-    f = open(outfile, "w")
-    for r in dbResults:
-        instId = r.instance.instance_id
-        ipaddr = r.pri_addr
-        hrn = r.instance.slice.slice_hrn
-        logger.debug('[dumpInstanceInfo] %s %s %s' % (instId, ipaddr, hrn))
-        f.write("%s %s %s\n" % (instId, ipaddr, hrn))
-    f.close()
-
-##
-# A separate process that will update the meta data.
-#
-def updateMeta():
-    logger = logging.getLogger('EucaMeta')
-    fileHandler = logging.FileHandler('/var/log/euca_meta.log')
-    fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
-    logger.addHandler(fileHandler)
-    fileHandler.setLevel(logging.DEBUG)
-    logger.setLevel(logging.DEBUG)
-
-    while True:
-        sleep(30)
-
-        # Get IDs of the instances that don't have IPs yet.
+    def CreateSliver(self, api, slice_xrn, creds, xml, users, call_id):
+        if Callids().already_handled(call_id): return ""
+    
+        logger = logging.getLogger('EucaAggregate')
+        logger.debug("In CreateSliver")
+    
+        aggregate = Aggregate(api)
+        slices = Slices(api)
+        (hrn, type) = urn_to_hrn(slice_xrn)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+    
+        conn = self.getEucaConnection()
+        if not conn:
+            logger.error('Cannot create a connection to Eucalyptus')
+            return ""
+    
+        # Validate RSpec
+        schemaXML = ET.parse(AggregateManagerEucalyptus.EUCALYPTUS_RSPEC_SCHEMA)
+        rspecValidator = ET.RelaxNG(schemaXML)
+        rspecXML = ET.XML(xml)
+        for network in rspecXML.iterfind("./network"):
+            if network.get('name') != AggregateManagerEucalyptus.cloud['name']:
+                # Throw away everything except my own RSpec
+                # sfa_logger().error("CreateSliver: deleting %s from rspec"%network.get('id'))
+                network.getparent().remove(network)
+        if not rspecValidator(rspecXML):
+            error = rspecValidator.error_log.last_error
+            message = '%s (line %s)' % (error.message, error.line) 
+            raise InvalidRSpec(message)
+    
+        """
+        Create the sliver[s] (slice) at this aggregate.
+        Verify HRN and initialize the slice record in PLC if necessary.
+        """
+    
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exist
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+    
+        # Get the slice from db or create one.
+        s = Slice.select(Slice.q.slice_hrn == hrn).getOne(None)
+        if s is None:
+            s = Slice(slice_hrn = hrn)
+    
+        # Process any changes in existing instance allocation
+        pendingRmInst = []
+        for sliceInst in s.instances:
+            pendingRmInst.append(sliceInst.instance_id)
+        existingInstGroup = rspecXML.findall(".//euca_instances")
+        for instGroup in existingInstGroup:
+            for existingInst in instGroup:
+                if existingInst.get('id') in pendingRmInst:
+                    pendingRmInst.remove(existingInst.get('id'))
+        for inst in pendingRmInst:
+            dbInst = EucaInstance.select(EucaInstance.q.instance_id == inst).getOne(None)
+            if dbInst.meta.state != 'deleted':
+                logger.debug('Instance %s will be terminated' % inst)
+                # Terminate instances one at a time for robustness
+                conn.terminate_instances([inst])
+                # Only change the state but do not remove the entry from the DB.
+                dbInst.meta.state = 'deleted'
+                #dbInst.destroySelf()
+    
+        # Process new instance requests
+        requests = rspecXML.findall(".//request")
+        if requests:
+            # Get all the public keys associated with the slice.
+            keys = []
+            for user in users:
+                keys += user['keys']
+                logger.debug("Keys: %s" % user['keys'])
+            pubKeys = '\n'.join(keys)
+            logger.debug('Passing the following keys to the instance:\n%s' % pubKeys)
+        for req in requests:
+            vmTypeElement = req.getparent()
+            instType = vmTypeElement.get('name')
+            numInst  = int(req.find('instances').text)
+            
+            bundleName = req.find('bundle').text
+            if not AggregateManagerEucalyptus.cloud['imageBundles'][bundleName]:
+                logger.error('Cannot find bundle %s' % bundleName)
+            bundleInfo = AggregateManagerEucalyptus.cloud['imageBundles'][bundleName]
+            instKernel  = bundleInfo['kernelID']
+            instDiskImg = bundleInfo['imageID']
+            instRamDisk = bundleInfo['ramdiskID']
+            instKey     = None
+    
+            # Create the instances
+            for i in range(0, numInst):
+                eucaInst = EucaInstance(slice      = s,
+                                        kernel_id  = instKernel,
+                                        image_id   = instDiskImg,
+                                        ramdisk_id = instRamDisk,
+                                        key_pair   = instKey,
+                                        inst_type  = instType,
+                                        meta       = Meta(start_time=datetime.datetime.now()))
+                eucaInst.reserveInstance(conn, pubKeys)
+    
+        # xxx - should return altered rspec 
+        # with enough data for the client to understand what's happened
+        return xml
+    
+    ##
+    # Return information on the IP addresses bound to each slice's instances
+    #
+    @staticmethod
+    def dumpInstanceInfo():
+        logger = logging.getLogger('EucaMeta')
+        outdir = "/var/www/html/euca/"
+        outfile = outdir + "instances.txt"
+    
+        try:
+            os.makedirs(outdir)
+        except OSError, e:
+            if e.errno != errno.EEXIST:
+                raise
+    
         dbResults = Meta.select(
-                      AND(Meta.q.pri_addr == None,
-                          Meta.q.state    != 'deleted')
-                    )
+            AND(Meta.q.pri_addr != None,
+                Meta.q.state    == 'running')
+            )
         dbResults = list(dbResults)
-        logger.debug('[update process] dbResults: %s' % dbResults)
-        instids = []
+        f = open(outfile, "w")
         for r in dbResults:
-            if not r.instance:
-                continue
-            instids.append(r.instance.instance_id)
-        logger.debug('[update process] Instance Id: %s' % ', '.join(instids))
-
-        # Get instance information from Eucalyptus
-        conn = getEucaConnection()
-        vmInstances = []
-        reservations = conn.get_all_instances(instids)
-        for reservation in reservations:
-            vmInstances += reservation.instances
-
-        # Check the IPs
-        instIPs = [ {'id':i.id, 'pri_addr':i.private_dns_name, 'pub_addr':i.public_dns_name}
-                    for i in vmInstances if i.private_dns_name != '0.0.0.0' ]
-        logger.debug('[update process] IP dict: %s' % str(instIPs))
-
-        # Update the local DB
-        for ipData in instIPs:
-            dbInst = EucaInstance.select(EucaInstance.q.instance_id == ipData['id']).getOne(None)
-            if not dbInst:
-                logger.info('[update process] Could not find %s in DB' % ipData['id'])
-                continue
-            dbInst.meta.pri_addr = ipData['pri_addr']
-            dbInst.meta.pub_addr = ipData['pub_addr']
-            dbInst.meta.state    = 'running'
-
-        dumpInstanceInfo()
-
-def GetVersion(api):
-    xrn=Xrn(api.hrn)
-    request_rspec_versions = [dict(sfa_rspec_version)]
-    ad_rspec_versions = [dict(sfa_rspec_version)]
-    version_more = {'interface':'aggregate',
-                    'testbed':'myplc',
-                    'hrn':xrn.get_hrn(),
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': dict(sfa_rspec_version)
-                    }
-    return version_core(version_more)
-
-def main():
-    init_server()
-
-    #theRSpec = None
-    #with open(sys.argv[1]) as xml:
-    #    theRSpec = xml.read()
-    #CreateSliver(None, 'planetcloud.pc.test', theRSpec, 'call-id-cloudtest')
-
-    #rspec = ListResources('euca', 'planetcloud.pc.test', 'planetcloud.pc.marcoy', 'test_euca')
-    #print rspec
-
-    server_key_file = '/var/lib/sfa/authorities/server.key'
-    server_cert_file = '/var/lib/sfa/authorities/server.cert'
-    api = PlcSfaApi(key_file = server_key_file, cert_file = server_cert_file, interface='aggregate')
-    print getKeysForSlice(api, 'gc.gc.test1')
-
-if __name__ == "__main__":
-    main()
-
+            instId = r.instance.instance_id
+            ipaddr = r.pri_addr
+            hrn = r.instance.slice.slice_hrn
+            logger.debug('[dumpInstanceInfo] %s %s %s' % (instId, ipaddr, hrn))
+            f.write("%s %s %s\n" % (instId, ipaddr, hrn))
+        f.close()
+    
+    def GetVersion(self, api):
+
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn)
+        version_more = {'interface':'aggregate',
+                        'testbed':'myplc',
+                        'hrn':xrn.get_hrn(),
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
+                        }
+        return version_core(version_more)
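GetVersion now derives the advertised and requested RSpec version lists from VersionManager rather than the old hard-coded sfa_rspec_version. The selection is a plain content_type filter; the sketch below reproduces only that filter, with StubRSpecVersion and a hand-made list standing in for VersionManager().versions (the attribute names mirror the loop above, everything else is invented for illustration).

# Sketch only: the content_type filtering performed by GetVersion above,
# applied to an invented version list instead of VersionManager().versions.
class StubRSpecVersion:
    def __init__(self, type, version, content_type):
        self.type = type
        self.version = version
        self.content_type = content_type
    def to_dict(self):
        return {'type': self.type, 'version': self.version,
                'content_type': self.content_type}

versions = [StubRSpecVersion('SFA', '1', '*'),
            StubRSpecVersion('ProtoGENI', '2', 'ad')]
ad_rspec_versions = []
request_rspec_versions = []
for rspec_version in versions:
    if rspec_version.content_type in ['*', 'ad']:
        ad_rspec_versions.append(rspec_version.to_dict())
    if rspec_version.content_type in ['*', 'request']:
        request_rspec_versions.append(rspec_version.to_dict())
# ad_rspec_versions now holds both entries; request_rspec_versions only the '*' one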
index ff6ea73..09e56b7 100644 (file)
-from sfa.plc.slices import Slices\r
-from sfa.server.registry import Registries\r
-from sfa.util.xrn import urn_to_hrn, hrn_to_urn, get_authority, Xrn\r
-from sfa.util.plxrn import hrn_to_pl_slicename\r
-from sfa.util.sfalogging import logger\r
-from sfa.util.faults import *\r
-from sfa.util.config import Config\r
-from sfa.util.sfatime import utcparse\r
-from sfa.util.callids import Callids\r
-from sfa.util.version import version_core\r
-from sfa.rspecs.rspec_version import RSpecVersion\r
-from sfa.rspecs.sfa_rspec import sfa_rspec_version\r
-from sfa.rspecs.rspec_parser import parse_rspec\r
-from sfa.managers.aggregate_manager_pl import __get_registry_objects, ListSlices\r
-import os\r
-import time\r
-import re\r
-\r
-RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"\r
-\r
-# execute shell command and return both exit code and text output\r
-def shell_execute(cmd, timeout):\r
-    pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')\r
-    pipe = os.popen(cmd + ' 2>&1', 'r')\r
-    text = ''\r
-    while timeout:\r
-        line = pipe.read()\r
-        text += line\r
-        time.sleep(1)\r
-        timeout = timeout-1\r
-    code = pipe.close()\r
-    if code is None: code = 0\r
-    if text[-1:] == '\n': text = text[:-1]\r
-    return code, text\r
-\r
-"""\r
- call AM API client with command like in the following example:\r
- cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \\r
-      net.geni.aggregate.client.examples.CreateSliceNetworkClient \\r
-      ./repo https://geni:8443/axis2/services/AggregateGENI \\r
-      ... params ...\r
-"""\r
-\r
-def call_am_apiclient(client_app, params, timeout):\r
-    (client_path, am_url) = Config().get_max_aggrMgr_info()\r
-    sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + client_app + " ./repo " + am_url + " " + ' '.join(params)\r
-    ret = shell_execute(sys_cmd, timeout)\r
-    logger.debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))\r
-    return ret\r
-\r
-# save request RSpec xml content to a tmp file\r
-def save_rspec_to_file(rspec):\r
-    path = RSPEC_TMP_FILE_PREFIX + "_" + time.strftime('%Y%m%dT%H:%M:%S', time.gmtime(time.time())) +".xml"\r
-    file = open(path, "w")\r
-    file.write(rspec)\r
-    file.close()\r
-    return path\r
-\r
-# get stripped down slice id/name plc.maxpl.xislice1 --> maxpl_xislice1\r
-def get_plc_slice_id(cred, xrn):\r
-    (hrn, type) = urn_to_hrn(xrn)\r
-    slice_id = hrn.find(':')\r
-    sep = '.'\r
-    if hrn.find(':') != -1:\r
-        sep=':'\r
-    elif hrn.find('+') != -1:\r
-        sep='+'\r
-    else:\r
-        sep='.'\r
-    slice_id = hrn.split(sep)[-2] + '_' + hrn.split(sep)[-1]\r
-    return slice_id\r
-\r
-# extract xml \r
-def get_xml_by_tag(text, tag):\r
-    indx1 = text.find('<'+tag)\r
-    indx2 = text.find('/'+tag+'>')\r
-    xml = None\r
-    if indx1!=-1 and indx2>indx1:\r
-        xml = text[indx1:indx2+len(tag)+2]\r
-    return xml\r
-\r
-def prepare_slice(api, slice_xrn, creds, users):\r
-    reg_objects = __get_registry_objects(slice_xrn, creds, users)\r
-    (hrn, type) = urn_to_hrn(slice_xrn)\r
-    slices = Slices(api)\r
-    peer = slices.get_peer(hrn)\r
-    sfa_peer = slices.get_sfa_peer(hrn)\r
-    slice_record=None\r
-    if users:\r
-        slice_record = users[0].get('slice_record', {})\r
-    registry = api.registries[api.hrn]\r
-    credential = api.getCredential()\r
-    # ensure site record exists\r
-    site = slices.verify_site(hrn, slice_record, peer, sfa_peer)\r
-    # ensure slice record exists\r
-    slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)\r
-    # ensure person records exists\r
-    persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)\r
-\r
-def parse_resources(text, slice_xrn):\r
-    resources = []\r
-    urn = hrn_to_urn(slice_xrn, 'sliver')\r
-    plc_slice = re.search("Slice Status => ([^\n]+)", text)\r
-    if plc_slice.group(1) != 'NONE':\r
-        res = {}\r
-        res['geni_urn'] = urn + '_plc_slice'\r
-        res['geni_error'] = ''\r
-        res['geni_status'] = 'unknown'\r
-        if plc_slice.group(1) == 'CREATED':\r
-            res['geni_status'] = 'ready'\r
-        resources.append(res)\r
-    vlans = re.findall("GRI => ([^\n]+)\n\t  Status => ([^\n]+)", text)\r
-    for vlan in vlans:\r
-        res = {}\r
-        res['geni_error'] = ''\r
-        res['geni_urn'] = urn + '_vlan_' + vlan[0]\r
-        if vlan[1] == 'ACTIVE':\r
-            res['geni_status'] = 'ready'\r
-        elif vlan[1] == 'FAILED':\r
-            res['geni_status'] = 'failed'\r
-        else:\r
-            res['geni_status'] = 'configuring'\r
-        resources.append(res)\r
-    return resources\r
-\r
-def slice_status(api, slice_xrn, creds):\r
-    urn = hrn_to_urn(slice_xrn, 'slice')\r
-    result = {}\r
-    top_level_status = 'unknown'\r
-    slice_id = get_plc_slice_id(creds, urn)\r
-    (ret, output) = call_am_apiclient("QuerySliceNetworkClient", [slice_id,], 5)\r
-    # parse output into rspec XML\r
-    if output.find("Unkown Rspec:") > 0:\r
-        top_level_staus = 'failed'\r
-        result['geni_resources'] = ''\r
-    else:\r
-        has_failure = 0\r
-        all_active = 0\r
-        if output.find("Status => FAILED") > 0:\r
-            top_level_staus = 'failed'\r
-        elif (    output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0\r
-               or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0\r
-             ):\r
-            top_level_status = 'configuring'\r
-        else:\r
-            top_level_status = 'ready'\r
-        result['geni_resources'] = parse_resources(output, slice_xrn)\r
-    result['geni_urn'] = urn\r
-    result['geni_status'] = top_level_status\r
-    return result\r
-\r
-def create_slice(api, xrn, cred, rspec, users):\r
-    indx1 = rspec.find("<RSpec")\r
-    indx2 = rspec.find("</RSpec>")\r
-    if indx1 > -1 and indx2 > indx1:\r
-        rspec = rspec[indx1+len("<RSpec type=\"SFA\">"):indx2-1]\r
-    rspec_path = save_rspec_to_file(rspec)\r
-    prepare_slice(api, xrn, cred, users)\r
-    slice_id = get_plc_slice_id(cred, xrn)\r
-    sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" +slice_id+ "/g\" " + rspec_path + ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" +slice_id+ "/g\" " + rspec_path\r
-    ret = shell_execute(sys_cmd, 1)\r
-    sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + rspec_path + "/g\""\r
-    ret = shell_execute(sys_cmd, 1)\r
-    (ret, output) = call_am_apiclient("CreateSliceNetworkClient", [rspec_path,], 3)\r
-    # parse output ?\r
-    rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"\r
-    return True\r
-\r
-def delete_slice(api, xrn, cred):\r
-    slice_id = get_plc_slice_id(cred, xrn)\r
-    (ret, output) = call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)\r
-    # parse output ?\r
-    return 1\r
-\r
-\r
-def get_rspec(api, cred, slice_urn):\r
-    logger.debug("#### called max-get_rspec")\r
-    #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1\r
-    if slice_urn == None:\r
-        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)\r
-    else:\r
-        slice_id = get_plc_slice_id(cred, slice_urn)\r
-        (ret, output) = call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)\r
-    # parse output into rspec XML\r
-    if output.find("No resouce found") > 0:\r
-        rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"\r
-    else:\r
-        comp_rspec = get_xml_by_tag(output, 'computeResource')\r
-        logger.debug("#### computeResource %s" % comp_rspec)\r
-        topo_rspec = get_xml_by_tag(output, 'topology')\r
-        logger.debug("#### topology %s" % topo_rspec)\r
-        rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">";\r
-        if comp_rspec != None:\r
-            rspec = rspec + get_xml_by_tag(output, 'computeResource')\r
-        if topo_rspec != None:\r
-            rspec = rspec + get_xml_by_tag(output, 'topology')\r
-        rspec = rspec + "</network> </RSpec>"\r
-    return (rspec)\r
-\r
-def start_slice(api, xrn, cred):\r
-    # service not supported\r
-    return None\r
-\r
-def stop_slice(api, xrn, cred):\r
-    # service not supported\r
-    return None\r
-\r
-def reset_slices(api, xrn):\r
-    # service not supported\r
-    return None\r
-\r
-"""\r
-    GENI AM API Methods\r
-"""\r
-\r
-def GetVersion(api):\r
-    xrn=Xrn(api.hrn)\r
-    request_rspec_versions = [dict(sfa_rspec_version)]\r
-    ad_rspec_versions = [dict(sfa_rspec_version)]\r
-    #TODO: MAX-AM specific\r
-    version_more = {'interface':'aggregate',\r
-                    'testbed':'myplc',\r
-                    'hrn':xrn.get_hrn(),\r
-                    'request_rspec_versions': request_rspec_versions,\r
-                    'ad_rspec_versions': ad_rspec_versions,\r
-                    'default_ad_rspec': dict(sfa_rspec_version)\r
-                    }\r
-    return version_core(version_more)\r
-\r
-def SliverStatus(api, slice_xrn, creds, call_id):\r
-    if Callids().already_handled(call_id): return {}\r
-    return slice_status(api, slice_xrn, creds)\r
-\r
-def CreateSliver(api, slice_xrn, creds, rspec_string, users, call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    #TODO: create real CreateSliver response rspec\r
-    ret = create_slice(api, slice_xrn, creds, rspec_string, users)\r
-    if ret:\r
-        return get_rspec(api, creds, slice_xrn)\r
-    else:\r
-        return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"\r
-\r
-def DeleteSliver(api, xrn, creds, call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    return delete_slice(api, xrn, creds)\r
-\r
-# no caching\r
-def ListResources(api, creds, options,call_id):\r
-    if Callids().already_handled(call_id): return ""\r
-    # version_string = "rspec_%s" % (rspec_version.get_version_name())\r
-    slice_urn = options.get('geni_slice_urn')\r
-    return get_rspec(api, creds, slice_urn)\r
-\r
-"""\r
-Returns the request context required by sfatables. At some point, this mechanism should be changed\r
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just\r
-return the basic information needed in a dict.\r
-"""\r
-def fetch_context(slice_hrn, user_hrn, contexts):\r
-    base_context = {'sfa':{'user':{'hrn':user_hrn}}}\r
-    return base_context\r
-    api = PlcSfaApi()\r
-    create_slice(api, "plc.maxpl.test000", None, rspec_xml, None)\r
-\r
+import os
+import time
+import re
+
+#from sfa.util.faults import *
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+from sfa.util.callids import Callids
+from sfa.util.version import version_core
+from sfa.util.xrn import urn_to_hrn, hrn_to_urn, Xrn
+
+# xxx the sfa.rspecs module is dead - this symbol is now undefined
+#from sfa.rspecs.sfa_rspec import sfa_rspec_version
+
+from sfa.managers.aggregate_manager import AggregateManager
+
+from sfa.plc.slices import Slices
+
+class AggregateManagerMax (AggregateManager):
+
+    RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"
+    
+    # execute shell command and return both exit code and text output
+    def shell_execute(self, cmd, timeout):
+        pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
+        text = ''
+        while timeout:
+            line = pipe.read()
+            text += line
+            time.sleep(1)
+            timeout = timeout-1
+        code = pipe.close()
+        if code is None: code = 0
+        if text[-1:] == '\n': text = text[:-1]
+        return code, text
+    
+   
+    def call_am_apiclient(self, client_app, params, timeout):
+        """
+        call AM API client with command like in the following example:
+        cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \
+          net.geni.aggregate.client.examples.CreateSliceNetworkClient \
+          ./repo https://geni:8443/axis2/services/AggregateGENI \
+          ... params ...
+        """
+        (client_path, am_url) = Config().get_max_aggrMgr_info()
+        sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + client_app + " ./repo " + am_url + " " + ' '.join(params)
+        ret = self.shell_execute(sys_cmd, timeout)
+        logger.debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))
+        return ret
+    
+    # save request RSpec xml content to a tmp file
+    def save_rspec_to_file(self, rspec):
+        path = AggregateManagerMax.RSPEC_TMP_FILE_PREFIX + "_" + \
+            time.strftime('%Y%m%dT%H:%M:%S', time.gmtime(time.time())) +".xml"
+        file = open(path, "w")
+        file.write(rspec)
+        file.close()
+        return path
+    
+    # get stripped down slice id/name plc.maxpl.xislice1 --> maxpl_xislice1
+    def get_plc_slice_id(self, cred, xrn):
+        (hrn, type) = urn_to_hrn(xrn)
+        if hrn.find(':') != -1:
+            sep = ':'
+        elif hrn.find('+') != -1:
+            sep = '+'
+        else:
+            sep = '.'
+        slice_id = hrn.split(sep)[-2] + '_' + hrn.split(sep)[-1]
+        return slice_id
+    
+    # extract xml 
+    def get_xml_by_tag(self, text, tag):
+        indx1 = text.find('<'+tag)
+        indx2 = text.find('/'+tag+'>')
+        xml = None
+        if indx1!=-1 and indx2>indx1:
+            xml = text[indx1:indx2+len(tag)+2]
+        return xml
+    
+    def prepare_slice(self, api, slice_xrn, creds, users):
+        reg_objects = self._get_registry_objects(slice_xrn, creds, users)
+        (hrn, type) = urn_to_hrn(slice_xrn)
+        slices = Slices(api)
+        peer = slices.get_peer(hrn)
+        sfa_peer = slices.get_sfa_peer(hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+        registry = api.registries[api.hrn]
+        credential = api.getCredential()
+        # ensure site record exists
+        site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
+        # ensure slice record exists
+        slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
+        # ensure person records exists
+        persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
+    
+    def parse_resources(self, text, slice_xrn):
+        resources = []
+        urn = hrn_to_urn(slice_xrn, 'sliver')
+        plc_slice = re.search("Slice Status => ([^\n]+)", text)
+        if plc_slice and plc_slice.group(1) != 'NONE':
+            res = {}
+            res['geni_urn'] = urn + '_plc_slice'
+            res['geni_error'] = ''
+            res['geni_status'] = 'unknown'
+            if plc_slice.group(1) == 'CREATED':
+                res['geni_status'] = 'ready'
+            resources.append(res)
+        vlans = re.findall("GRI => ([^\n]+)\n\t  Status => ([^\n]+)", text)
+        for vlan in vlans:
+            res = {}
+            res['geni_error'] = ''
+            res['geni_urn'] = urn + '_vlan_' + vlan[0]
+            if vlan[1] == 'ACTIVE':
+                res['geni_status'] = 'ready'
+            elif vlan[1] == 'FAILED':
+                res['geni_status'] = 'failed'
+            else:
+                res['geni_status'] = 'configuring'
+            resources.append(res)
+        return resources
+    
+    def slice_status(self, api, slice_xrn, creds):
+        urn = hrn_to_urn(slice_xrn, 'slice')
+        result = {}
+        top_level_status = 'unknown'
+        slice_id = self.get_plc_slice_id(creds, urn)
+        (ret, output) = self.call_am_apiclient("QuerySliceNetworkClient", [slice_id,], 5)
+        # parse output into rspec XML
+        if output.find("Unkown Rspec:") > 0:
+            top_level_status = 'failed'
+            result['geni_resources'] = ''
+        else:
+            has_failure = 0
+            all_active = 0
+            if output.find("Status => FAILED") > 0:
+                top_level_status = 'failed'
+            elif (    output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0
+                   or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0
+                 ):
+                top_level_status = 'configuring'
+            else:
+                top_level_status = 'ready'
+            result['geni_resources'] = self.parse_resources(output, slice_xrn)
+        result['geni_urn'] = urn
+        result['geni_status'] = top_level_status
+        return result
+    
+    def create_slice(self, api, xrn, cred, rspec, users):
+        indx1 = rspec.find("<RSpec")
+        indx2 = rspec.find("</RSpec>")
+        if indx1 > -1 and indx2 > indx1:
+            rspec = rspec[indx1+len("<RSpec type=\"SFA\">"):indx2-1]
+        rspec_path = self.save_rspec_to_file(rspec)
+        self.prepare_slice(api, xrn, cred, users)
+        slice_id = self.get_plc_slice_id(cred, xrn)
+        sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" +slice_id+ "/g\" " + rspec_path + ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" +slice_id+ "/g\" " + rspec_path
+        ret = self.shell_execute(sys_cmd, 1)
+        sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + rspec_path + "/g\""
+        ret = self.shell_execute(sys_cmd, 1)
+        (ret, output) = self.call_am_apiclient("CreateSliceNetworkClient", [rspec_path,], 3)
+        # parse output ?
+        rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"
+        return True
+    
+    def delete_slice(self, api, xrn, cred):
+        slice_id = self.get_plc_slice_id(cred, xrn)
+        (ret, output) = self.call_am_apiclient("DeleteSliceNetworkClient", [slice_id,], 3)
+        # parse output ?
+        return 1
+    
+    
+    def get_rspec(self, api, cred, slice_urn):
+        logger.debug("#### called max-get_rspec")
+        #geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
+        if slice_urn == None:
+            (ret, output) = self.call_am_apiclient("GetResourceTopology", ['all', '\"\"'], 5)
+        else:
+            slice_id = self.get_plc_slice_id(cred, slice_urn)
+            (ret, output) = self.call_am_apiclient("GetResourceTopology", ['all', slice_id,], 5)
+        # parse output into rspec XML
+        if output.find("No resouce found") > 0:
+            rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
+        else:
+            comp_rspec = self.get_xml_by_tag(output, 'computeResource')
+            logger.debug("#### computeResource %s" % comp_rspec)
+            topo_rspec = self.get_xml_by_tag(output, 'topology')
+            logger.debug("#### topology %s" % topo_rspec)
+            rspec = "<RSpec type=\"SFA\"> <network name=\"" + Config().get_interface_hrn() + "\">"
+            if comp_rspec != None:
+                rspec = rspec + self.get_xml_by_tag(output, 'computeResource')
+            if topo_rspec != None:
+                rspec = rspec + self.get_xml_by_tag(output, 'topology')
+            rspec = rspec + "</network> </RSpec>"
+        return (rspec)
+    
+    def start_slice(self, api, xrn, cred):
+        # service not supported
+        return None
+    
+    def stop_slice(self, api, xrn, cred):
+        # service not supported
+        return None
+    
+    def reset_slices(self, api, xrn):
+        # service not supported
+        return None
+    
+    ### GENI AM API Methods
+    
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        if Callids().already_handled(call_id): return {}
+        return self.slice_status(api, slice_xrn, creds)
+    
+    def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, call_id):
+        if Callids().already_handled(call_id): return ""
+        #TODO: create real CreateSliver response rspec
+        ret = self.create_slice(api, slice_xrn, creds, rspec_string, users)
+        if ret:
+            return self.get_rspec(api, creds, slice_xrn)
+        else:
+            return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        if Callids().already_handled(call_id): return ""
+        return self.delete_slice(api, xrn, creds)
+    
+    # no caching
+    def ListResources(self, api, creds, options, call_id):
+        if Callids().already_handled(call_id): return ""
+        # version_string = "rspec_%s" % (rspec_version.get_version_name())
+        slice_urn = options.get('geni_slice_urn')
+        return self.get_rspec(api, creds, slice_urn)
+    
+    def fetch_context(self, slice_hrn, user_hrn, contexts):
+        """
+        Returns the request context required by sfatables. At some point, this mechanism should be changed
+        to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
+        return the basic information needed in a dict.
+        """
+        base_context = {'sfa':{'user':{'hrn':user_hrn}}}
+        return base_context
+
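For reference, the id mangling documented on get_plc_slice_id above (plc.maxpl.xislice1 --> maxpl_xislice1) amounts to the following; this is only an illustrative sketch with a hypothetical helper name, independent of the urn_to_hrn machinery:

    def hrn_to_max_slice_id(hrn):
        # pick whichever separator the hrn actually uses, defaulting to '.'
        sep = ':' if ':' in hrn else ('+' if '+' in hrn else '.')
        parts = hrn.split(sep)
        # keep only the last two components, joined with an underscore
        return parts[-2] + '_' + parts[-1]

    assert hrn_to_max_slice_id("plc.maxpl.xislice1") == "maxpl_xislice1"
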
diff --git a/sfa/managers/aggregate_manager_openflow.py b/sfa/managers/aggregate_manager_openflow.py
deleted file mode 100755 (executable)
index a804a65..0000000
+++ /dev/null
@@ -1,178 +0,0 @@
-import sys
-
-import socket
-import struct
-
-#The following is not essential
-#from soaplib.wsgi_soap import SimpleWSGISoapApp
-#from soaplib.serializers.primitive import *
-#from soaplib.serializers.clazz import *
-
-from sfa.util.faults import *
-from sfa.util.xrn import urn_to_hrn
-from sfa.server.registry import Registries
-from sfa.util.config import Config
-from sfa.plc.nodes import *
-from sfa.util.callids import Callids
-
-# Message IDs for all the SFA light calls
-# This will be used by the aggrMgr controller
-SFA_GET_RESOURCES = 101
-SFA_CREATE_SLICE = 102
-SFA_START_SLICE = 103
-SFA_STOP_SLICE = 104
-SFA_DELETE_SLICE = 105
-SFA_GET_SLICES = 106
-SFA_RESET_SLICES = 107
-
-DEBUG = 1
-
-def print_buffer(buf):
-    for i in range(0,len(buf)):
-        print('%x' % buf[i])
-
-def extract(sock):
-    # Shud we first obtain the message length?
-    # msg_len = socket.ntohs(sock.recv(2))
-    msg = ""
-
-    while (1):
-        try:
-            chunk = sock.recv(1)
-        except socket.error, message:
-            if 'timed out' in message:
-                break
-            else:
-                sys.exit("Socket error: " + message)
-
-        if len(chunk) == 0:
-            break
-        msg += chunk
-
-    print 'Done extracting %d bytes of response from aggrMgr' % len(msg)
-    return msg
-   
-def connect(server, port):
-    '''Connect to the Aggregate Manager module'''
-    sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )
-    sock.connect ( ( server, port) )
-    sock.settimeout(1)
-    if DEBUG: print 'Connected!'
-    return sock
-    
-def connect_aggrMgr():
-    (aggr_mgr_ip, aggr_mgr_port) = Config().get_openflow_aggrMgr_info()
-    if DEBUG: print """Connecting to port %d of %s""" % (aggr_mgr_port, aggr_mgr_ip)
-    return connect(aggr_mgr_ip, aggr_mgr_port)
-
-def generate_slide_id(cred, hrn):
-    if cred == None:
-        cred = ""
-    if hrn == None:
-        hrn = ""
-    #return cred + '_' + hrn
-    return str(hrn)
-
-def msg_aggrMgr(cred, hrn, msg_id):
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds' % len(slice_id), msg_id, slice_id)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        aggrMgr_sock.close()
-        return 1
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return 0
-
-def start_slice(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received start_slice call"
-    return msg_aggrMgr(SFA_START_SLICE)
-
-def stop_slice(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received stop_slice call"
-    return msg_aggrMgr(SFA_STOP_SLICE)
-
-def DeleteSliver(cred, xrn, call_id):
-    if Callids().already_handled(call_id): return ""
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received DeleteSliver call"
-    return msg_aggrMgr(SFA_DELETE_SLICE)
-
-def reset_slices(cred, xrn):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received reset_slices call"
-    return msg_aggrMgr(SFA_RESET_SLICES)
-
-### Thierry: xxx this should ahve api as a first arg - probably outdated 
-def CreateSliver(cred, xrn, rspec, call_id):
-    if Callids().already_handled(call_id): return ""
-
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received CreateSliver call"
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds%ds' % (len(slice_id)+1, len(rspec)), SFA_CREATE_SLICE, slice_id, rspec)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        if DEBUG: print "Sent %d bytes and closing connection" % len(buf)
-        aggrMgr_sock.close()
-
-        if DEBUG: print "----------------"
-        return rspec
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return ""
-
-# Thierry : xxx this would need to handle call_id like the other AMs but is outdated...
-def ListResources(cred, xrn=None):
-    hrn = urn_to_hrn(xrn)[0]
-    if DEBUG: print "Received ListResources call"
-    slice_id = generate_slide_id(cred, hrn)
-
-    msg = struct.pack('> B%ds' % len(slice_id), SFA_GET_RESOURCES, slice_id)
-    buf = struct.pack('> H', len(msg)+2) + msg
-
-    try:
-        aggrMgr_sock = connect_aggrMgr()
-        aggrMgr_sock.send(buf)
-        resource_list = extract(aggrMgr_sock);
-        aggrMgr_sock.close()
-
-        if DEBUG: print "----------------"
-        return resource_list 
-    except socket.error, message:
-        print "Socket error"
-    except IOerror, message:
-        print "IO error"
-    return None
-
-"""
-Returns the request context required by sfatables. At some point, this mechanism should be changed
-to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
-return the basic information needed in a dict.
-"""
-def fetch_context(slice_hrn, user_hrn, contexts):
-    base_context = {'sfa':{'user':{'hrn':user_hrn}}}
-    return base_context
-
-def main():
-    r = RSpec()
-    r.parseFile(sys.argv[1])
-    rspec = r.toDict()
-    CreateSliver(None,'plc',rspec,'call-id-plc')
-    
-if __name__ == "__main__":
-    main()
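The wire format used by the deleted module above is a simple length-prefixed frame: a 2-byte big-endian length that counts the body plus the prefix itself, a 1-byte message id, then the slice id (CreateSliver also appends the rspec). A minimal sketch of that framing, with hypothetical helper names and the same struct formats:

    import struct

    SFA_CREATE_SLICE = 102  # message id taken from the deleted module

    def pack_frame(msg_id, payload):
        # body = 1-byte message id followed by the payload
        body = struct.pack('> B%ds' % len(payload), msg_id, payload)
        # frame = 2-byte length prefix (counting itself) + body
        return struct.pack('> H', len(body) + 2) + body

    def unpack_frame(frame):
        (length,) = struct.unpack('> H', frame[:2])
        (msg_id,) = struct.unpack('> B', frame[2:3])
        return msg_id, frame[3:length]

    assert unpack_frame(pack_frame(SFA_CREATE_SLICE, b'plc.of.slice1'))[0] == SFA_CREATE_SLICE
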
index 8aca53c..164b2d6 100644 (file)
@@ -29,26 +29,26 @@ def SliverStatus(api, slice_xrn, creds):
            
 def start_slice(api, xrn, creds):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanger.Start(slicename)
+    api.driver.nodemanager.Start(slicename)
 
 def stop_slice(api, xrn, creds):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanager.Stop(slicename)
+    api.driver.nodemanager.Stop(slicename)
 
 def DeleteSliver(api, xrn, creds, call_id):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
-    api.nodemanager.Destroy(slicename)
+    api.driver.nodemanager.Destroy(slicename)
 
 def reset_slice(api, xrn):
     slicename = PlXrn(xrn, type='slice').pl_slicename()
     if not api.sliver_exists(slicename):
         raise SliverDoesNotExist(slicename)
-    api.nodemanager.ReCreate(slicename)
+    api.driver.nodemanager.ReCreate(slicename)
  
 # xxx outdated - this should accept a credential & call_id
 def ListSlices(api):
     # this returns a tuple, the data we want is at index 1 
-    xids = api.nodemanager.GetXIDs()
+    xids = api.driver.nodemanager.GetXIDs()
     # unfortunately the data we want is given to us as 
     # a string but we really want it as a dict
     # lets eval it
@@ -65,6 +65,6 @@ def redeem_ticket(api, ticket_string):
 
     # convert ticket to format nm is used to
     nm_ticket = xmlrpclib.dumps((ticket.attributes,), methodresponse=True)
-    api.nodemanager.AdminTicket(nm_ticket)
+    api.driver.nodemanager.AdminTicket(nm_ticket)
     
 
index 5231c2a..b0326d6 100644 (file)
@@ -1,4 +1,6 @@
-from sfa.util.faults import SfaNotImplemented
+from types import ModuleType, ClassType
+
+from sfa.util.faults import SfaNotImplemented, SfaAPIError
 from sfa.util.sfalogging import logger
 
 ####################
@@ -14,7 +16,15 @@ class ManagerWrapper:
     the standard AttributeError         
     """
     def __init__(self, manager, interface):
-        self.manager = manager
+        if isinstance (manager, ModuleType):
+            # old-fashioned module implementation
+            self.manager = manager
+        elif isinstance (manager, ClassType):
+            # create an instance; we don't pass the api in argument as it is passed 
+            # to the actual method calls anyway
+            self.manager = manager()
+        else:
+            raise SfaAPIError,"Argument to ManagerWrapper must be a module or class"
         self.interface = interface
         
     def __getattr__(self, method):
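The change above lets ManagerWrapper accept either a plain module or a manager class, instantiating the class once; the api object keeps being passed on every call, so the instance needs no constructor arguments. A standalone sketch of that dispatch (hypothetical names, new-style classes rather than the ClassType check used here):

    from types import ModuleType

    class Wrapper(object):
        def __init__(self, manager):
            if isinstance(manager, ModuleType):
                # old-fashioned module implementation: use it directly
                self.manager = manager
            elif isinstance(manager, type):
                # class implementation: create a single instance up front
                self.manager = manager()
            else:
                raise TypeError("manager must be a module or a class")

        def __getattr__(self, name):
            # delegate every lookup to the wrapped implementation
            return getattr(self.manager, name)
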
index e4a5b17..5888b2b 100644 (file)
@@ -46,7 +46,7 @@ def get_credential(api, xrn, type, is_self=False):
 
     # verify_cancreate_credential requires that the member lists
     # (researchers, pis, etc) be filled in
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record, api.aggregates)
     if record['type']=='user':
        if not record['enabled']:
           raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
@@ -119,7 +119,9 @@ def resolve(api, xrns, type=None, full=True):
         xrns = xrn_dict[registry_hrn]
         if registry_hrn != api.hrn:
             credential = api.getCredential()
-            peer_records = registries[registry_hrn].Resolve(xrns, credential)
+            interface = api.registries[registry_hrn]
+            server = api.server_proxy(interface, credential)
+            peer_records = server.Resolve(xrns, credential)
             records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
 
     # try resolving the remaining unfound records at the local registry
@@ -129,7 +131,7 @@ def resolve(api, xrns, type=None, full=True):
     table = SfaTable()
     local_records = table.findObjects({'hrn': remaining_hrns})
     if full:
-        api.fill_record_info(local_records)
+        api.driver.fill_record_info(local_records, api.aggregates)
     
     # convert local record objects to dicts
     records.extend([dict(record) for record in local_records])
@@ -155,13 +157,14 @@ def list(api, xrn, origin_hrn=None):
     #if there was no match then this record belongs to an unknow registry
     if not registry_hrn:
         raise MissingAuthority(xrn)
-    
     # if the best match (longest matching hrn) is not the local registry,
     # forward the request
     records = []    
     if registry_hrn != api.hrn:
         credential = api.getCredential()
-        record_list = registries[registry_hrn].List(xrn, credential)
+        interface = api.registries[registry_hrn]
+        server = api.server_proxy(interface, credential)
+        record_list = server.List(xrn, credential)
         records = [SfaRecord(dict=record).as_dict() for record in record_list]
     
     # if we still have not found the record yet, try the local registry
@@ -231,10 +234,10 @@ def register(api, record):
         # get the GID from the newly created authority
         gid = auth_info.get_gid_object()
         record.set_gid(gid.save_to_string(save_parents=True))
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
-        sites = api.plshell.GetSites(api.plauth, [pl_record['login_base']])
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
+        sites = api.driver.GetSites([pl_record['login_base']])
         if not sites:
-            pointer = api.plshell.AddSite(api.plauth, pl_record)
+            pointer = api.driver.AddSite(pl_record)
         else:
             pointer = sites[0]['site_id']
 
@@ -243,45 +246,45 @@ def register(api, record):
 
     elif (type == "slice"):
         acceptable_fields=['url', 'instantiation', 'name', 'description']
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
         for key in pl_record.keys():
             if key not in acceptable_fields:
                 pl_record.pop(key)
-        slices = api.plshell.GetSlices(api.plauth, [pl_record['name']])
+        slices = api.driver.GetSlices([pl_record['name']])
         if not slices:
-             pointer = api.plshell.AddSlice(api.plauth, pl_record)
+             pointer = api.driver.AddSlice(pl_record)
         else:
              pointer = slices[0]['slice_id']
         record.set_pointer(pointer)
         record['pointer'] = pointer
 
     elif  (type == "user"):
-        persons = api.plshell.GetPersons(api.plauth, [record['email']])
+        persons = api.driver.GetPersons([record['email']])
         if not persons:
-            pointer = api.plshell.AddPerson(api.plauth, dict(record))
+            pointer = api.driver.AddPerson(dict(record))
         else:
             pointer = persons[0]['person_id']
 
         if 'enabled' in record and record['enabled']:
-            api.plshell.UpdatePerson(api.plauth, pointer, {'enabled': record['enabled']})
+            api.driver.UpdatePerson(pointer, {'enabled': record['enabled']})
         # add this person to the site only if he is being added for the first
         # time by sfa and doesn't already exist in plc
         if not persons or not persons[0]['site_ids']:
             login_base = get_leaf(record['authority'])
-            api.plshell.AddPersonToSite(api.plauth, pointer, login_base)
+            api.driver.AddPersonToSite(pointer, login_base)
 
         # What roles should this user have?
-        api.plshell.AddRoleToPerson(api.plauth, 'user', pointer)
+        api.driver.AddRoleToPerson('user', pointer)
         # Add the user's key
         if pub_key:
-            api.plshell.AddPersonKey(api.plauth, pointer, {'key_type' : 'ssh', 'key' : pub_key})
+            api.driver.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
 
     elif (type == "node"):
-        pl_record = api.sfa_fields_to_pl_fields(type, hrn, record)
+        pl_record = api.driver.sfa_fields_to_pl_fields(type, hrn, record)
         login_base = hrn_to_pl_login_base(record['authority'])
-        nodes = api.plshell.GetNodes(api.plauth, [pl_record['hostname']])
+        nodes = api.driver.GetNodes([pl_record['hostname']])
         if not nodes:
-            pointer = api.plshell.AddNode(api.plauth, login_base, pl_record)
+            pointer = api.driver.AddNode(login_base, pl_record)
         else:
             pointer = nodes[0]['node_id']
 
@@ -291,7 +294,7 @@ def register(api, record):
     record['record_id'] = record_id
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(None, record)
+    api.driver.update_membership(None, record)
 
     return record.get_gid_object().save_to_string(save_parents=True)
 
@@ -310,7 +313,7 @@ def update(api, record_dict):
 
     # Update_membership needs the membership lists in the existing record
     # filled in, so it can see if members were added or removed
-    api.fill_record_info(record)
+    api.driver.fill_record_info(record, api.aggregates)
 
     # Use the pointer from the existing record, not the one that the user
     # gave us. This prevents the user from inserting a forged pointer
@@ -318,13 +321,13 @@ def update(api, record_dict):
     # update the PLC information that was specified with the record
 
     if (type == "authority"):
-        api.plshell.UpdateSite(api.plauth, pointer, new_record)
+        api.driver.UpdateSite(pointer, new_record)
 
     elif type == "slice":
-        pl_record=api.sfa_fields_to_pl_fields(type, hrn, new_record)
+        pl_record=api.driver.sfa_fields_to_pl_fields(type, hrn, new_record)
         if 'name' in pl_record:
             pl_record.pop('name')
-            api.plshell.UpdateSlice(api.plauth, pointer, pl_record)
+            api.driver.UpdateSlice(pointer, pl_record)
 
     elif type == "user":
         # SMBAKER: UpdatePerson only allows a limited set of fields to be
@@ -337,14 +340,14 @@ def update(api, record_dict):
                        'password', 'phone', 'url', 'bio', 'accepted_aup',
                        'enabled']:
                 update_fields[key] = all_fields[key]
-        api.plshell.UpdatePerson(api.plauth, pointer, update_fields)
+        api.driver.UpdatePerson(pointer, update_fields)
 
         if 'key' in new_record and new_record['key']:
             # must check this key against the previous one if it exists
-            persons = api.plshell.GetPersons(api.plauth, [pointer], ['key_ids'])
+            persons = api.driver.GetPersons([pointer], ['key_ids'])
             person = persons[0]
             keys = person['key_ids']
-            keys = api.plshell.GetKeys(api.plauth, person['key_ids'])
+            keys = api.driver.GetKeys(person['key_ids'])
             key_exists = False
             if isinstance(new_record['key'], types.ListType):
                 new_key = new_record['key'][0]
@@ -354,11 +357,11 @@ def update(api, record_dict):
             # Delete all stale keys
             for key in keys:
                 if new_record['key'] != key['key']:
-                    api.plshell.DeleteKey(api.plauth, key['key_id'])
+                    api.driver.DeleteKey(key['key_id'])
                 else:
                     key_exists = True
             if not key_exists:
-                api.plshell.AddPersonKey(api.plauth, pointer, {'key_type': 'ssh', 'key': new_key})
+                api.driver.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
 
             # update the openssl key and gid
             pkey = convert_public_key(new_key)
@@ -370,13 +373,13 @@ def update(api, record_dict):
             table.update(record)
 
     elif type == "node":
-        api.plshell.UpdateNode(api.plauth, pointer, new_record)
+        api.driver.UpdateNode(pointer, new_record)
 
     else:
         raise UnknownSfaType(type)
 
     # update membership for researchers, pis, owners, operators
-    api.update_membership(record, new_record)
+    api.driver.update_membership(record, new_record)
     
     return 1 
 
@@ -408,20 +411,20 @@ def remove(api, xrn, origin_hrn=None):
                 except:
                     pass
     if type == "user":
-        persons = api.plshell.GetPersons(api.plauth, record['pointer'])
+        persons = api.driver.GetPersons(record['pointer'])
         # only delete this person if he has site ids. if he doesnt, it probably means
         # he was just removed from a site, not actually deleted
         if persons and persons[0]['site_ids']:
-            api.plshell.DeletePerson(api.plauth, record['pointer'])
+            api.driver.DeletePerson(record['pointer'])
     elif type == "slice":
-        if api.plshell.GetSlices(api.plauth, record['pointer']):
-            api.plshell.DeleteSlice(api.plauth, record['pointer'])
+        if api.driver.GetSlices(record['pointer']):
+            api.driver.DeleteSlice(record['pointer'])
     elif type == "node":
-        if api.plshell.GetNodes(api.plauth, record['pointer']):
-            api.plshell.DeleteNode(api.plauth, record['pointer'])
+        if api.driver.GetNodes(record['pointer']):
+            api.driver.DeleteNode(record['pointer'])
     elif type == "authority":
-        if api.plshell.GetSites(api.plauth, record['pointer']):
-            api.plshell.DeleteSite(api.plauth, record['pointer'])
+        if api.driver.GetSites(record['pointer']):
+            api.driver.DeleteSite(record['pointer'])
     else:
         raise UnknownSfaType(type)
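The user-update path above keeps the stored SSH keys in sync with the key supplied in the record: every stored key that differs from the incoming one is deleted, and the incoming key is added only if it was not already present. A small sketch of that reconciliation, with the driver calls abstracted into hypothetical callables:

    def reconcile_ssh_key(existing_keys, new_key, delete_key, add_key):
        # existing_keys: dicts with 'key_id' and 'key', as returned by GetKeys
        key_exists = False
        for key in existing_keys:
            if key['key'] != new_key:
                delete_key(key['key_id'])   # drop stale keys
            else:
                key_exists = True
        if not key_exists:
            add_key({'key_type': 'ssh', 'key': new_key})
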
 
index cba7295..cf60d78 100644 (file)
@@ -10,70 +10,77 @@ from sfa.trust.credential import Credential
 
 from sfa.util.sfalogging import logger
 from sfa.util.xrn import Xrn, urn_to_hrn
-from sfa.util.threadmanager import ThreadManager
 from sfa.util.version import version_core
 from sfa.util.callids import Callids
 
+from sfa.server.threadmanager import ThreadManager
+
 from sfa.rspecs.rspec_converter import RSpecConverter
 from sfa.rspecs.version_manager import VersionManager
 from sfa.rspecs.rspec import RSpec 
 from sfa.client.client_helper import sfa_to_pg_users_arg
 
-def _call_id_supported(api, server):
-    """
-    Returns true if server support the optional call_id arg, false otherwise.
-    """
-    server_version = api.get_cached_server_version(server)
-
-    if 'sfa' in server_version:
-        code_tag = server_version['code_tag']
-        code_tag_parts = code_tag.split("-")
-
-        version_parts = code_tag_parts[0].split(".")
-        major, minor = version_parts[0:2]
-        rev = code_tag_parts[1]
-        if int(major) > 1:
-            if int(minor) > 0 or int(rev) > 20:
-                return True
-    return False
-
-# we have specialized xmlrpclib.ServerProxy to remember the input url
-# OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
-def get_serverproxy_url (server):
-    try:
-        return server.get_url()
-    except:
-        logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
-        return server._ServerProxy__host + server._ServerProxy__handler
-
-def GetVersion(api):
-    # peers explicitly in aggregates.xml
-    peers =dict ([ (peername,get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
-                   if peername != api.hrn])
-    version_manager = VersionManager()
-    ad_rspec_versions = []
-    request_rspec_versions = []
-    for rspec_version in version_manager.versions:
-        if rspec_version.content_type in ['*', 'ad']:
-            ad_rspec_versions.append(rspec_version.to_dict())
-        if rspec_version.content_type in ['*', 'request']:
-            request_rspec_versions.append(rspec_version.to_dict())
-    default_rspec_version = version_manager.get_version("sfa 1").to_dict()
-    xrn=Xrn(api.hrn, 'authority+sa')
-    version_more = {'interface':'slicemgr',
-                    'hrn' : xrn.get_hrn(),
-                    'urn' : xrn.get_urn(),
-                    'peers': peers,
-                    'request_rspec_versions': request_rspec_versions,
-                    'ad_rspec_versions': ad_rspec_versions,
-                    'default_ad_rspec': default_rspec_version
+class SliceManager:
+    def __init__ (self):
+    #    self.caching=False
+        self.caching=True
+        
+    
+    def _call_id_supported(self, api, server):
+        """
+        Returns true if server support the optional call_id arg, false otherwise.
+        """
+        server_version = api.get_cached_server_version(server)
+    
+        if 'sfa' in server_version:
+            code_tag = server_version['code_tag']
+            code_tag_parts = code_tag.split("-")
+    
+            version_parts = code_tag_parts[0].split(".")
+            major, minor = version_parts[0:2]
+            rev = code_tag_parts[1]
+            if int(major) > 1:
+                if int(minor) > 0 or int(rev) > 20:
+                    return True
+        return False
+    
+    # we have specialized xmlrpclib.ServerProxy to remember the input url
+    # OTOH it's not clear if we're only dealing with XMLRPCServerProxy instances
+    def get_serverproxy_url (self, server):
+        try:
+            return server.get_url()
+        except:
+            logger.warning("GetVersion, falling back to xmlrpclib.ServerProxy internals")
+            return server._ServerProxy__host + server._ServerProxy__handler
+    
+    def GetVersion(self, api):
+        # peers explicitly in aggregates.xml
+        peers =dict ([ (peername,self.get_serverproxy_url(v)) for (peername,v) in api.aggregates.iteritems()
+                       if peername != api.hrn])
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        default_rspec_version = version_manager.get_version("sfa 1").to_dict()
+        xrn=Xrn(api.hrn, 'authority+sa')
+        version_more = {'interface':'slicemgr',
+                        'hrn' : xrn.get_hrn(),
+                        'urn' : xrn.get_urn(),
+                        'peers': peers,
+                        'request_rspec_versions': request_rspec_versions,
+                        'ad_rspec_versions': ad_rspec_versions,
+                        'default_ad_rspec': default_rspec_version
                     }
-    sm_version=version_core(version_more)
-    # local aggregate if present needs to have localhost resolved
-    if api.hrn in api.aggregates:
-        local_am_url=get_serverproxy_url(api.aggregates[api.hrn])
-        sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
-    return sm_version
+        sm_version=version_core(version_more)
+        # local aggregate if present needs to have localhost resolved
+        if api.hrn in api.aggregates:
+            local_am_url=self.get_serverproxy_url(api.aggregates[api.hrn])
+            sm_version['peers'][api.hrn]=local_am_url.replace('localhost',sm_version['hostname'])
+        return sm_version
 
 def drop_slicemgr_stats(rspec):
     try:
@@ -115,14 +122,9 @@ def ListResources(api, creds, options, call_id):
         args = [credential, my_opts]
         tStart = time.time()
         try:
-            if _call_id_supported(api, server):
-                args.append(call_id)
-            version = api.get_cached_server_version(server)
-            # force ProtoGENI aggregates to give us a v2 RSpec
-            if 'sfa' not in version.keys():
-                my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
-            rspec = server.ListResources(*args)
-            return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+            stats_elements = rspec.xml.xpath('//statistics')
+            for node in stats_elements:
+                node.getparent().remove(node)
         except Exception, e:
             api.logger.log_exc("ListResources failed at %s" %(server.url))
             return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception", "exc_info": sys.exc_info()}
@@ -194,21 +196,176 @@ def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
     def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
         tStart = time.time()
         try:
-            # Need to call GetVersion at an aggregate to determine the supported
-            # rspec type/format beofre calling CreateSliver at an Aggregate.
+            stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
+            if stats_tags:
+                stats_tag = stats_tags[0]
+            else:
+                stats_tag = etree.SubElement(rspec.xml.root, "statistics", call=callname)
+    
+            etree.SubElement(stats_tag, "aggregate", name=str(aggname), elapsed=str(elapsed), status=str(status))
+        except Exception, e:
+            logger.warn("add_slicemgr_stat failed on  %s: %s" %(aggname, str(e)))
+    
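The statistics bookkeeping above records, per call, how each aggregate answered: it looks up (or creates) a <statistics call="..."> element in the result rspec and appends one <aggregate> entry to it. A minimal sketch of that element handling with lxml, operating on a bare element tree rather than the SFA RSpec wrapper (names hypothetical):

    from lxml import etree

    def add_aggregate_stat(root, call, aggregate, elapsed, status):
        # reuse an existing <statistics call="..."> element or create one
        stats = root.xpath('//statistics[@call="%s"]' % call)
        stats_tag = stats[0] if stats else etree.SubElement(root, "statistics", call=call)
        # one <aggregate> child per answering aggregate
        etree.SubElement(stats_tag, "aggregate", name=str(aggregate),
                         elapsed=str(elapsed), status=str(status))

    root = etree.Element("RSpec")
    add_aggregate_stat(root, "ListResources", "plc.site", 1.2, "success")
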
+    def ListResources(self, api, creds, options, call_id):
+        version_manager = VersionManager()
+        def _ListResources(aggregate, server, credential, opts, call_id):
+    
+            my_opts = copy(opts)
+            args = [credential, my_opts]
+            tStart = time.time()
+            try:
+                if self._call_id_supported(api, server):
+                    args.append(call_id)
+                version = api.get_cached_server_version(server)
+                # force ProtoGENI aggregates to give us a v2 RSpec
+                if 'sfa' not in version.keys():
+                    my_opts['rspec_version'] = version_manager.get_version('ProtoGENI 2').to_dict()
+                rspec = server.ListResources(*args)
+                return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+            except Exception, e:
+                api.logger.log_exc("ListResources failed at %s" %(server.url))
+                return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+    
+        if Callids().already_handled(call_id): return ""
+    
+        # get slice's hrn from options
+        xrn = options.get('geni_slice_urn', '')
+        (hrn, type) = urn_to_hrn(xrn)
+        if 'geni_compressed' in options:
+            del(options['geni_compressed'])
+    
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        version_string = "rspec_%s" % (rspec_version.to_string())
+    
+        # look in cache first
+        if self.caching and api.cache and not xrn:
+            rspec =  api.cache.get(version_string)
+            if rspec:
+                return rspec
+    
+        # get the callers hrn
+        valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+    
+            # get the rspec from the aggregate
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_ListResources, aggregate, server, [cred], options, call_id)
+    
+    
+        results = threads.get_results()
+        rspec_version = version_manager.get_version(options.get('rspec_version'))
+        if xrn:    
+            result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest')
+        else: 
+            result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'ad')
+        rspec = RSpec(version=result_version)
+        for result in results:
+            self.add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"], result["status"])
+            if result["status"]=="success":
+                try:
+                    rspec.version.merge(result["rspec"])
+                except:
+                    api.logger.log_exc("SM.ListResources: Failed to merge aggregate rspec")
+    
+        # cache the result
+        if self.caching and api.cache and not xrn:
+            api.cache.add(version_string, rspec.toxml())
+    
+        return rspec.toxml()
+    
+    
+    def CreateSliver(self, api, xrn, creds, rspec_str, users, call_id):
+    
+        version_manager = VersionManager()
+        def _CreateSliver(aggregate, server, xrn, credential, rspec, users, call_id):
+            tStart = time.time()
+            try:
+                # Need to call GetVersion at an aggregate to determine the supported
+                # rspec type/format before calling CreateSliver at an Aggregate.
+                server_version = api.get_cached_server_version(server)
+                requested_users = users
+                if 'sfa' not in server_version and 'geni_api' in server_version:
+                    # sfa aggregates support both sfa and pg rspecs, no need to convert
+                    # if aggregate supports sfa rspecs. otherwise convert to pg rspec
+                    rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
+                    filter = {'component_manager_id': server_version['urn']}
+                    rspec.filter(filter)
+                    rspec = rspec.toxml()
+                    requested_users = sfa_to_pg_users_arg(users)
+                args = [xrn, credential, rspec, requested_users]
+                if self._call_id_supported(api, server):
+                    args.append(call_id)
+                rspec = server.CreateSliver(*args)
+                return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
+            except: 
+                logger.log_exc('Something wrong in _CreateSliver with URL %s'%server.url)
+                return {"aggregate": aggregate, "elapsed": time.time()-tStart, "status": "exception"}
+    
+        if Callids().already_handled(call_id): return ""
+        # Validate the RSpec against PlanetLab's schema --disabled for now
+        # The schema used here needs to aggregate the PL and VINI schemas
+        # schema = "/var/www/html/schemas/pl.rng"
+        rspec = RSpec(rspec_str)
+    #    schema = None
+    #    if schema:
+    #        rspec.validate(schema)
+    
+        # if there is a <statistics> section, the aggregates don't care about it,
+        # so delete it.
+        self.drop_slicemgr_stats(rspec)
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+    
+        # get the callers hrn
+        hrn, type = urn_to_hrn(xrn)
+        valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM 
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            # Just send entire RSpec to each aggregate
+            threads.run(_CreateSliver, aggregate, server, xrn, [cred], rspec.toxml(), users, call_id)
+                
+        results = threads.get_results()
+        manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
+        result_rspec = RSpec(version=manifest_version)
+        for result in results:
+            self.add_slicemgr_stat(result_rspec, "CreateSliver", result["aggregate"], result["elapsed"], result["status"])
+            if result["status"]=="success":
+                try:
+                    result_rspec.version.merge(result["rspec"])
+                except:
+                    api.logger.log_exc("SM.CreateSliver: Failed to merge aggregate rspec")
+        return result_rspec.toxml()
+    
+    def RenewSliver(self, api, xrn, creds, expiration_time, call_id):
+        def _RenewSliver(server, xrn, creds, expiration_time, call_id):
             server_version = api.get_cached_server_version(server)
-            requested_users = users
-            if 'sfa' not in server_version and 'geni_api' in server_version:
-                # sfa aggregtes support both sfa and pg rspecs, no need to convert
-                # if aggregate supports sfa rspecs. otherwise convert to pg rspec
-                rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
-                filter = {'component_manager_id': server_version['urn']}
-                rspec.filter(filter)
-                rspec = rspec.toxml()
-                requested_users = sfa_to_pg_users_arg(users)
-            args = [xrn, credential, rspec, requested_users]
-            if _call_id_supported(api, server):
+            args =  [xrn, creds, expiration_time, call_id]
+            if self._call_id_supported(api, server):
                 args.append(call_id)
+<<<<<<< HEAD:sfa/managers/slice_manager.py
             rspec = server.CreateSliver(*args)
             return {"aggregate": aggregate, "rspec": rspec, "elapsed": time.time()-tStart, "status": "success"}
         except: 
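Every slice-manager call in this class follows the same fan-out pattern: obtain one (preferably delegated) credential, skip the aggregate that made the request unless it is the local one, run the per-aggregate helper through the ThreadManager, then merge the per-aggregate results. A compact sketch of that pattern using plain standard-library threads instead of the SFA ThreadManager (all names hypothetical):

    from threading import Thread

    def fan_out(aggregates, caller, local, call, *args):
        # aggregates: {hrn: server-like object}; skip the caller unless it is local
        results = []
        def run(name, server):
            try:
                results.append({"aggregate": name, "status": "success",
                                "value": call(server, *args)})
            except Exception:
                results.append({"aggregate": name, "status": "exception"})
        threads = [Thread(target=run, args=(name, server))
                   for (name, server) in aggregates.items()
                   if not (name == caller and name != local)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        return results
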
@@ -247,7 +404,180 @@ def CreateSliver(api, xrn, creds, rspec_str, users, call_id):
         server = api.get_server(interface, cred)
         # Just send entire RSpec to each aggregate
         threads.run(_CreateSliver, aggregate, server, xrn, [cred], rspec.toxml(), users, call_id)
+=======
+            return server.RenewSliver(*args)
+    
+        if Callids().already_handled(call_id): return True
+    
+        (hrn, type) = urn_to_hrn(xrn)
+        # get the callers hrn
+        valid_cred = api.auth.checkCredentials(creds, 'renewsliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_RenewSliver, server, xrn, [cred], expiration_time, call_id)
+        # 'and' the results
+        return reduce (lambda x,y: x and y, threads.get_results() , True)
+    
+    def DeleteSliver(self, api, xrn, creds, call_id):
+        def _DeleteSliver(server, xrn, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [xrn, creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.DeleteSliver(*args)
+    
+        if Callids().already_handled(call_id): return ""
+        (hrn, type) = urn_to_hrn(xrn)
+        # get the callers hrn
+        valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_DeleteSliver, server, xrn, [cred], call_id)
+        threads.get_results()
+        return 1
+    
+    
+    # first draft at a merging SliverStatus
+    def SliverStatus(self, api, slice_xrn, creds, call_id):
+        def _SliverStatus(server, xrn, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [xrn, creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.SliverStatus(*args)
+        
+        if Callids().already_handled(call_id): return {}
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run (_SliverStatus, server, slice_xrn, [cred], call_id)
+        results = threads.get_results()
+    
+        # get rid of any void result - e.g. when call_id was hit where by convention we return {}
+        results = [ result for result in results if result and result['geni_resources']]
+    
+        # do not try to combine if there's no result
+        if not results : return {}
+    
+        # otherwise let's merge stuff
+        overall = {}
+    
+        # mmh, it is expected that all results carry the same urn
+        overall['geni_urn'] = results[0]['geni_urn']
+        overall['pl_login'] = results[0]['pl_login']
+        # append all geni_resources
+        overall['geni_resources'] = \
+            reduce (lambda x,y: x+y, [ result['geni_resources'] for result in results] , [])
+        overall['status'] = 'unknown'
+        if overall['geni_resources']:
+            overall['status'] = 'ready'
+    
+        return overall
+    
+    def ListSlices(self, api, creds, call_id):
+        def _ListSlices(server, creds, call_id):
+            server_version = api.get_cached_server_version(server)
+            args =  [creds]
+            if self._call_id_supported(api, server):
+                args.append(call_id)
+            return server.ListSlices(*args)
+    
+        if Callids().already_handled(call_id): return []
+    
+        # look in cache first
+        if self.caching and api.cache:
+            slices = api.cache.get('slices')
+            if slices:
+                return slices
+    
+        # get the callers hrn
+        valid_cred = api.auth.checkCredentials(creds, 'listslices', None)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred= api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        # fetch from aggregates
+        for aggregate in api.aggregates:
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(_ListSlices, server, [cred], call_id)
+    
+        # combine results
+        results = threads.get_results()
+        slices = []
+        for result in results:
+            slices.extend(result)
+    
+        # cache the result
+        if self.caching and api.cache:
+            api.cache.add('slices', slices)
+    
+        return slices
+    
+    
+    def get_ticket(self, api, xrn, creds, rspec, users):
+        slice_hrn, type = urn_to_hrn(xrn)
+        # get the netspecs contained within the clients rspec
+        aggregate_rspecs = {}
+        tree= etree.parse(StringIO(rspec))
+        elements = tree.findall('./network')
+        for element in elements:
+            aggregate_hrn = element.values()[0]
+            aggregate_rspecs[aggregate_hrn] = rspec 
+    
+        # get the callers hrn
+        valid_cred = api.auth.checkCredentials(creds, 'getticket', slice_hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential() 
+        threads = ThreadManager()
+        for (aggregate, aggregate_rspec) in aggregate_rspecs.iteritems():
+            # prevent infinite loop. Dont send request back to caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+>>>>>>> a3996bfa45298c8d0abfd58916221abba737441c:sfa/managers/slice_manager.py
             
+<<<<<<< HEAD:sfa/managers/slice_manager.py
     results = threads.get_results()
     manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest')
     result_rspec = RSpec(version=manifest_version)
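The merging SliverStatus above combines the per-aggregate answers by dropping empty results, concatenating their geni_resources lists, and taking the slice urn from the first answer; the overall status becomes 'ready' as soon as any resources came back. A minimal sketch of that merge (pl_login omitted, names hypothetical):

    def merge_sliver_status(results):
        # keep only answers that actually carry resources
        results = [r for r in results if r and r.get('geni_resources')]
        if not results:
            return {}
        overall = {'geni_urn': results[0]['geni_urn'], 'geni_resources': []}
        for r in results:
            overall['geni_resources'].extend(r['geni_resources'])
        overall['status'] = 'ready' if overall['geni_resources'] else 'unknown'
        return overall
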
@@ -330,7 +660,13 @@ def SliverStatus(api, slice_xrn, creds, call_id):
         if _call_id_supported(api, server):
             args.append(call_id)
         return server.SliverStatus(*args)
+=======
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(server.GetTicket, xrn, [cred], aggregate_rspec, users)
+>>>>>>> a3996bfa45298c8d0abfd58916221abba737441c:sfa/managers/slice_manager.py
     
+<<<<<<< HEAD:sfa/managers/slice_manager.py
     if Callids().already_handled(call_id): return {}
     # attempt to use delegated credential first
     cred = api.getDelegatedCredential(creds)
@@ -438,118 +774,105 @@ def get_ticket(api, xrn, creds, rspec, users):
         # unless the caller is the aggregate's SM
         if caller_hrn == aggregate and aggregate != api.hrn:
             continue
+=======
+        results = threads.get_results()
+>>>>>>> a3996bfa45298c8d0abfd58916221abba737441c:sfa/managers/slice_manager.py
         
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(server.GetTicket, xrn, [cred], aggregate_rspec, users)
-
-    results = threads.get_results()
+        # gather information from each ticket 
+        rspec = None
+        initscripts = []
+        slivers = [] 
+        object_gid = None  
+        for result in results:
+            agg_ticket = SfaTicket(string=result)
+            attrs = agg_ticket.get_attributes()
+            if not object_gid:
+                object_gid = agg_ticket.get_gid_object()
+            if not rspec:
+                rspec = RSpec(agg_ticket.get_rspec())
+            else:
+                rspec.version.merge(agg_ticket.get_rspec())
+            initscripts.extend(attrs.get('initscripts', [])) 
+            slivers.extend(attrs.get('slivers', [])) 
+        
+        # merge info
+        attributes = {'initscripts': initscripts,
+                     'slivers': slivers}
+        
+        # create a new ticket
+        ticket = SfaTicket(subject = slice_hrn)
+        ticket.set_gid_caller(api.auth.client_gid)
+        ticket.set_issuer(key=api.key, subject=api.hrn)
+        ticket.set_gid_object(object_gid)
+        ticket.set_pubkey(object_gid.get_pubkey())
+        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+        ticket.set_attributes(attributes)
+        ticket.set_rspec(rspec.toxml())
+        ticket.encode()
+        ticket.sign()          
+        return ticket.save_to_string(save_parents=True)
     
-    # gather information from each ticket 
-    rspec = None
-    initscripts = []
-    slivers = [] 
-    object_gid = None  
-    for result in results:
-        agg_ticket = SfaTicket(string=result)
-        attrs = agg_ticket.get_attributes()
-        if not object_gid:
-            object_gid = agg_ticket.get_gid_object()
-        if not rspec:
-            rspec = RSpec(agg_ticket.get_rspec())
-        else:
-            rspec.version.merge(agg_ticket.get_rspec())
-        initscripts.extend(attrs.get('initscripts', [])) 
-        slivers.extend(attrs.get('slivers', [])) 
-    
-    # merge info
-    attributes = {'initscripts': initscripts,
-                 'slivers': slivers}
-    
-    # create a new ticket
-    ticket = SfaTicket(subject = slice_hrn)
-    ticket.set_gid_caller(api.auth.client_gid)
-    ticket.set_issuer(key=api.key, subject=api.hrn)
-    ticket.set_gid_object(object_gid)
-    ticket.set_pubkey(object_gid.get_pubkey())
-    #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-    ticket.set_attributes(attributes)
-    ticket.set_rspec(rspec.toxml())
-    ticket.encode()
-    ticket.sign()          
-    return ticket.save_to_string(save_parents=True)
-
-def start_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Dont send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)    
-        threads.run(server.Start, xrn, cred)
-    threads.get_results()    
-    return 1
-def stop_slice(api, xrn, creds):
-    hrn, type = urn_to_hrn(xrn)
-
-    # get the callers hrn
-    valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
-    caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
-
-    # attempt to use delegated credential first
-    cred = api.getDelegatedCredential(creds)
-    if not cred:
-        cred = api.getCredential()
-    threads = ThreadManager()
-    for aggregate in api.aggregates:
-        # prevent infinite loop. Dont send request back to caller
-        # unless the caller is the aggregate's SM
-        if caller_hrn == aggregate and aggregate != api.hrn:
-            continue
-        interface = api.aggregates[aggregate]
-        server = api.get_server(interface, cred)
-        threads.run(server.Stop, xrn, cred)
-    threads.get_results()    
-    return 1
-
-def reset_slice(api, xrn):
-    """
-    Not implemented
-    """
-    return 1
-
-def shutdown(api, xrn, creds):
-    """
-    Not implemented   
-    """
-    return 1
-
-def status(api, xrn, creds):
-    """
-    Not implemented 
-    """
-    return 1
-
-# this is plain broken
-#def main():
-#    r = RSpec()
-#    r.parseFile(sys.argv[1])
-#    rspec = r.toDict()
-#    CreateSliver(None,'plc.princeton.tmacktestslice',rspec,'create-slice-tmacktestslice')
-
-if __name__ == "__main__":
-    main()
+    def start_slice(self, api, xrn, creds):
+        hrn, type = urn_to_hrn(xrn)
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'startslice', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent an infinite loop: don't send the request back to the caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)    
+            threads.run(server.Start, xrn, cred)
+        threads.get_results()    
+        return 1
+     
+    def stop_slice(self, api, xrn, creds):
+        hrn, type = urn_to_hrn(xrn)
+    
+        # get the caller's hrn
+        valid_cred = api.auth.checkCredentials(creds, 'stopslice', hrn)[0]
+        caller_hrn = Credential(string=valid_cred).get_gid_caller().get_hrn()
+    
+        # attempt to use delegated credential first
+        cred = api.getDelegatedCredential(creds)
+        if not cred:
+            cred = api.getCredential()
+        threads = ThreadManager()
+        for aggregate in api.aggregates:
+            # prevent an infinite loop: don't send the request back to the caller
+            # unless the caller is the aggregate's SM
+            if caller_hrn == aggregate and aggregate != api.hrn:
+                continue
+            interface = api.aggregates[aggregate]
+            server = api.server_proxy(interface, cred)
+            threads.run(server.Stop, xrn, cred)
+        threads.get_results()    
+        return 1
+    
+    def reset_slice(self, api, xrn):
+        """
+        Not implemented
+        """
+        return 1
+    
+    def shutdown(self, api, xrn, creds):
+        """
+        Not implemented   
+        """
+        return 1
+    
+    def status(self, api, xrn, creds):
+        """
+        Not implemented 
+        """
+        return 1
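The slice manager methods in this file (get_ticket, start_slice, stop_slice above) all share the same fan-out skeleton: skip the calling aggregate to avoid request loops, open a proxy to every other aggregate, and run the remote calls in parallel through ThreadManager. A minimal sketch of that skeleton as a standalone helper; the helper name and its callable argument are illustrative, and ThreadManager and api.server_proxy are assumed to behave exactly as they are used above:

    def fan_out(api, caller_hrn, cred, make_call):
        # make_call(server) should invoke the desired remote method on the
        # given aggregate proxy, e.g. lambda server: server.Stop(xrn, cred)
        threads = ThreadManager()
        for aggregate in api.aggregates:
            # prevent an infinite loop: don't send the request back to the
            # caller unless the caller is this aggregate's slice manager
            if caller_hrn == aggregate and aggregate != api.hrn:
                continue
            interface = api.aggregates[aggregate]
            server = api.server_proxy(interface, cred)
            threads.run(make_call, server)
        return threads.get_results()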
 
index b0d7e51..a718c03 100644 (file)
@@ -367,12 +367,11 @@ class Slicetag:
     def write(self, api):
         if self.changed:
             if int(self.id) > 0:
-                api.plshell.UpdateSliceTag(api.plauth, self.id, self.value)
+                api.driver.UpdateSliceTag(self.id, self.value)
             else:
-                api.plshell.AddSliceTag(api.plauth, self.slice_id, 
-                                        self.tagname, self.value, self.node_id)
+                api.driver.AddSliceTag(self.slice_id, self.tagname, self.value, self.node_id)
         elif self.deleted and int(self.id) > 0:
-            api.plshell.DeleteSliceTag(api.plauth, self.id)
+            api.driver.DeleteSliceTag(self.id)
 
 
 """
@@ -662,7 +661,7 @@ Create a dictionary of site objects keyed by site ID
 """
 def get_sites(api):
     tmp = []
-    for site in api.plshell.GetSites(api.plauth):
+    for site in api.driver.GetSites():
         t = site['site_id'], Site(site)
         tmp.append(t)
     return dict(tmp)
@@ -673,7 +672,7 @@ Create a dictionary of node objects keyed by node ID
 """
 def get_nodes(api):
     tmp = []
-    for node in api.plshell.GetNodes(api.plauth):
+    for node in api.driver.GetNodes():
         t = node['node_id'], Node(node)
         tmp.append(t)
     return dict(tmp)
@@ -682,7 +681,7 @@ def get_nodes(api):
 Create a dictionary of slice objects keyed by slice ID
 """
 def get_slice(api, slicename):
-    slice = api.plshell.GetSlices(api.plauth, [slicename])
+    slice = api.driver.GetSlices([slicename])
     if slice:
         return Slice(slice[0])
     else:
@@ -693,7 +692,7 @@ Create a dictionary of slicetag objects keyed by slice tag ID
 """
 def get_slice_tags(api):
     tmp = []
-    for tag in api.plshell.GetSliceTags(api.plauth):
+    for tag in api.driver.GetSliceTags():
         t = tag['slice_tag_id'], Slicetag(tag)
         tmp.append(t)
     return dict(tmp)
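Every hunk in this file repeats the same mechanical change: api.plshell.SomeCall(api.plauth, ...) becomes api.driver.SomeCall(...), i.e. the PLC authentication structure is bound once inside the driver instead of being threaded through every call site. A rough sketch of the kind of wrapper that makes this possible, reusing the auth fields and xmlrpclib transport visible in the old getPLCShell code later in this diff; the class name is illustrative, and the real implementation lives in the new sfa/plc/plshell.py, which is not shown here:

    import xmlrpclib

    class AuthBoundShell:
        """Bind the PLC auth struct once so callers can drop api.plauth."""
        def __init__(self, config):
            self.auth = {'Username': config.SFA_PLC_USER,
                         'AuthMethod': 'password',
                         'AuthString': config.SFA_PLC_PASSWORD}
            self.proxy = xmlrpclib.Server(config.SFA_PLC_URL, verbose=0, allow_none=True)

        def __getattr__(self, name):
            # forward e.g. GetSites(filter) as proxy.GetSites(auth, filter)
            method = getattr(self.proxy, name)
            def call(*args):
                return method(self.auth, *args)
            return call

    # usage, mirroring the call sites above:
    #   driver = AuthBoundShell(config)
    #   sites = driver.GetSites({'peer_id': None})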
index fcade6c..ba1ce2a 100644 (file)
@@ -437,7 +437,7 @@ class ViniNetwork(Network):
     """
     def get_sites(self, api):
         tmp = []
-        for site in api.plshell.GetSites(api.plauth, {'peer_id': None}):
+        for site in api.driver.GetSites({'peer_id': None}):
             t = site['site_id'], ViniSite(self, site)
             tmp.append(t)
         return dict(tmp)
@@ -448,7 +448,7 @@ class ViniNetwork(Network):
     """
     def get_nodes(self, api):
         tmp = []
-        for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
+        for node in api.driver.GetNodes({'peer_id': None}):
             t = node['node_id'], ViniNode(self, node)
             tmp.append(t)
         return dict(tmp)
@@ -458,7 +458,7 @@ class ViniNetwork(Network):
     """
     def get_slice(self, api, hrn):
         slicename = hrn_to_pl_slicename(hrn)
-        slice = api.plshell.GetSlices(api.plauth, [slicename])
+        slice = api.driver.GetSlices([slicename])
         if slice:
             self.slice = ViniSlice(self, slicename, slice[0])
             return self.slice
index b25fbd1..74ce9de 100644 (file)
@@ -43,6 +43,4 @@ class CreateGid(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, xrn, self.name))
 
-        manager = self.api.get_interface_manager()
-
-        return manager.create_gid(self.api, xrn, cert)
+        return self.api.manager.create_gid(self.api, xrn, cert)
index dcb25eb..dd76d9b 100644 (file)
@@ -4,6 +4,7 @@ from sfa.util.method import Method
 from sfa.util.parameter import Parameter, Mixed
 from sfa.util.sfatablesRuntime import run_sfatables
 from sfa.trust.credential import Credential
+from sfa.rspecs.rspec import RSpec
 
 class CreateSliver(Method):
     """
@@ -42,8 +43,6 @@ class CreateSliver(Method):
             msg = "'users' musst be specified and cannot be null. You may need to update your client." 
             raise SfaInvalidArgument(name='users', extra=msg)  
 
-        manager = self.api.get_interface_manager()
-        
         # filter rspec through sfatables
         if self.api.interface in ['aggregate']:
             chain_name = 'INCOMING'
@@ -51,5 +50,9 @@ class CreateSliver(Method):
             chain_name = 'FORWARD-INCOMING'
         self.api.logger.debug("CreateSliver: sfatables on chain %s"%chain_name)
         rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
-
-        return manager.CreateSliver(self.api, slice_xrn, creds, rspec, users, call_id)
+        slivers = RSpec(rspec).version.get_nodes_with_slivers()
+        if slivers:
+            result = self.api.manager.CreateSliver(self.api, slice_xrn, creds, rspec, users, call_id)
+        else:
+            result = rspec     
+        return result
index f766cb1..58b8846 100644 (file)
@@ -32,7 +32,6 @@ class DeleteSliver(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.DeleteSliver(self.api, xrn, creds, call_id)
+        self.api.manager.DeleteSliver(self.api, xrn, creds, call_id)
  
         return 1 
index da3e97b..b645cd4 100644 (file)
@@ -42,7 +42,5 @@ class GetCredential(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))        
 
-        manager = self.api.get_interface_manager()
-        
-        return manager.get_credential(self.api, xrn, type)
+        return self.api.manager.get_credential(self.api, xrn, type)
 
index e50f940..601db6f 100644 (file)
@@ -31,8 +31,7 @@ class GetGids(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         
         # resolve the record
-        manager = self.api.get_interface_manager()
-        records = manager.resolve(self.api, xrns, full = False)
+        records = self.api.manager.resolve(self.api, xrns, full = False)
         if not records:
             raise RecordNotFound(xrns)
 
index de21ab5..37f2e7b 100644 (file)
@@ -52,10 +52,9 @@ class GetSelfCredential(Method):
         origin_hrn = Certificate(string=cert).get_subject()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
         
-        manager = self.api.get_interface_manager()
  
         # authenticate the gid
-        records = manager.resolve(self.api, xrn, type)
+        records = self.api.manager.resolve(self.api, xrn, type)
         if not records:
             raise RecordNotFound(hrn)
         record = SfaRecord(dict=records[0])
@@ -72,4 +71,4 @@ class GetSelfCredential(Method):
                     self.api.logger.debug("ConnectionKeyGIDMismatch, %s filename: %s"%(name,obj.filename))
             raise ConnectionKeyGIDMismatch(gid.get_subject())
         
-        return manager.get_credential(self.api, xrn, type, is_self=True)
+        return self.api.manager.get_credential(self.api, xrn, type, is_self=True)
index 1469693..c21ce16 100644 (file)
@@ -43,8 +43,6 @@ class GetTicket(Method):
         #log the call
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager()
-
         # filter rspec through sfatables
         if self.api.interface in ['aggregate']:
             chain_name = 'OUTGOING'
@@ -53,7 +51,5 @@ class GetTicket(Method):
         rspec = run_sfatables(chain_name, hrn, origin_hrn, rspec)
         
         # remove nodes that are not available at this interface from the rspec
-        ticket = manager.get_ticket(self.api, xrn, creds, rspec, users)
-        
-        return ticket
+        return self.api.manager.get_ticket(self.api, xrn, creds, rspec, users)
         
index 72fe806..8c215c4 100644 (file)
@@ -13,5 +13,4 @@ class GetVersion(Method):
 
     def call(self):
         self.api.logger.info("interface: %s\tmethod-name: %s" % (self.api.interface, self.name))
-        manager = self.api.get_interface_manager()
-        return manager.GetVersion(self.api)
+        return self.api.manager.GetVersion(self.api)
index a5d1123..ccd66b4 100644 (file)
@@ -31,5 +31,4 @@ class List(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
        
-        manager = self.api.get_interface_manager()
-        return manager.list(self.api, xrn) 
+        return self.api.manager.list(self.api, xrn) 
index b8d7e2d..a12447f 100644 (file)
@@ -36,9 +36,7 @@ class ListResources(Method):
         origin_hrn = options.get('origin_hrn', None)
         if not origin_hrn:
             origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
-        # get manager for this interface    
-        manager = self.api.get_interface_manager()
-        rspec = manager.ListResources(self.api, creds, options, call_id)
+        rspec = self.api.manager.ListResources(self.api, creds, options, call_id)
 
         # filter rspec through sfatables 
         if self.api.interface in ['aggregate']:
index 7fc8513..e8521c1 100644 (file)
@@ -27,6 +27,5 @@ class ListSlices(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        return manager.ListSlices(self.api, creds, call_id)
+        return self.api.manager.ListSlices(self.api, creds, call_id)
  
index cab0e93..c6a75e2 100644 (file)
@@ -23,8 +23,6 @@ class RedeemTicket(Method):
         valid_creds = self.api.auth.checkCredentials(cred, 'redeemticket')
         self.api.auth.check_ticket(ticket)
 
-        
         # send the call to the right manager
-        manager = self.api.get_interface_manager()
-        manager.redeem_ticket(self.api, ticket) 
+        self.api.manager.redeem_ticket(self.api, ticket) 
         return 1 
index 619ed00..92f230a 100644 (file)
@@ -36,6 +36,4 @@ class Register(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
         
-        manager = self.api.get_interface_manager()
-
-        return manager.register(self.api, record)
+        return self.api.manager.register(self.api, record)
index c547c26..98ef38b 100644 (file)
@@ -39,6 +39,4 @@ class Remove(Method):
         self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s"%(
                 self.api.interface, self.name, origin_hrn, xrn.get_urn()))
 
-        manager = self.api.get_interface_manager()
-
-        return manager.remove(self.api, xrn) 
+        return self.api.manager.remove(self.api, xrn) 
index fa30e88..c831924 100644 (file)
@@ -55,33 +55,33 @@ class RemovePeerObject(Method):
     def remove_plc_record(self, record):
         type = record['type']        
         if type == "user":
-            persons = self.api.plshell.GetPersons(self.api.plauth, {'person_id' : record['pointer']})
+            persons = self.api.driver.GetPersons({'person_id' : record['pointer']})
             if not persons:
                 return 1
             person = persons[0]
             if person['peer_id']:
                 peer = self.get_peer_name(person['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer)
-            self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer)
+            self.api.driver.DeletePerson(person['person_id'])
            
         elif type == "slice":
-            slices=self.api.plshell.GetSlices(self.api.plauth, {'slice_id' : record['pointer']})
+            slices=self.api.driver.GetSlices({'slice_id' : record['pointer']})
             if not slices:
                 return 1
             slice=slices[0]
             if slice['peer_id']:
                 peer = self.get_peer_name(slice['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
-            self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.api.driver.DeleteSlice(slice['slice_id'])
         elif type == "authority":
-            sites=self.api.plshell.GetSites(self.api.plauth, {'site_id' : record['pointer']})
+            sites=self.api.driver.GetSites({'site_id' : record['pointer']})
             if not sites:
                 return 1
             site=sites[0]
             if site['peer_id']:
                 peer = self.get_peer_name(site['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer)
-            self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer)
+            self.api.driver.DeleteSite(site['site_id'])
            
         else:
             raise UnknownSfaType(type)
@@ -89,7 +89,7 @@ class RemovePeerObject(Method):
         return 1
 
     def get_peer_name(self, peer_id):
-        peers = self.api.plshell.GetPeers(self.api.plauth, [peer_id], ['peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers([peer_id], ['peername', 'shortname', 'hrn_root'])
         if not peers:
             raise SfaInvalidArgument, "No such peer"
         peer = peers[0]
index 4a0e836..b34d424 100644 (file)
@@ -40,6 +40,5 @@ class RenewSliver(Method):
             raise InsufficientRights('Renewsliver: Credential expires before requested expiration time')
         if requested_time > datetime.datetime.utcnow() + datetime.timedelta(days=max_renew_days):
             raise Exception('Cannot renew > %s days from now' % max_renew_days)
-        manager = self.api.get_interface_manager()
-        return manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, call_id)    
+        return self.api.manager.RenewSliver(self.api, slice_xrn, valid_creds, expiration_time, call_id)    
     
index 74972cc..6277e1b 100644 (file)
@@ -40,6 +40,5 @@ class Resolve(Method):
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrns, self.name))
  
         # send the call to the right manager
-        manager = self.api.get_interface_manager()
-        return manager.resolve(self.api, xrns, type)
+        return self.api.manager.resolve(self.api, xrns, type)
             
index 18613b2..2f4bc0c 100644 (file)
@@ -24,8 +24,7 @@ class SliverStatus(Method):
 
         self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, hrn, self.name))
     
-        manager = self.api.get_interface_manager()
-        status = manager.SliverStatus(self.api, hrn, valid_creds, call_id)
+        status = self.api.manager.SliverStatus(self.api, hrn, valid_creds, call_id)
 
         return status
     
index 6882a37..7f8aefd 100644 (file)
@@ -30,7 +30,6 @@ class Start(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.start_slice(self.api, xrn, creds)
+        self.api.manager.start_slice(self.api, xrn, creds)
  
         return 1 
index e8d3397..48974ab 100644 (file)
@@ -30,7 +30,6 @@ class Stop(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
 
-        manager = self.api.get_interface_manager() 
-        manager.stop_slice(self.api, xrn, creds)
+        self.api.manager.stop_slice(self.api, xrn, creds)
  
         return 1 
index 31b17e9..a90a44e 100644 (file)
@@ -35,7 +35,5 @@ class Update(Method):
         origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
         self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
        
-        manager = self.api.get_interface_manager()
-        return manager.update(self.api, record_dict)
+        return self.api.manager.update(self.api, record_dict)
 
index 4bb6587..638332e 100644 (file)
@@ -25,10 +25,10 @@ class get_key(Method):
         # verify that the caller's ip address exists in the db and is an interface
         # for a node in the db
         (ip, port) = self.api.remote_addr
-        interfaces = self.api.plshell.GetInterfaces(self.api.plauth, {'ip': ip}, ['node_id'])
+        interfaces = self.api.driver.GetInterfaces({'ip': ip}, ['node_id'])
         if not interfaces:
             raise NonExistingRecord("no such ip %(ip)s" % locals())
-        nodes = self.api.plshell.GetNodes(self.api.plauth, [interfaces[0]['node_id']], ['node_id', 'hostname'])
+        nodes = self.api.driver.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
         if not nodes:
             raise NonExistingRecord("no such node using ip %(ip)s" % locals())
         node = nodes[0]
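The two driver calls above implement the lookup that get_key relies on: the caller's source IP is mapped to an interface record, and that interface's node_id is then resolved to the node itself. Restated as a small standalone helper; the function name is illustrative, and NonExistingRecord is the fault class already used above (assumed importable from sfa.util.faults):

    def node_for_ip(driver, ip):
        # map the caller's IP to an interface, then to its node record
        interfaces = driver.GetInterfaces({'ip': ip}, ['node_id'])
        if not interfaces:
            raise NonExistingRecord("no such ip %s" % ip)
        nodes = driver.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
        if not nodes:
            raise NonExistingRecord("no such node using ip %s" % ip)
        return nodes[0]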
index a510189..465ed05 100644 (file)
@@ -55,33 +55,33 @@ class remove_peer_object(Method):
     def remove_plc_record(self, record):
         type = record['type']        
         if type == "user":
-            persons = self.api.plshell.GetPersons(self.api.plauth, {'person_id' : record['pointer']})
+            persons = self.api.driver.GetPersons({'person_id' : record['pointer']})
             if not persons:
                 return 1
             person = persons[0]
             if person['peer_id']:
                 peer = self.get_peer_name(person['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer)
-            self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer)
+            self.api.driver.DeletePerson(person['person_id'])
            
         elif type == "slice":
-            slices=self.api.plshell.GetSlices(self.api.plauth, {'slice_id' : record['pointer']})
+            slices=self.api.driver.GetSlices({'slice_id' : record['pointer']})
             if not slices:
                 return 1
             slice=slices[0]
             if slice['peer_id']:
                 peer = self.get_peer_name(slice['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
-            self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+            self.api.driver.DeleteSlice(slice['slice_id'])
         elif type == "authority":
-            sites=self.api.plshell.GetSites(self.api.plauth, {'site_id' : record['pointer']})
+            sites=self.api.driver.GetSites({'site_id' : record['pointer']})
             if not sites:
                 return 1
             site=sites[0]
             if site['peer_id']:
                 peer = self.get_peer_name(site['peer_id']) 
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer)
-            self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer)
+            self.api.driver.DeleteSite(site['site_id'])
            
         else:
             raise UnknownSfaType(type)
@@ -89,7 +89,7 @@ class remove_peer_object(Method):
         return 1
 
     def get_peer_name(self, peer_id):
-        peers = self.api.plshell.GetPeers(self.api.plauth, [peer_id], ['peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers([peer_id], ['peername', 'shortname', 'hrn_root'])
         if not peers:
             raise SfaInvalidArgument, "No such peer"
         peer = peers[0]
index 15fb4a5..387981d 100644 (file)
@@ -25,22 +25,5 @@ class reset_slice(Method):
     def call(self, cred, xrn, origin_hrn=None):
         hrn, type = urn_to_hrn(xrn)
         self.api.auth.check(cred, 'resetslice', hrn)
-        # send the call to the right manager
-        manager_base = 'sfa.managers'
-        if self.api.interface in ['component']:
-            mgr_type = self.api.config.SFA_CM_TYPE
-            manager_module = manager_base + ".component_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn)
-        elif self.api.interface in ['aggregate']:
-            mgr_type = self.api.config.SFA_AGGREGATE_TYPE
-            manager_module = manager_base + ".aggregate_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn)
-        elif self.api.interface in ['slicemgr']:
-            mgr_type = self.api.config.SFA_SM_TYPE
-            manager_module = manager_base + ".slice_manager_%s" % mgr_type
-            manager = __import__(manager_module, fromlist=[manager_base])
-            manager.reset_slice(self.api, xrn) 
-
+        self.api.manager.reset_slice (self.api, xrn)
         return 1 
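This hunk is the clearest illustration of the commit-wide pattern in sfa/methods: instead of each method re-deriving the manager module from the interface type (component, aggregate, slicemgr) and importing it by name, the api object now carries a ready-to-use self.api.manager. A rough sketch of the dispatch the removed code performed, and that the generic machinery presumably centralizes now; the helper below restates the removed lines and is illustrative only, not the actual sfa/generic implementation:

    def load_manager(api):
        # pick the manager module from the interface type, as the removed
        # per-method code used to do
        base = 'sfa.managers'
        if api.interface == 'component':
            name = "%s.component_manager_%s" % (base, api.config.SFA_CM_TYPE)
        elif api.interface == 'aggregate':
            name = "%s.aggregate_manager_%s" % (base, api.config.SFA_AGGREGATE_TYPE)
        elif api.interface == 'slicemgr':
            name = "%s.slice_manager_%s" % (base, api.config.SFA_SM_TYPE)
        else:
            raise ValueError("unknown interface %r" % api.interface)
        return __import__(name, fromlist=[base])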
index f210705..abd118f 100644 (file)
@@ -1,23 +1,21 @@
 #!/usr/bin/python
 from sfa.util.xrn import hrn_to_urn, urn_to_hrn
-from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+from sfa.util.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, urn_to_sliver_id
+
 from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
 from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.login import Login
 from sfa.rspecs.elements.interface import Interface
-from sfa.managers.vini.topology import PhysicalLinks
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.util.topology import Topology
 from sfa.rspecs.version_manager import VersionManager
 from sfa.plc.vlink import get_tc_rate
 
 class Aggregate:
 
     api = None
-    sites = {}
-    nodes = {}
-    interfaces = {}
-    links = {}
-    node_tags = {}
-    pl_initscripts = {} 
-    prepared=False
     #panos new user options variable
     user_options = {}
 
@@ -25,195 +23,204 @@ class Aggregate:
         self.api = api
         self.user_options = user_options
 
-    def prepare_sites(self, filter={}, force=False):
-        if not self.sites or force:  
-            for site in self.api.plshell.GetSites(self.api.plauth, filter):
-                self.sites[site['site_id']] = site
-    
-    def prepare_nodes(self, filter={}, force=False):
-        if not self.nodes or force:
-            filter.update({'peer_id': None})
-            nodes = self.api.plshell.GetNodes(self.api.plauth, filter)
-            site_ids = []
-            interface_ids = []
-            tag_ids = []
-            for node in nodes:
-                site_ids.append(node['site_id'])
-                interface_ids.extend(node['interface_ids'])
-                tag_ids.extend(node['node_tag_ids'])
-            self.prepare_sites({'site_id': site_ids})
-            self.prepare_interfaces({'interface_id': interface_ids})
-            self.prepare_node_tags({'node_tag_id': tag_ids}) 
-            for node in nodes:
-                # add site/interface info to nodes.
-                # assumes that sites, interfaces and tags have already been prepared.
-                site = self.sites[node['site_id']]
-                interfaces = [self.interfaces[interface_id] for interface_id in node['interface_ids']]
-                tags = [self.node_tags[tag_id] for tag_id in node['node_tag_ids']]
-                node['network'] = self.api.hrn
-                node['network_urn'] = hrn_to_urn(self.api.hrn, 'authority+am')
-                node['urn'] = hostname_to_urn(self.api.hrn, site['login_base'], node['hostname'])
-                node['site_urn'] = hrn_to_urn(PlXrn.site_hrn(self.api.hrn, site['login_base']), 'authority+sa')
-                node['site'] = site
-                node['interfaces'] = interfaces
-                node['tags'] = tags
-                self.nodes[node['node_id']] = node
-
-    def prepare_interfaces(self, filter={}, force=False):
-        if not self.interfaces or force:
-            for interface in self.api.plshell.GetInterfaces(self.api.plauth, filter):
-                self.interfaces[interface['interface_id']] = interface
-
-    def prepare_links(self, filter={}, force=False):
-        if not self.links or force:
-            if not self.api.config.SFA_AGGREGATE_TYPE.lower() == 'vini':
-                return
-
-            for (site_id1, site_id2) in PhysicalLinks:
-                link = Link()
-                if not site_id1 in self.sites or site_id2 not in self.sites:
+    def get_sites(self, filter={}):
+        sites = {}
+        for site in self.api.driver.GetSites(filter):
+            sites[site['site_id']] = site
+        return sites
+
+    def get_interfaces(self, filter={}):
+        interfaces = {}
+        for interface in self.api.driver.GetInterfaces(filter):
+            iface = Interface()
+            iface['interface_id'] = interface['interface_id']
+            iface['node_id'] = interface['node_id']
+            iface['ipv4'] = interface['ip']
+            iface['bwlimit'] = interface['bwlimit']
+            interfaces[iface['interface_id']] = iface
+        return interfaces
+
+    def get_links(self, filter={}):
+        
+        if not self.api.config.SFA_AGGREGATE_TYPE.lower() == 'vini':
+            return
+
+        topology = Topology() 
+        links = {}
+        for (site_id1, site_id2) in topology:
+            link = Link()
+            if not site_id1 in self.sites or site_id2 not in self.sites:
+                continue
+            site1 = self.sites[site_id1]
+            site2 = self.sites[site_id2]
+            # get hrns
+            site1_hrn = self.api.hrn + '.' + site1['login_base']
+            site2_hrn = self.api.hrn + '.' + site2['login_base']
+            # get the first node
+            node1 = self.nodes[site1['node_ids'][0]]
+            node2 = self.nodes[site2['node_ids'][0]]
+
+            # set interfaces
+            # just get first interface of the first node
+            if1_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node1['node_id']))
+            if1_ipv4 = self.interfaces[node1['interface_ids'][0]]['ip']
+            if2_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node2['node_id']))
+            if2_ipv4 = self.interfaces[node2['interface_ids'][0]]['ip']
+
+            if1 = Interface({'component_id': if1_xrn.urn, 'ipv4': if1_ipv4} )
+            if2 = Interface({'component_id': if2_xrn.urn, 'ipv4': if2_ipv4} )
+
+            # set link
+            link = Link({'capacity': '1000000', 'latency': '0', 'packet_loss': '0', 'type': 'ipv4'})
+            link['interface1'] = if1
+            link['interface2'] = if2
+            link['component_name'] = "%s:%s" % (site1['login_base'], site2['login_base'])
+            link['component_id'] = PlXrn(auth=self.api.hrn, interface=link['component_name']).get_urn()
+            link['component_manager_id'] =  hrn_to_urn(self.api.hrn, 'authority+am')
+            links[link['component_name']] = link
+
+        return links
+
+    def get_node_tags(self, filter={}):
+        node_tags = {}
+        for node_tag in self.api.driver.GetNodeTags(filter):
+            node_tags[node_tag['node_tag_id']] = node_tag
+        return node_tags
+
+    def get_pl_initscripts(self, filter={}):
+        pl_initscripts = {}
+        filter.update({'enabled': True})
+        for initscript in self.api.driver.GetInitScripts(filter):
+            pl_initscripts[initscript['initscript_id']] = initscript
+        return pl_initscripts
+
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_urn = hrn_to_urn(slice_hrn, 'slice')
+        slice_name = hrn_to_pl_slicename(slice_hrn)
+        slices = self.api.driver.GetSlices(slice_name)
+        if not slices:
+            return (slice, slivers)
+        slice = slices[0]
+
+        # sort slivers by node id    
+        for node_id in slice['node_ids']:
+            sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], node_id),
+                             'name': 'plab-vserver', 
+                             'tags': []})
+            slivers[node_id]= sliver
+
+        # sort sliver attributes by node id    
+        tags = self.api.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
+        for tag in tags:
+            # most likely a default/global sliver attribute (node_id == None)
+            if tag['node_id'] not in slivers:
+                sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+                                 'name': 'plab-vserver',
+                                 'tags': []})
+                slivers[tag['node_id']] = sliver
+            slivers[tag['node_id']]['tags'].append(tag)
+        
+        return (slice, slivers)
+
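For reference, a sketch of the shape this helper returns: slivers are keyed by node_id, and tags that are not bound to any node end up under the None key, which get_rspec later treats as the slice-wide defaults. All concrete values below are illustrative, not real identifiers or tag names:

    # illustrative only
    slice_record = {'slice_id': 42, 'node_ids': [10], 'slice_tag_ids': [7, 8]}
    slivers = {
        10:   {'sliver_id': '<urn of sliver 42 on node 10>', 'name': 'plab-vserver',
               'tags': [{'tagname': 'vref', 'value': 'f14', 'node_id': 10}]},
        None: {'sliver_id': '<urn of sliver 42, no node>', 'name': 'plab-vserver',
               'tags': [{'tagname': 'initscript', 'value': 'setup', 'node_id': None}]},
    }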
+    def get_nodes(self, slice=None, slivers={}):
+        filter = {}
+        if slice and 'node_ids' in slice and slice['node_ids']:
+            filter['node_id'] = slice['node_ids']
+        
+        filter.update({'peer_id': None})
+        nodes = self.api.driver.GetNodes(filter)
+       
+        node_ids = []
+        site_ids = []
+        interface_ids = []
+        tag_ids = []
+        for node in nodes:
+            node_ids.append(node['node_id'])
+            site_ids.append(node['site_id'])
+            interface_ids.extend(node['interface_ids'])
+            tag_ids.extend(node['node_tag_ids'])
+        # get sites
+        sites_dict  = self.get_sites({'site_id': site_ids}) 
+        # get interfaces
+        interfaces = self.get_interfaces({'interface_id':interface_ids}) 
+        # slivers for this slice are passed in by the caller (see get_slice_and_slivers)
+        # get tags
+        node_tags = self.get_node_tags({'node_id': node_ids})
+        # get initscripts
+        pl_initscripts = self.get_pl_initscripts()
+
+        rspec_nodes = []
+        for node in nodes:
+            # skip whitelisted nodes
+            if node['slice_ids_whitelist']:
+                if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
                     continue
-                site1 = self.sites[site_id1]
-                site2 = self.sites[site_id2]
-                # get hrns
-                site1_hrn = self.api.hrn + '.' + site1['login_base']
-                site2_hrn = self.api.hrn + '.' + site2['login_base']
-                # get the first node
-                node1 = self.nodes[site1['node_ids'][0]]
-                node2 = self.nodes[site2['node_ids'][0]]
-
-                # set interfaces
-                # just get first interface of the first node
-                if1_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node1['node_id']))
-                if1_ipv4 = self.interfaces[node1['interface_ids'][0]]['ip']
-                if2_xrn = PlXrn(auth=self.api.hrn, interface='node%s:eth0' % (node2['node_id']))
-                if2_ipv4 = self.interfaces[node2['interface_ids'][0]]['ip']
-
-                if1 = Interface({'component_id': if1_xrn.urn, 'ipv4': if1_ipv4} )
-                if2 = Interface({'component_id': if2_xrn.urn, 'ipv4': if2_ipv4} )
-
-                # set link
-                link = Link({'capacity': '1000000', 'latency': '0', 'packet_loss': '0', 'type': 'ipv4'})
-                link['interface1'] = if1
-                link['interface2'] = if2
-                link['component_name'] = "%s:%s" % (site1['login_base'], site2['login_base'])
-                link['component_id'] = PlXrn(auth=self.api.hrn, interface=link['component_name']).get_urn()
-                link['component_manager_id'] =  hrn_to_urn(self.api.hrn, 'authority+am')
-                self.links[link['component_name']] = link
-
-
-    def prepare_node_tags(self, filter={}, force=False):
-        if not self.node_tags or force:
-            for node_tag in self.api.plshell.GetNodeTags(self.api.plauth, filter):
-                self.node_tags[node_tag['node_tag_id']] = node_tag
-
-    def prepare_pl_initscripts(self, filter={}, force=False):
-        if not self.pl_initscripts or force:
-            filter.update({'enabled': True})
-            for initscript in self.api.plshell.GetInitScripts(self.api.plauth, filter):
-                self.pl_initscripts[initscript['initscript_id']] = initscript
-
-    def prepare(self, slice = None, force=False):
-        if not self.prepared or force or slice:
-            if not slice:
-                self.prepare_sites(force=force)
-                self.prepare_interfaces(force=force)
-                self.prepare_node_tags(force=force)
-                self.prepare_nodes(force=force)
-                self.prepare_links(force=force)
-                self.prepare_pl_initscripts(force=force)
-            else:
-                self.prepare_sites({'site_id': slice['site_id']})
-                self.prepare_interfaces({'node_id': slice['node_ids']})
-                self.prepare_node_tags({'node_id': slice['node_ids']})
-                self.prepare_nodes({'node_id': slice['node_ids']})
-                self.prepare_links({'slice_id': slice['slice_id']})
-                self.prepare_pl_initscripts()
-            self.prepared = True  
-
+            rspec_node = Node()
+            # look up this node's site first; the component and authority ids below need it
+            site = sites_dict[node['site_id']]
+            rspec_node['component_id'] = hostname_to_urn(self.api.hrn, site['login_base'], node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = self.api.hrn
+            rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.api.hrn, site['login_base']), 'authority+sa')
+            rspec_node['boot_state'] = node['boot_state']
+            rspec_node['exclusive'] = 'False'
+            rspec_node['hardware_types'].append(HardwareType({'name': 'plab-vserver'}))
+            # only doing this because protogeni rspec needs
+            # to advertise available initscripts 
+            rspec_node['pl_initscripts'] = pl_initscripts
+            # add site/interface info to nodes.
+            # assumes that sites, interfaces and tags have already been fetched above.
+            location = Location({'longitude': site['longitude'], 'latitude': site['latitude']})
+            rspec_node['location'] = location
+            rspec_node['interfaces'] = []
+            for if_id in node['interface_ids']:
+                interface = Interface(interfaces[if_id])
+                rspec_node['interfaces'].append(interface)
+            tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
+            rspec_node['tags'] = tags
+            if node['node_id'] in slivers:
+                # add sliver info
+                sliver = slivers[node['node_id']]
+                rspec_node['sliver_id'] = sliver['sliver_id']
+                rspec_node['client_id'] = node['hostname']
+                rspec_node['slivers'] = [slivers[node['node_id']]]
+                
+                # slivers always provide the ssh service
+                login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port': '22'})
+                service = Services({'login': login})
+                rspec_node['services'].append(service)
+            rspec_nodes.append(rspec_node)
+        return rspec_nodes
+             
+        
     def get_rspec(self, slice_xrn=None, version = None):
+
         version_manager = VersionManager()
         version = version_manager.get_version(version)
         if not slice_xrn:
             rspec_version = version_manager._get_version(version.type, version.version, 'ad')
         else:
             rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
-               
-        rspec = RSpec(version=rspec_version, user_options=self.user_options)
-        # get slice details if specified
-        slice = None
-        if slice_xrn:
-            slice_hrn, _ = urn_to_hrn(slice_xrn)
-            slice_name = hrn_to_pl_slicename(slice_hrn)
-            slices = self.api.plshell.GetSlices(self.api.plauth, slice_name)
-            if slices:
-                slice = slices[0]
-            self.prepare(slice=slice)
-        else:
-            self.prepare()
-            
-        # filter out nodes with a whitelist:
-        valid_nodes = [] 
-        for node in self.nodes.values():
-            # only doing this because protogeni rspec needs
-            # to advertise available initscripts 
-            node['pl_initscripts'] = self.pl_initscripts
-
-            if slice and node['node_id'] in slice['node_ids']:
-                valid_nodes.append(node)
-            elif slice and slice['slice_id'] in node['slice_ids_whitelist']:
-                valid_nodes.append(node)
-            elif not slice and not node['slice_ids_whitelist']:
-                valid_nodes.append(node)
-    
-        rspec.version.add_nodes(valid_nodes)
-        rspec.version.add_interfaces(self.interfaces.values()) 
-        rspec.version.add_links(self.links.values())
-
-        # add slivers
-        if slice_xrn and slice:
-            slivers = []
-            tags = self.api.plshell.GetSliceTags(self.api.plauth, slice['slice_tag_ids'])
-
-            # add default tags
-            for tag in tags:
-                # if tag isn't bound to a node then it applies to all slivers
-                # and belongs in the <sliver_defaults> tag
-                if not tag['node_id']:
-                    rspec.version.add_default_sliver_attribute(tag['tagname'], tag['value'], self.api.hrn)
-                if tag['tagname'] == 'topo_rspec' and tag['node_id']:
-                    node = self.nodes[tag['node_id']]
-                    value = eval(tag['value'])
-                    for (id, realip, bw, lvip, rvip, vnet) in value:
-                        bps = get_tc_rate(bw)
-                        remote = self.nodes[id]
-                        site1 = self.sites[node['site_id']]
-                        site2 = self.sites[remote['site_id']]
-                        link1_name = '%s:%s' % (site1['login_base'], site2['login_base']) 
-                        link2_name = '%s:%s' % (site2['login_base'], site1['login_base']) 
-                        p_link = None
-                        if link1_name in self.links:
-                            link = self.links[link1_name] 
-                        elif link2_name in self.links:
-                            link = self.links[link2_name]
-                        v_link = Link()
-                        
-                        link.capacity = bps 
-            for node_id in slice['node_ids']:
-                try:
-                    sliver = {}
-                    sliver['hostname'] = self.nodes[node_id]['hostname']
-                    sliver['node_id'] = node_id
-                    sliver['slice_id'] = slice['slice_id']    
-                    sliver['tags'] = []
-                    slivers.append(sliver)
-
-                    # add tags for this node only
-                    for tag in tags:
-                        if tag['node_id'] and (tag['node_id'] == node_id):
-                            sliver['tags'].append(tag)
-                except:
-                    self.api.logger.log_exc('unable to add sliver %s to node %s' % (slice['name'], node_id))
-            rspec.version.add_slivers(slivers, sliver_urn=slice_xrn)
 
+        slice, slivers = self.get_slice_and_slivers(slice_xrn)
+        rspec = RSpec(version=rspec_version, user_options=self.user_options)
+        rspec.version.add_nodes(self.get_nodes(slice, slivers))
+        rspec.version.add_links(self.get_links(slice))
+        
+        # add sliver defaults: tags not bound to a node live under the None key
+        default_sliver = slivers.get(None)
+        if default_sliver:
+            for tag in default_sliver['tags']:
+                rspec.version.add_default_sliver_attribute(tag['tagname'], tag['value'], self.api.hrn)
+        
         return rspec.toxml()
+
+
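With the prepare_* caches gone, Aggregate keeps no state besides api and user_options, and producing an rspec is a single call to get_rspec. A minimal usage sketch, assuming the constructor keeps its (api, user_options) signature and that VersionManager accepts a version name string like the one shown (both assumptions, not visible in this diff; the slice URN is illustrative):

    aggregate = Aggregate(api)
    # advertisement rspec: no slice given
    ad_xml = aggregate.get_rspec(version='SFA 1')
    # manifest rspec for one slice
    manifest_xml = aggregate.get_rspec(slice_xrn='urn:publicid:IDN+plc:princeton+slice+demo',
                                       version='SFA 1')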
index 5b2983b..e97565c 100644 (file)
@@ -282,12 +282,11 @@ class Slicetag:
     
     def write(self, api):
         if self.was_added():
-            api.plshell.AddSliceTag(api.plauth, self.slice_id, 
-                                    self.tagname, self.value, self.node_id)
+            api.driver.AddSliceTag(self.slice_id, self.tagname, self.value, self.node_id)
         elif self.was_changed():
-            api.plshell.UpdateSliceTag(api.plauth, self.id, self.value)
+            api.driver.UpdateSliceTag(self.id, self.value)
         elif self.was_deleted():
-            api.plshell.DeleteSliceTag(api.plauth, self.id)
+            api.driver.DeleteSliceTag(self.id)
 
 
 class TagType:
@@ -560,7 +559,7 @@ class Network:
         Create a dictionary of site objects keyed by site ID
         """
         tmp = []
-        for site in api.plshell.GetSites(api.plauth, {'peer_id': None}):
+        for site in api.driver.GetSites({'peer_id': None}):
             t = site['site_id'], Site(self, site)
             tmp.append(t)
         return dict(tmp)
@@ -571,7 +570,7 @@ class Network:
         Create a dictionary of node objects keyed by node ID
         """
         tmp = []
-        for node in api.plshell.GetNodes(api.plauth, {'peer_id': None}):
+        for node in api.driver.GetNodes({'peer_id': None}):
             try:
                 t = node['node_id'], Node(self, node)
                 tmp.append(t)
@@ -585,7 +584,7 @@ class Network:
         Create a dictionary of node objects keyed by node ID
         """
         tmp = []
-        for iface in api.plshell.GetInterfaces(api.plauth):
+        for iface in api.driver.GetInterfaces():
             t = iface['interface_id'], Iface(self, iface)
             tmp.append(t)
         return dict(tmp)
@@ -595,7 +594,7 @@ class Network:
         Create a dictionary of slicetag objects keyed by slice tag ID
         """
         tmp = []
-        for tag in api.plshell.GetSliceTags(api.plauth, {'~tagname':Slicetag.ignore_tags}, Slicetag.filter_fields): 
+        for tag in api.driver.GetSliceTags({'~tagname':Slicetag.ignore_tags}, Slicetag.filter_fields): 
             t = tag['slice_tag_id'], Slicetag(tag)
             tmp.append(t)
         return dict(tmp)
@@ -605,7 +604,7 @@ class Network:
         Create a list of tagtype objects keyed by tag name
         """
         tmp = []
-        for tag in api.plshell.GetTagTypes(api.plauth, {'~tagname':TagType.ignore_tags}):
+        for tag in api.driver.GetTagTypes({'~tagname':TagType.ignore_tags}):
             t = tag['tagname'], TagType(tag)
             tmp.append(t)
         return dict(tmp)
@@ -615,7 +614,7 @@ class Network:
         Return a Slice object for a single slice
         """
         slicename = hrn_to_pl_slicename(hrn)
-        slice = api.plshell.GetSlices(api.plauth, [slicename])
+        slice = api.driver.GetSlices([slicename])
         if len(slice):
             self.slice = Slice(self, slicename, slice[0])
             return self.slice
index e85800c..b659ea9 100644 (file)
@@ -13,8 +13,7 @@ def get_peer(api, hrn):
     # get this site's authority (sfa root authority or sub authority)
     site_authority = get_authority(slice_authority).lower()
     # check if we are already peered with this site_authority, if so
-    peers = api.plshell.GetPeers(api.plauth, {}, \
-                    ['peer_id', 'peername', 'shortname', 'hrn_root'])
+    peers = api.driver.GetPeers( {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
     for peer_record in peers:
         names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
         if site_authority in names:
similarity index 79%
rename from sfa/plc/plccomponentapi.py
rename to sfa/plc/plcomponentdriver.py
index e09dbee..c991bd6 100644 (file)
@@ -1,29 +1,25 @@
 import os
 import tempfile
 
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
-from sfa.util.nodemanager import NodeManager
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+from sfa.plc.nodemanager import NodeManager
 
 from sfa.trust.credential import Credential
 from sfa.trust.certificate import Certificate, Keypair
 from sfa.trust.gid import GID
 
-from sfa.server.sfaapi import SfaApi
-
 ####################
-class PlcComponentApi(SfaApi):
-
-    def __init__ (self, encoding="utf-8", methods='sfa.methods', 
-                  config = "/etc/sfa/sfa_config.py", 
-                  peer_cert = None, interface = None, 
-                  key_file = None, cert_file = None, cache = None):
-        SfaApi.__init__(self, encoding=encoding, methods=methods, 
-                        config=config, 
-                        peer_cert=peer_cert, interface=interface, 
-                        key_file=key_file, 
-                        cert_file=cert_file, cache=cache)
+class PlComponentDriver:
+    """
+    This class is the type for the toplevel 'api' object
+    when running the component manager inside a planetlab node.
+    As such it backs an SFA-compliant interface, but the fact that it
+    runs inside a planetlab node requires some tweaks as compared with
+    a service running in the infrastructure.
+    """
 
-        self.nodemanager = NodeManager(self.config)
+    def __init__ (self, config):
+        self.config = config
+        self.nodemanager = NodeManager(config)
 
     def sliver_exists(self):
         sliver_dict = self.nodemanager.GetXIDs()
@@ -36,7 +32,8 @@ class PlcComponentApi(SfaApi):
     def get_registry(self):
         addr, port = self.config.SFA_REGISTRY_HOST, self.config.SFA_REGISTRY_PORT
         url = "http://%(addr)s:%(port)s" % locals()
-        server = xmlrpcprotocol.get_server(url, self.key_file, self.cert_file)
+        ### xxx this would require access to the api...
+        server = xmlrpcprotocol.server_proxy(url, self.key_file, self.cert_file)
         return server
 
     def get_node_key(self):
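The class above is no longer an SfaApi subclass but a small driver that wraps the local NodeManager and knows how to reach the registry. A hedged usage sketch, assuming a config object exposing the SFA_REGISTRY_HOST/PORT fields used by get_registry; as the inline comment notes, key_file and cert_file still have to be supplied by the surrounding api object, so that wiring is an assumption here:

    driver = PlComponentDriver(config)
    # assumption: the caller hands over its credentials for the registry proxy
    driver.key_file, driver.cert_file = api.key_file, api.cert_file
    if driver.sliver_exists():
        registry = driver.get_registry()   # xmlrpcprotocol proxy to the registry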
similarity index 72%
rename from sfa/plc/plcsfaapi.py
rename to sfa/plc/pldriver.py
index 49de80e..af3b213 100644 (file)
@@ -1,53 +1,13 @@
-import xmlrpclib
 #
 from sfa.util.faults import MissingSfaInfo
 from sfa.util.sfalogging import logger
 from sfa.util.table import SfaTable
+from sfa.util.defaultdict import defaultdict
 
 from sfa.util.xrn import hrn_to_urn
 from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
 
-from sfa.server.sfaapi import SfaApi
-
-#################### xxx should move into util/defaultdict
-try:
-    from collections import defaultdict
-except:
-    class defaultdict(dict):
-        def __init__(self, default_factory=None, *a, **kw):
-            if (default_factory is not None and
-                not hasattr(default_factory, '__call__')):
-                raise TypeError('first argument must be callable')
-            dict.__init__(self, *a, **kw)
-            self.default_factory = default_factory
-        def __getitem__(self, key):
-            try:
-                return dict.__getitem__(self, key)
-            except KeyError:
-                return self.__missing__(key)
-        def __missing__(self, key):
-            if self.default_factory is None:
-                raise KeyError(key)
-            self[key] = value = self.default_factory()
-            return value
-        def __reduce__(self):
-            if self.default_factory is None:
-                args = tuple()
-            else:
-                args = self.default_factory,
-            return type(self), args, None, None, self.items()
-        def copy(self):
-            return self.__copy__()
-        def __copy__(self):
-            return type(self)(self.default_factory, self)
-        def __deepcopy__(self, memo):
-            import copy
-            return type(self)(self.default_factory,
-                              copy.deepcopy(self.items()))
-        def __repr__(self):
-            return 'defaultdict(%s, %s)' % (self.default_factory,
-                                            dict.__repr__(self))
-## end of http://code.activestate.com/recipes/523034/ }}}
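The commit replaces the inlined defaultdict backport above with a proper sfa.util.defaultdict module (imported near the top of this hunk). A plausible sketch of that module's core, assuming it keeps the try/except structure the removed comment asked for: on Python 2.5+ it simply re-exports the stdlib class, and only falls back to a trimmed version of the ActiveState recipe shown above on older interpreters:

    # sfa/util/defaultdict.py (sketch)
    try:
        from collections import defaultdict
    except ImportError:
        class defaultdict(dict):
            # minimal fallback for pre-2.5 interpreters; the full version is
            # the recipe removed from this file (ActiveState recipe 523034)
            def __init__(self, default_factory=None, *a, **kw):
                dict.__init__(self, *a, **kw)
                self.default_factory = default_factory
            def __missing__(self, key):
                if self.default_factory is None:
                    raise KeyError(key)
                self[key] = value = self.default_factory()
                return value
            def __getitem__(self, key):
                try:
                    return dict.__getitem__(self, key)
                except KeyError:
                    return self.__missing__(key)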
+from sfa.plc.plshell import PlShell
 
 def list_to_dict(recs, key):
     """
@@ -57,45 +17,19 @@ def list_to_dict(recs, key):
     keys = [rec[key] for rec in recs]
     return dict(zip(keys, recs))
 
-class PlcSfaApi(SfaApi):
-
-    def __init__ (self, encoding="utf-8", methods='sfa.methods', 
-                  config = "/etc/sfa/sfa_config.py", 
-                  peer_cert = None, interface = None, 
-                  key_file = None, cert_file = None, cache = None):
-        SfaApi.__init__(self, encoding=encoding, methods=methods, 
-                        config=config, 
-                        peer_cert=peer_cert, interface=interface, 
-                        key_file=key_file, 
-                        cert_file=cert_file, cache=cache)
+class PlDriver (PlShell):
+
+    def __init__ (self, config):
+        PlShell.__init__ (self, config)
  
-        self.SfaTable = SfaTable
+        self.hrn = config.SFA_INTERFACE_HRN
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        # will need to extend generic to support multiple storage systems
+        #self.SfaTable = SfaTable
         # Initialize the PLC shell only if SFA wraps a myPLC
-        rspec_type = self.config.get_aggregate_type()
-        if (rspec_type == 'pl' or rspec_type == 'vini' or \
-            rspec_type == 'eucalyptus' or rspec_type == 'max'):
-            self.plshell = self.getPLCShell()
-            self.plshell_version = "4.3"
-
-    def getPLCShell(self):
-        self.plauth = {'Username': self.config.SFA_PLC_USER,
-                       'AuthMethod': 'password',
-                       'AuthString': self.config.SFA_PLC_PASSWORD}
-
-        # The native shell (PLC.Shell.Shell) is more efficient than xmlrpc,
-        # but it leaves idle db connections open. use xmlrpc until we can figure
-        # out why PLC.Shell.Shell doesn't close db connection properly     
-        #try:
-        #    sys.path.append(os.path.dirname(os.path.realpath("/usr/bin/plcsh")))
-        #    self.plshell_type = 'direct'
-        #    import PLC.Shell
-        #    shell = PLC.Shell.Shell(globals = globals())
-        #except:
-        
-        self.plshell_type = 'xmlrpc' 
-        url = self.config.SFA_PLC_URL
-        shell = xmlrpclib.Server(url, verbose = 0, allow_none = True)
-        return shell
+        rspec_type = config.get_aggregate_type()
+        assert (rspec_type == 'pl' or rspec_type == 'vini' or \
+                    rspec_type == 'eucalyptus' or rspec_type == 'max')
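+        # note: since PlDriver subclasses PlShell, PLCAPI calls can be issued
+        # directly on the driver, e.g. self.GetNodes(node_ids) rather than the
+        # former self.plshell.GetNodes(self.plauth, node_ids)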
 
     ##
     # Convert SFA fields to PLC fields for use when registering or updating
@@ -177,16 +111,16 @@ class PlcSfaApi(SfaApi):
         # get pl records
         nodes, sites, slices, persons, keys = {}, {}, {}, {}, {}
         if node_ids:
-            node_list = self.plshell.GetNodes(self.plauth, node_ids)
+            node_list = self.GetNodes(node_ids)
             nodes = list_to_dict(node_list, 'node_id')
         if site_ids:
-            site_list = self.plshell.GetSites(self.plauth, site_ids)
+            site_list = self.GetSites(site_ids)
             sites = list_to_dict(site_list, 'site_id')
         if slice_ids:
-            slice_list = self.plshell.GetSlices(self.plauth, slice_ids)
+            slice_list = self.GetSlices(slice_ids)
             slices = list_to_dict(slice_list, 'slice_id')
         if person_ids:
-            person_list = self.plshell.GetPersons(self.plauth, person_ids)
+            person_list = self.GetPersons(person_ids)
             persons = list_to_dict(person_list, 'person_id')
             for person in persons:
                 key_ids.extend(persons[person]['key_ids'])
@@ -195,7 +129,7 @@ class PlcSfaApi(SfaApi):
                       'slice': slices, 'user': persons}
 
         if key_ids:
-            key_list = self.plshell.GetKeys(self.plauth, key_ids)
+            key_list = self.GetKeys(key_ids)
             keys = list_to_dict(key_list, 'key_id')
 
         # fill record info
@@ -246,16 +180,16 @@ class PlcSfaApi(SfaApi):
         # get pl records
         slices, persons, sites, nodes = {}, {}, {}, {}
         if site_ids:
-            site_list = self.plshell.GetSites(self.plauth, site_ids, ['site_id', 'login_base'])
+            site_list = self.GetSites(site_ids, ['site_id', 'login_base'])
             sites = list_to_dict(site_list, 'site_id')
         if person_ids:
-            person_list = self.plshell.GetPersons(self.plauth, person_ids, ['person_id', 'email'])
+            person_list = self.GetPersons(person_ids, ['person_id', 'email'])
             persons = list_to_dict(person_list, 'person_id')
         if slice_ids:
-            slice_list = self.plshell.GetSlices(self.plauth, slice_ids, ['slice_id', 'name'])
+            slice_list = self.GetSlices(slice_ids, ['slice_id', 'name'])
             slices = list_to_dict(slice_list, 'slice_id')       
         if node_ids:
-            node_list = self.plshell.GetNodes(self.plauth, node_ids, ['node_id', 'hostname'])
+            node_list = self.GetNodes(node_ids, ['node_id', 'hostname'])
             nodes = list_to_dict(node_list, 'node_id')
        
         # convert ids to hrns
@@ -296,7 +230,8 @@ class PlcSfaApi(SfaApi):
             
         return records   
 
-    def fill_record_sfa_info(self, records):
+    # aggregates is basically api.aggregates
+    def fill_record_sfa_info(self, records, aggregates):
 
         def startswith(prefix, values):
             return [value for value in values if value.startswith(prefix)]
@@ -315,7 +250,7 @@ class PlcSfaApi(SfaApi):
         site_pis = {}
         if site_ids:
             pi_filter = {'|roles': ['pi'], '|site_ids': site_ids} 
-            pi_list = self.plshell.GetPersons(self.plauth, pi_filter, ['person_id', 'site_ids'])
+            pi_list = self.GetPersons(pi_filter, ['person_id', 'site_ids'])
             for pi in pi_list:
                 # we will need the pi's hrns also
                 person_ids.append(pi['person_id'])
@@ -333,7 +268,9 @@ class PlcSfaApi(SfaApi):
         # we obtain
         
         # get the sfa records
-        table = self.SfaTable()
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        # table = self.SfaTable()
+        table = SfaTable()
         person_list, persons = [], {}
         person_list = table.find({'type': 'user', 'pointer': person_ids})
         # create a hrns keyed on the sfa record's pointer.
@@ -345,7 +282,7 @@ class PlcSfaApi(SfaApi):
 
         # get the pl records
         pl_person_list, pl_persons = [], {}
-        pl_person_list = self.plshell.GetPersons(self.plauth, person_ids, ['person_id', 'roles'])
+        pl_person_list = self.GetPersons(person_ids, ['person_id', 'roles'])
         pl_persons = list_to_dict(pl_person_list, 'person_id')
 
         # fill sfa info
@@ -375,9 +312,9 @@ class PlcSfaApi(SfaApi):
                 
             elif (type.startswith("authority")):
                 record['url'] = None
-                if record['hrn'] in self.aggregates:
+                if record['hrn'] in aggregates:
                     
-                    record['url'] = self.aggregates[record['hrn']].get_url()
+                    record['url'] = aggregates[record['hrn']].get_url()
 
                 if record['pointer'] != -1:
                     record['PI'] = []
@@ -407,7 +344,7 @@ class PlcSfaApi(SfaApi):
                 # xxx TODO: PostalAddress, Phone
             record.update(sfa_info)
 
-    def fill_record_info(self, records):
+    def fill_record_info(self, records, aggregates):
         """
         Given a SFA record, fill in the PLC specific and SFA specific
         fields in the record. 
@@ -416,7 +353,7 @@ class PlcSfaApi(SfaApi):
             records = [records]
 
         self.fill_record_pl_info(records)
-        self.fill_record_sfa_info(records)
+        self.fill_record_sfa_info(records, aggregates)
 
     def update_membership_list(self, oldRecord, record, listName, addFunc, delFunc):
         # get a list of the HRNs that are members of the old and new records
@@ -425,17 +362,6 @@ class PlcSfaApi(SfaApi):
         else:
             oldList = []     
         newList = record.get(listName, [])
-        # xxx ugly hack - somehow we receive here a list of {'text':value} 
-        # instead of an expected list of strings
-        # please remove once this is issue is cleanly fixed
-        def normalize (value):
-            from types import StringTypes
-            if isinstance(value,StringTypes): return value
-            elif isinstance(value,dict): 
-                newvalue=value['text']
-                logger.info("Normalizing %s=>%s"%(value,newvalue))
-                return newvalue
-        newList=[normalize(v) for v in newList]
 
         # if the lists are the same, then we don't have to update anything
         if (oldList == newList):
@@ -444,7 +370,9 @@ class PlcSfaApi(SfaApi):
         # build a list of the new person ids, by looking up each person to get
         # their pointer
         newIdList = []
-        table = self.SfaTable()
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        #table = self.SfaTable()
+        table = SfaTable()
         records = table.find({'type': 'user', 'hrn': newList})
         for rec in records:
             newIdList.append(rec['pointer'])
@@ -462,18 +390,18 @@ class PlcSfaApi(SfaApi):
     # add people who are in the new list, but not the oldList
         for personId in newIdList:
             if not (personId in oldIdList):
-                addFunc(self.plauth, personId, containerId)
+                addFunc(personId, containerId)
 
         # remove people who are in the old list, but not the new list
         for personId in oldIdList:
             if not (personId in newIdList):
-                delFunc(self.plauth, personId, containerId)
+                delFunc(personId, containerId)
 
     def update_membership(self, oldRecord, record):
         if record.type == "slice":
             self.update_membership_list(oldRecord, record, 'researcher',
-                                        self.plshell.AddPersonToSlice,
-                                        self.plshell.DeletePersonFromSlice)
+                                        self.AddPersonToSlice,
+                                        self.DeletePersonFromSlice)
         elif record.type == "authority":
             # xxx TODO
             pass
diff --git a/sfa/plc/plshell.py b/sfa/plc/plshell.py
new file mode 100644 (file)
index 0000000..863472f
--- /dev/null
@@ -0,0 +1,46 @@
+import xmlrpclib
+
+class PlShell:
+    """
+    A simple xmlrpc shell to a myplc instance
+    This class can receive all PLCAPI calls to the underlying testbed
+    For safety this is limited to a set of hard-coded calls
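+
+    Illustrative usage (a sketch, assuming a populated SFA config object):
+        shell = PlShell(config)
+        nodes = shell.GetNodes({'boot_state': 'boot'}, ['node_id', 'hostname'])
+    Calls not listed in direct_calls or alias_calls raise an exception.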
+    """
+    
+    direct_calls = ['AddNode', 'AddPerson', 'AddPersonKey', 'AddPersonToSite',
+                    'AddPersonToSlice', 'AddRoleToPerson', 'AddSite', 'AddSiteTag', 'AddSlice',
+                    'AddSliceTag', 'AddSliceToNodes', 'BindObjectToPeer', 'DeleteKey',
+                    'DeleteNode', 'DeletePerson', 'DeletePersonFromSlice', 'DeleteSite',
+                    'DeleteSlice', 'DeleteSliceFromNodes', 'DeleteSliceTag', 'GetInitScripts',
+                    'GetInterfaces', 'GetKeys', 'GetNodeTags', 'GetPeers',
+                    'GetPersons', 'GetSlices', 'GetSliceTags', 'GetTagTypes',
+                    'UnBindObjectFromPeer', 'UpdateNode', 'UpdatePerson', 'UpdateSite',
+                    'UpdateSlice', 'UpdateSliceTag',
+                    # also used as-is in importer
+                    'GetSites','GetNodes',
+                    ]
+    # support for other names - this is experimental
+    alias_calls = { 'get_authorities':'GetSites',
+                    'get_nodes':'GetNodes',
+                    }
+
+    def __init__ ( self, config ) :
+        self.plauth = {'Username': config.SFA_PLC_USER,
+                       'AuthMethod': 'password',
+                       'AuthString': config.SFA_PLC_PASSWORD}
+        
+        self.url = config.SFA_PLC_URL
+        # xxx debug override with hardcoded test credentials - disabled so the
+        # config-based credentials above are actually used
+        #self.plauth = {'Username': 'root@test.onelab.eu',
+        #               'AuthMethod': 'password',
+        #               'AuthString': 'test++'}
+        self.proxy_server = xmlrpclib.Server(self.url, verbose = 0, allow_none = True)
+
+    def __getattr__(self, name):
+        def func(*args, **kwds):
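+            # map the requested name onto a whitelisted PLCAPI call (honoring
+            # aliases), then forward it over xmlrpc with the auth struct prepended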
+            actual_name=None
+            if name in PlShell.direct_calls: actual_name=name
+            if name in PlShell.alias_calls: actual_name=PlShell.alias_calls[name]
+            if not actual_name:
+                raise Exception, "Illegal method call %s for PL driver"%(name)
+            return getattr(self.proxy_server, actual_name)(self.plauth, *args, **kwds)
+        return func
index 5cead3b..fcef25e 100644 (file)
@@ -30,11 +30,11 @@ class Slices:
         slice_name = hrn_to_pl_slicename(hrn)
         # XX Should we just call PLCAPI.GetSliceTicket(slice_name) instead
         # of doing all of this?
-        #return self.api.GetSliceTicket(self.auth, slice_name) 
+        #return self.api.driver.GetSliceTicket(self.auth, slice_name) 
         
         # from PLCAPI.GetSlivers.get_slivers()
         slice_fields = ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids']
-        slices = self.api.plshell.GetSlices(self.api.plauth, slice_name, slice_fields)
+        slices = self.api.driver.GetSlices(slice_name, slice_fields)
         # Build up list of users and slice attributes
         person_ids = set()
         all_slice_tag_ids = set()
@@ -44,7 +44,7 @@ class Slices:
         person_ids = list(person_ids)
         all_slice_tag_ids = list(all_slice_tag_ids)
         # Get user information
-        all_persons_list = self.api.plshell.GetPersons(self.api.plauth, {'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
+        all_persons_list = self.api.driver.GetPersons({'person_id':person_ids,'enabled':True}, ['person_id', 'enabled', 'key_ids'])
         all_persons = {}
         for person in all_persons_list:
             all_persons[person['person_id']] = person        
@@ -55,12 +55,12 @@ class Slices:
             key_ids.update(person['key_ids'])
         key_ids = list(key_ids)
         # Get user account keys
-        all_keys_list = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key', 'key_type'])
+        all_keys_list = self.api.driver.GetKeys(key_ids, ['key_id', 'key', 'key_type'])
         all_keys = {}
         for key in all_keys_list:
             all_keys[key['key_id']] = key
         # Get slice attributes
-        all_slice_tags_list = self.api.plshell.GetSliceTags(self.api.plauth, all_slice_tag_ids)
+        all_slice_tags_list = self.api.driver.GetSliceTags(all_slice_tag_ids)
         all_slice_tags = {}
         for slice_tag in all_slice_tags_list:
             all_slice_tags[slice_tag['slice_tag_id']] = slice_tag
@@ -143,7 +143,7 @@ class Slices:
         site_authority = get_authority(slice_authority).lower()
 
         # check if we are already peered with this site_authority, if so
-        peers = self.api.plshell.GetPeers(self.api.plauth, {}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+        peers = self.api.driver.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
         for peer_record in peers:
             names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
             if site_authority in names:
@@ -166,7 +166,7 @@ class Slices:
 
     def verify_slice_nodes(self, slice, requested_slivers, peer):
         
-        nodes = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        nodes = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
         current_slivers = [node['hostname'] for node in nodes]
 
         # remove nodes not in rspec
@@ -177,9 +177,9 @@ class Slices:
 
         try:
             if peer:
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer['shortname'])
-            self.api.plshell.AddSliceToNodes(self.api.plauth, slice['name'], added_nodes)
-            self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slice['name'], deleted_nodes)
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            self.api.driver.AddSliceToNodes(slice['name'], added_nodes)
+            self.api.driver.DeleteSliceFromNodes(slice['name'], deleted_nodes)
 
         except: 
             self.api.logger.log_exc('Failed to add/remove slice from nodes')
@@ -198,7 +198,7 @@ class Slices:
             if1 = aggregate.interfaces[node['interface_ids'][0]]
             ipaddr = if1['ip']
             topo_rspec = VLink.get_topo_rspec(link, ipaddr)
-            self.api.plshell.AddSliceTag(self.api.plauth, slice['name'], 'topo_rspec', str([topo_rspec]), node_id) 
+            self.api.driver.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id) 
                         
         
 
@@ -207,36 +207,33 @@ class Slices:
             # bind site
             try:
                 if site:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'site', \
-                       site['site_id'], peer['shortname'], slice['site_id'])
+                    self.api.driver.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
             except Exception,e:
-                self.api.plshell.DeleteSite(self.api.plauth, site['site_id'])
+                self.api.driver.DeleteSite(site['site_id'])
                 raise e
             
             # bind slice
             try:
                 if slice:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', \
-                       slice['slice_id'], peer['shortname'], slice['slice_id'])
+                    self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
             except Exception,e:
-                self.api.plshell.DeleteSlice(self.api.plauth, slice['slice_id'])
+                self.api.driver.DeleteSlice(slice['slice_id'])
                 raise e 
 
             # bind persons
             for person in persons:
                 try:
-                    self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', \
-                        person['person_id'], peer['shortname'], person['peer_person_id'])
+                    self.api.driver.BindObjectToPeer('person', 
+                                                     person['person_id'], peer['shortname'], person['peer_person_id'])
 
                     for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
                         try:
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'key',\
-                                key['key_id'], peer['shortname'], remote_key_id)
+                            self.api.driver.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
                         except:
-                            self.api.plshell.DeleteKey(self.api.plauth, key['key_id'])
+                            self.api.driver.DeleteKey(key['key_id'])
                             self.api.logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
                 except Exception,e:
-                    self.api.plshell.DeletePerson(self.api.plauth, person['person_id'])
+                    self.api.driver.DeletePerson(person['person_id'])
                     raise e       
 
         return slice
@@ -248,7 +245,7 @@ class Slices:
         slicename = hrn_to_pl_slicename(slice_hrn)
         authority_name = slicename.split('_')[0]
         login_base = authority_name[:20]
-        sites = self.api.plshell.GetSites(self.api.plauth, login_base)
+        sites = self.api.driver.GetSites(login_base)
         if not sites:
             # create new site record
             site = {'name': 'geni.%s' % authority_name,
@@ -260,9 +257,9 @@ class Slices:
                     'peer_site_id': None}
             if peer:
                 site['peer_site_id'] = slice_record.get('site_id', None)
-            site['site_id'] = self.api.plshell.AddSite(self.api.plauth, site)
+            site['site_id'] = self.api.driver.AddSite(site)
             # exempt federated sites from monitor policies
-            self.api.plshell.AddSiteTag(self.api.plauth, site['site_id'], 'exempt_site_until', "20200101")
+            self.api.driver.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
             
             # is this still necessary?
             # add record to the local registry 
@@ -274,7 +271,7 @@ class Slices:
             site =  sites[0]
             if peer:
                 # unbind from peer so we can modify if necessary. Will bind back later
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'site', site['site_id'], peer['shortname']) 
+                self.api.driver.UnBindObjectFromPeer('site', site['site_id'], peer['shortname']) 
         
         return site        
 
@@ -282,13 +279,13 @@ class Slices:
         slicename = hrn_to_pl_slicename(slice_hrn)
         parts = slicename.split("_")
         login_base = parts[0]
-        slices = self.api.plshell.GetSlices(self.api.plauth, [slicename]) 
+        slices = self.api.driver.GetSlices([slicename]) 
         if not slices:
             slice = {'name': slicename,
                      'url': slice_record.get('url', slice_hrn), 
                      'description': slice_record.get('description', slice_hrn)}
             # add the slice                          
-            slice['slice_id'] = self.api.plshell.AddSlice(self.api.plauth, slice)
+            slice['slice_id'] = self.api.driver.AddSlice(slice)
             slice['node_ids'] = []
             slice['person_ids'] = []
             if peer:
@@ -303,12 +300,10 @@ class Slices:
             if peer:
                 slice['peer_slice_id'] = slice_record.get('slice_id', None)
                 # unbind from peer so we can modify if necessary. Will bind back later
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice',\
-                             slice['slice_id'], peer['shortname'])
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
                # Update the existing record (e.g. expires field) with the latest info.
             if slice_record and slice['expires'] != slice_record['expires']:
-                self.api.plshell.UpdateSlice(self.api.plauth, slice['slice_id'],\
-                             {'expires' : slice_record['expires']})
+                self.api.driver.UpdateSlice( slice['slice_id'], {'expires' : slice_record['expires']})
        
         return slice
 
@@ -334,13 +329,13 @@ class Slices:
         existing_user_ids = []
         if users_by_email:
             # get existing users by email 
-            existing_users = self.api.plshell.GetPersons(self.api.plauth, \
-                {'email': users_by_email.keys()}, ['person_id', 'key_ids', 'email'])
+            existing_users = self.api.driver.GetPersons({'email': users_by_email.keys()}, 
+                                                        ['person_id', 'key_ids', 'email'])
             existing_user_ids.extend([user['email'] for user in existing_users])
 
         if users_by_site:
             # get a list of user sites (based on requested user urns)
-            site_list = self.api.plshell.GetSites(self.api.plauth, users_by_site.keys(), \
+            site_list = self.api.driver.GetSites(users_by_site.keys(), \
                 ['site_id', 'login_base', 'person_ids'])
             sites = {}
             site_user_ids = []
@@ -350,8 +345,8 @@ class Slices:
                 sites[site['site_id']] = site
                 site_user_ids.extend(site['person_ids'])
 
-            existing_site_persons_list = self.api.plshell.GetPersons(self.api.plauth, \
-              site_user_ids,  ['person_id', 'key_ids', 'email', 'site_ids'])
+            existing_site_persons_list = self.api.driver.GetPersons(site_user_ids,  
+                                                                    ['person_id', 'key_ids', 'email', 'site_ids'])
 
             # all requested users are either existing users or new (added) users      
             for login_base in users_by_site:
@@ -379,8 +374,8 @@ class Slices:
         requested_user_ids = users_dict.keys()
         # existing slice users
         existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
-        existing_slice_users = self.api.plshell.GetPersons(self.api.plauth, \
-             existing_slice_users_filter, ['person_id', 'key_ids', 'email'])
+        existing_slice_users = self.api.driver.GetPersons(existing_slice_users_filter,
+                                                          ['person_id', 'key_ids', 'email'])
         existing_slice_user_ids = [user['email'] for user in existing_slice_users]
         
         # users to be added, removed or updated
@@ -392,7 +387,7 @@ class Slices:
         # Remove stale users (only if we are not appending).
         if append == False:
             for removed_user_id in removed_user_ids:
-                self.api.plshell.DeletePersonFromSlice(self.api.plauth, removed_user_id, slice_record['name'])
+                self.api.driver.DeletePersonFromSlice(removed_user_id, slice_record['name'])
         # update_existing users
         updated_users_list = [user for user in existing_slice_users if user['email'] in \
           updated_user_ids]
@@ -411,20 +406,20 @@ class Slices:
                 'keys': [],
                 'key_ids': added_user.get('key_ids', []),
             }
-            person['person_id'] = self.api.plshell.AddPerson(self.api.plauth, person)
+            person['person_id'] = self.api.driver.AddPerson(person)
             if peer:
                 person['peer_person_id'] = added_user['person_id']
             added_persons.append(person)
            
             # enable the account 
-            self.api.plshell.UpdatePerson(self.api.plauth, person['person_id'], {'enabled': True})
+            self.api.driver.UpdatePerson(person['person_id'], {'enabled': True})
             
             # add person to site
-            self.api.plshell.AddPersonToSite(self.api.plauth, added_user_id, login_base)
+            self.api.driver.AddPersonToSite(added_user_id, login_base)
 
             for key_string in added_user.get('keys', []):
                 key = {'key':key_string, 'key_type':'ssh'}
-                key['key_id'] = self.api.plshell.AddPersonKey(self.api.plauth, person['person_id'], key)
+                key['key_id'] = self.api.driver.AddPersonKey(person['person_id'], key)
                 person['keys'].append(key)
 
             # add the registry record
@@ -435,7 +430,7 @@ class Slices:
     
         for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
             # add person to the slice 
-            self.api.plshell.AddPersonToSlice(self.api.plauth, added_slice_user_id, slice_record['name'])
+            self.api.driver.AddPersonToSlice(added_slice_user_id, slice_record['name'])
             # if this is a peer record then it should already be bound to a peer.
             # no need to worry about it getting bound later 
 
@@ -447,7 +442,7 @@ class Slices:
         key_ids = []
         for person in persons:
             key_ids.extend(person['key_ids'])
-        keylist = self.api.plshell.GetKeys(self.api.plauth, key_ids, ['key_id', 'key'])
+        keylist = self.api.driver.GetKeys(key_ids, ['key_id', 'key'])
         keydict = {}
         for key in keylist:
             keydict[key['key']] = key['key_id']     
@@ -469,16 +464,16 @@ class Slices:
                     try:
                         if peer:
                             person = persondict[user['email']]
-                            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'person', person['person_id'], peer['shortname'])
-                        key['key_id'] = self.api.plshell.AddPersonKey(self.api.plauth, user['email'], key)
+                            self.api.driver.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.api.driver.AddPersonKey(user['email'], key)
                         if peer:
                             key_index = user_keys.index(key['key'])
                             remote_key_id = user['key_ids'][key_index]
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'key', key['key_id'], peer['shortname'], remote_key_id)
+                            self.api.driver.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
                             
                     finally:
                         if peer:
-                            self.api.plshell.BindObjectToPeer(self.api.plauth, 'person', person['person_id'], peer['shortname'], user['person_id'])
+                            self.api.driver.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
         
         # remove old keys (only if we are not appending)
         if append == False: 
@@ -487,21 +482,21 @@ class Slices:
                 if keydict[existing_key_id] in removed_keys:
                     try:
                         if peer:
-                            self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'key', existing_key_id, peer['shortname'])
-                        self.api.plshell.DeleteKey(self.api.plauth, existing_key_id)
+                            self.api.driver.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.api.driver.DeleteKey(existing_key_id)
                     except:
                         pass   
 
     def verify_slice_attributes(self, slice, requested_slice_attributes):
         # get list of attributes users are able to manage
-        slice_attributes = self.api.plshell.GetTagTypes(self.api.plauth, {'category': '*slice*', '|roles': ['user']})
+        slice_attributes = self.api.driver.GetTagTypes({'category': '*slice*', '|roles': ['user']})
         valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
 
         # get sliver attributes
         added_slice_attributes = []
         removed_slice_attributes = []
         ignored_slice_attribute_names = []
-        existing_slice_attributes = self.api.plshell.GetSliceTags(self.api.plauth, {'slice_id': slice['slice_id']})
+        existing_slice_attributes = self.api.driver.GetSliceTags({'slice_id': slice['slice_id']})
 
         # get attributes that should be removed
         for slice_tag in existing_slice_attributes:
@@ -539,7 +534,7 @@ class Slices:
         # remove stale attributes
         for attribute in removed_slice_attributes:
             try:
-                self.api.plshell.DeleteSliceTag(self.api.plauth, attribute['slice_tag_id'])
+                self.api.driver.DeleteSliceTag(attribute['slice_tag_id'])
             except Exception, e:
                 self.api.logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                                 % (name, value,  node_id, str(e)))
@@ -547,7 +542,7 @@ class Slices:
         # add requested_attributes
         for attribute in added_slice_attributes:
             try:
-                self.api.plshell.AddSliceTag(self.api.plauth, slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
+                self.api.driver.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
             except Exception, e:
                 self.api.logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
                                 % (name, value,  node_id, str(e)))
@@ -570,7 +565,7 @@ class Slices:
         slice = self.verify_slice(registry, credential, hrn, site_id, remote_site_id, peer, sfa_peer)
 
         # find out where this slice is currently running
-        nodelist = self.api.plshell.GetNodes(self.api.plauth, slice['node_ids'], ['hostname'])
+        nodelist = self.api.driver.GetNodes(slice['node_ids'], ['hostname'])
         hostnames = [node['hostname'] for node in nodelist]
 
         # get netspec details
@@ -610,9 +605,9 @@ class Slices:
 
         try:
             if peer:
-                self.api.plshell.UnBindObjectFromPeer(self.api.plauth, 'slice', slice['slice_id'], peer)
+                self.api.driver.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
 
-            self.api.plshell.AddSliceToNodes(self.api.plauth, slicename, added_nodes) 
+            self.api.driver.AddSliceToNodes(slicename, added_nodes) 
 
             # Add recognized slice tags
             for node_name in node_names:
@@ -622,12 +617,12 @@ class Slices:
                     if (isinstance(value, list)):
                         value = value[0]
 
-                    self.api.plshell.AddSliceTag(self.api.plauth, slicename, slice_tag, value, node_name)
+                    self.api.driver.AddSliceTag(slicename, slice_tag, value, node_name)
 
-            self.api.plshell.DeleteSliceFromNodes(self.api.plauth, slicename, deleted_nodes)
+            self.api.driver.DeleteSliceFromNodes(slicename, deleted_nodes)
         finally:
             if peer:
-                self.api.plshell.BindObjectToPeer(self.api.plauth, 'slice', slice['slice_id'], peer, slice['peer_slice_id'])
+                self.api.driver.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
 
         return 1
 
diff --git a/sfa/rspecs/elements/bwlimit.py b/sfa/rspecs/elements/bwlimit.py
new file mode 100644 (file)
index 0000000..027bb5b
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+
+class BWlimit(Element):
+    fields = { 
+        'units': None,
+        'value': None,
+    }
+        
diff --git a/sfa/rspecs/elements/component_manager.py b/sfa/rspecs/elements/component_manager.py
new file mode 100644 (file)
index 0000000..ec9d85c
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class ComponentManager(Element):
+    fields = {
+        'name': None,
+    }
+    
diff --git a/sfa/rspecs/elements/disk_image.py b/sfa/rspecs/elements/disk_image.py
new file mode 100644 (file)
index 0000000..3a810a5
--- /dev/null
@@ -0,0 +1,4 @@
+from sfa.rspecs.elements.element import Element
+
+class DiskImage(Element):
+    fields = {}        
index 8217c11..6757f8a 100644 (file)
@@ -1,85 +1,9 @@
-from lxml import etree
+class Element(dict):
 
-class Element:
-    def __init__(self, root_node, namespaces = None):
-        self.root_node = root_node
-        self.namespaces = namespaces
+    fields = {}
 
-    def xpath(self, xpath):
-        return this.root_node.xpath(xpath, namespaces=self.namespaces) 
+    def __init__(self, fields={}, element=None):
+        self.element = element
+        dict.__init__(self, self.fields) 
+        self.update(fields)
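+
+    # usage sketch (illustrative, not from the original file): subclasses override
+    # 'fields' with their default keys; instances are typically built from an
+    # lxml element's attributes, e.g.  iface = Interface(elem.attrib, elem)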
 
-    def add_element(self, name, attrs={}, parent=None, text=""):
-        """
-        Generic wrapper around etree.SubElement(). Adds an element to
-        specified parent node. Adds element to root node is parent is
-        not specified.
-        """
-        if parent == None:
-            parent = self.root_node
-        element = etree.SubElement(parent, name)
-        if text:
-            element.text = text
-        if isinstance(attrs, dict):
-            for attr in attrs:
-                element.set(attr, attrs[attr])
-        return element
-
-    def remove_element(self, element_name, root_node = None):
-        """
-        Removes all occurences of an element from the tree. Start at
-        specified root_node if specified, otherwise start at tree's root.
-        """
-        if not root_node:
-            root_node = self.root_node
-
-        if not element_name.startswith('//'):
-            element_name = '//' + element_name
-
-        elements = root_node.xpath('%s ' % element_name, namespaces=self.namespaces)
-        for element in elements:
-            parent = element.getparent()
-            parent.remove(element)
-
-    
-    def add_attribute(self, elem, name, value):
-        """
-        Add attribute to specified etree element
-        """
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def remove_attribute(self, elem, name, value):
-        """
-        Removes an attribute from an element
-        """
-        if not elem == None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def get_attributes(self, elem=None, depth=None):
-        if elem == None:
-            elem = self.root_node
-        attrs = dict(elem.attrib)
-        attrs['text'] = str(elem.text).strip()
-        if depth is None or isinstance(depth, int) and depth > 0: 
-            for child_elem in list(elem):
-                key = str(child_elem.tag)
-                if key not in attrs:
-                    attrs[key] = [self.get_attributes(child_elem, recursive)]
-                else:
-                    attrs[key].append(self.get_attributes(child_elem, recursive))
-        return attrs
-    
-    def attributes_list(self, elem):
-        # convert a list of attribute tags into list of tuples
-        # (tagnme, text_value)
-        opts = []
-        if not elem == None:
-            for e in elem:
-                opts.append((e.tag, e.text))
-        return opts
-
-    
diff --git a/sfa/rspecs/elements/execute.py b/sfa/rspecs/elements/execute.py
new file mode 100644 (file)
index 0000000..43e6e62
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class Execute(Element):
+    fields = {
+        'shell': None,
+        'command': None,
+    }
diff --git a/sfa/rspecs/elements/hardware_type.py b/sfa/rspecs/elements/hardware_type.py
new file mode 100644 (file)
index 0000000..8dd959c
--- /dev/null
@@ -0,0 +1,7 @@
+from sfa.rspecs.elements.element import Element
+
+class HardwareType(Element):
+    
+    fields = {
+        'name': None,
+    }        
diff --git a/sfa/rspecs/elements/install.py b/sfa/rspecs/elements/install.py
new file mode 100644 (file)
index 0000000..1df60b6
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+class Install(Element):
+    fields = {
+        'file_type': None,
+        'url': None,
+        'install_path': None,
+    }
index 2aadf4d..7617ade 100644 (file)
@@ -1,13 +1,12 @@
-class Interface(dict):
-    element = None
+from sfa.rspecs.elements.element import Element
+
+class Interface(Element):
     fields = {'component_id': None,
               'role': None,
               'client_id': None,
               'ipv4': None,
-    }    
-    def __init__(self, fields={}, element=None):
-        self.element = element
-        dict.__init__(self, Interface.fields)
-        self.update(fields)
-        
+              'bwlimit': None,
+              'node_id': None,
+              'interface_id': None
     
+    }    
index d916d22..02a8d10 100644 (file)
@@ -1,7 +1,6 @@
-from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.element import Element    
 
-class Link(dict):
-    element = None 
+class Link(Element):
     fields = {
         'client_id': None, 
         'component_id': None,
@@ -15,9 +14,3 @@ class Link(dict):
         'packet_loss': None,
         'description': None,
     }
-    
-    def __init__(self, fields={}, element=None):
-        self.element = element
-        dict.__init__(self, Link.fields)
-        self.update(fields)
-
diff --git a/sfa/rspecs/elements/link_type.py b/sfa/rspecs/elements/link_type.py
new file mode 100644 (file)
index 0000000..882903d
--- /dev/null
@@ -0,0 +1,6 @@
+from sfa.rspecs.elements.element import Element
+
+class LinkType(Element):
+    fields = {
+        'name': None,
+    } 
diff --git a/sfa/rspecs/elements/location.py b/sfa/rspecs/elements/location.py
new file mode 100644 (file)
index 0000000..a5a9260
--- /dev/null
@@ -0,0 +1,9 @@
+from sfa.rspecs.elements.element import Element
+
+class Location(Element):
+    
+    fields = {
+        'country': None,
+        'longitude': None,
+        'latitude': None,
+    }
diff --git a/sfa/rspecs/elements/login.py b/sfa/rspecs/elements/login.py
new file mode 100644 (file)
index 0000000..a64c759
--- /dev/null
@@ -0,0 +1,8 @@
+from sfa.rspecs.elements.element import Element
+
+class Login(Element):
+    fields = {
+        'authentication': None,
+        'hostname': None,
+        'port': None
+    }
index 6a358a4..362b9ff 100644 (file)
@@ -1,11 +1,9 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.sfalogging import logger
-
 class Network(Element):
-
-    def get_networks(*args, **kwds):
-        logger.info("sfa.rspecs.networks: get_networks not implemented")
-
-    def add_networks(*args, **kwds):
-        logger.info("sfa.rspecs.networks: add_network not implemented")
-        
+    
+    fields = {
+        'name': None,
+    }
+                
+      
index db6e119..f90fff1 100644 (file)
@@ -1,13 +1,26 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.faults import SfaNotImplemented 
-from sfa.util.sfalogging import logger
  
 class Node(Element):
-
-    def get_nodes(*args):
-        logger.info("sfa.rspecs.nodes: get_nodes not implemented") 
     
-    def add_nodes(*args):
-        logger.info("sfa.rspecs.nodes: add_nodes not implemented") 
+    fields = {
+        'component_id': None,
+        'component_name': None,
+        'component_manager_id': None,
+        'client_id': None,
+        'sliver_id': None,
+        'authority_id': None,    
+        'exclusive': None,
+        'location': None,
+        'bw_unallocated': None,
+        'bw_limit': None,
+        'boot_state': None,    
+        'slivers': [],
+        'hardware_types': [],
+        'disk_images': [],
+        'interfaces': [],
+        'services': [],
+        'tags': [],
+        'pl_initscripts': [],
+    }
                 
       
diff --git a/sfa/rspecs/elements/pl_tag.py b/sfa/rspecs/elements/pl_tag.py
new file mode 100644 (file)
index 0000000..51b1e76
--- /dev/null
@@ -0,0 +1,9 @@
+from sfa.rspecs.elements.element import Element
+
+class PLTag(Element):
+
+    fields = {
+        'name': None,
+        'value': None,
+    }
+        
diff --git a/sfa/rspecs/elements/property.py b/sfa/rspecs/elements/property.py
new file mode 100644 (file)
index 0000000..97a1ffc
--- /dev/null
@@ -0,0 +1,12 @@
+from sfa.rspecs.elements.element import Element
+
+class Property(Element):
+    
+    fields = {
+        'source_id': None,
+        'dest_id': None,
+        'capacity': None,
+        'latency': None,
+        'packet_loss': None,
+    }
+       
diff --git a/sfa/rspecs/elements/services.py b/sfa/rspecs/elements/services.py
new file mode 100644 (file)
index 0000000..a48be27
--- /dev/null
@@ -0,0 +1,10 @@
+from sfa.rspecs.elements.element import Element
+
+class Services(Element):
+
+    fields = {
+        'install': [],
+        'execute': [],
+        'login': [],
+    }
+
index 67105dc..bf2cc1f 100644 (file)
@@ -1,29 +1,9 @@
 from sfa.rspecs.elements.element import Element
-from sfa.util.sfalogging import logger
 
-class Slivers(Element):
-
-    def get_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: get_slivers not implemented")
-
-    def add_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_slivers not implemented")
-
-    def remove_slivers(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: remove_slivers not implemented")
-
-    def get_sliver_defaults(*args, **kwds):    
-        logger.debug("sfa.rspecs.slivers: get_sliver_defaults not implemented")
-    
-    def add_default_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_default_sliver_attributes not implemented")
-
-    def add_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: add_sliver_attribute not implemented")
-
-    def remove_default_sliver_attribute(*args, **kwds):
-        logger.debug("sfa.rspecs.slivers: remove_default_sliver_attributes not implemented")
-
-    def remove_sliver_attribute(*args, **kwds):
-        logger.debuv("sfa.rspecs.slivers: remove_sliver_attribute not implemented")
-        
+class Sliver(Element):
+    fields = {
+        'sliver_id': None,
+        'client_id': None,
+        'name': None,
+        'tags': [],
+    }
diff --git a/sfa/rspecs/elements/tag.py b/sfa/rspecs/elements/tag.py
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
diff --git a/sfa/rspecs/elements/versions/element_version.py b/sfa/rspecs/elements/versions/element_version.py
new file mode 100644 (file)
index 0000000..e69de29
index aeef760..db28f6c 100644 (file)
@@ -3,16 +3,18 @@ from sfa.util.plxrn import PlXrn
 from sfa.util.xrn import Xrn
 from sfa.rspecs.elements.link import Link
 from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.link_type import LinkType
+from sfa.rspecs.elements.component_manager import ComponentManager
+from sfa.rspecs.elements.property import Property    
 from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
 
 class PGv2Link:
-
     elements = {
         'link': RSpecElement(RSpecElements.LINK, '//default:link | //link'),
         'component_manager': RSpecElement(RSpecElements.COMPONENT_MANAGER, './default:component_manager | ./component_manager'),
         'link_type': RSpecElement(RSpecElements.LINK_TYPE, './default:link_type | ./link_type'),
         'property': RSpecElement(RSpecElements.PROPERTY, './default:property | ./property'),
-        'interface_ref': RSpecElement(RSpecElements.INTERFACE_REF, './default:interface_ref | ./interface_ref') 
+        'interface_ref': RSpecElement(RSpecElements.INTERFACE_REF, './default:interface_ref | ./interface_ref'), 
     }
     
     @staticmethod
diff --git a/sfa/rspecs/elements/versions/pgv2Node.py b/sfa/rspecs/elements/versions/pgv2Node.py
new file mode 100644 (file)
index 0000000..4ddafc1
--- /dev/null
@@ -0,0 +1,146 @@
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.network import Network
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pl_tag import PLTag
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+class PGv2Node:
+    elements = {
+        'node': RSpecElement(RSpecElements.NODE, '//default:node | //node'),
+        'sliver': RSpecElement(RSpecElements.SLIVER, './default:sliver_type | ./sliver_type'),
+        'interface': RSpecElement(RSpecElements.INTERFACE, './default:interface | ./interface'),
+        'location': RSpecElement(RSpecElements.LOCATION, './default:location | ./location'),
+        'hardware_type': RSpecElement(RSpecElements.HARDWARE_TYPE, './default:hardware_type | ./hardware_type'),
+        'available': RSpecElement(RSpecElements.AVAILABLE, './default:available | ./available'),
+    } 
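+    # each entry maps a logical rspec element to the xpath used to locate it,
+    # both with and without the 'default' namespace prefix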
+    
+    @staticmethod
+    def add_nodes(xml, nodes):
+        node_elems = []
+        for node in nodes:
+            node_elem = etree.SubElement(xml, 'node')
+            node_elems.append(node_elem)
+            if node.get('component_manager_id'):
+                node_elem.set('component_manager_id', node['component_manager_id'])
+            if node.get('component_id'):
+                node_elem.set('component_id', node['component_id'])
+                component_name = Xrn(node['component_id']).get_leaf()
+                node_elem.set('component_name', component_name)
+            if node.get('client_id'):
+                node_elem.set('client_id', node['client_id'])
+            if node.get('sliver_id'):
+                node_elem.set('sliver_id', node['sliver_id'])
+            if node.get('exclusive'):
+                node_elem.set('exclusive', node['exclusive'])
+            hardware_types = node.get('hardware_type', [])
+            for hardware_type in hardware_types:
+                hw_type_elem = etree.SubElement(node_elem, 'hardware_type')
+                if hardware_type.get('name'):
+                    hw_type_elem.set('name', hardware_type['name'])
+            if node.get('boot_state', '').lower() == 'boot':
+                available_elem = etree.SubElement(node_elem, 'available', now='True')
+            else:
+                available_elem = etree.SubElement(node_elem, 'available', now='False')
+            
+            if node.get('services'):
+                PGv2Services.add_services(node_elem, node.get('services'))
+    
+            slivers = node.get('slivers', [])
+            pl_initscripts = node.get('pl_initscripts', {})
+            for sliver in slivers:
+                sliver_elem = etree.SubElement(node_elem, 'sliver_type')
+                if sliver.get('name'):
+                    sliver_elem.set('name', sliver['name'])
+                if sliver.get('client_id'):
+                    sliver_elem.set('client_id', sliver['client_id'])      
+                for pl_initscript in pl_initscripts.values():
+                    etree.SubElement(sliver_elem, '{%s}initscript' % xml.namespaces['planetlab'], \
+                      name=pl_initscript['name'])
+            location = node.get('location')
+            # only add location if longitude and latitude are not null
+            if location and location.get('longitude') and location.get('latitude'):
+                location_elem = etree.SubElement(node_elem, 'location', country=location['country'],
+                  latitude=location['latitude'], longitude=location['longitude'])
+        return node_elems
+
+    @staticmethod
+    def get_nodes(xml):
+        nodes = []
+        node_elems = xml.xpath(PGv2Node.elements['node'].path)
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            nodes.append(node) 
+            if 'component_id' in node_elem.attrib:
+                node['authority_id'] = Xrn(node_elem.attrib['component_id']).get_authority_urn()
+
+            # set hardware type
+            node['hardware_types'] = []
+            hardware_type_elems = node_elem.xpath(PGv2Node.elements['hardware_type'].path, xml.namespaces)
+            for hardware_type_elem in hardware_type_elems:
+                node['hardware_types'].append(HardwareType(hardware_type_elem.attrib, hardware_type_elem))
+            
+            # set location
+            location_elems = node_elem.xpath(PGv2Node.elements['location'].path, xml.namespaces)
+            if len(location_elems) > 0:
+                node['location'] = Location(location_elems[0].attrib, location_elems[0])
+            
+            # set services
+            services_elems = node_elem.xpath(PGv2Services.elements['services'].path, xml.namespaces)
+            node['services'] = []
+            for services_elem in services_elems:
+                # services element has no useful info, but the child elements do
+                for child in services_elem.iterchildren():
+                    # xxx incomplete - install/execute/login children still need to be parsed
+                    pass
+
+            # set interfaces
+            interface_elems = node_elem.xpath(PGv2Node.elements['interface'].path, xml.namespaces)
+            node['interfaces'] = []
+            for interface_elem in interface_elems:
+                node['interfaces'].append(Interface(interface_elem.attrib, interface_elem))
+
+            # set available
+            available = node_elem.xpath(PGv2Node.elements['available'].path, xml.namespaces)
+            if len(available) > 0:
+                if available[0].attrib.get('now', '').lower() == 'true': 
+                    node['boot_state'] = 'boot'
+                else: 
+                    node['boot_state'] = 'disabled' 
+
+            # set the slivers
+            sliver_elems = node_elem.xpath(PGv2Node.elements['sliver'].path, xml.namespaces)
+            node['slivers'] = []
+            for sliver_elem in sliver_elems:
+                node['slivers'].append(Sliver(sliver_elem.attrib, sliver_elem))            
+
+        return nodes
+
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        pass
+   
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        nodes = PGv2Node.get_nodes(xml)
+        nodes_with_slivers = [node for node in nodes if node['slivers']]
+        return nodes_with_slivers 
+
+if __name__ == '__main__':
+    from sfa.rspecs.rspec import RSpec
+    import pdb
+    r = RSpec('/tmp/emulab.rspec')
+    r2 = RSpec(version = 'ProtoGENI')
+    nodes = PGv2Node.get_nodes(r.xml)
+    PGv2Node.add_nodes(r2.xml.root, nodes)
+    #pdb.set_trace()
+        
+                                    
diff --git a/sfa/rspecs/elements/versions/pgv2Services.py b/sfa/rspecs/elements/versions/pgv2Services.py
new file mode 100644 (file)
index 0000000..470a2e6
--- /dev/null
@@ -0,0 +1,65 @@
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.execute import Execute  
+from sfa.rspecs.elements.install import Install  
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+
+class PGv2Services:
+    elements = {
+        'services': RSpecElement(RSpecElements.SERVICES, '//default:services | //services'),
+        'install': RSpecElement(RSpecElements.INSTALL, './default:install | ./install'),
+        'execute': RSpecElement(RSpecElements.EXECUTE, './default:execute | ./execute'),
+        'login': RSpecElement(RSpecElements.LOGIN, './default:login | ./login'),
+    }  
+    
+    @staticmethod
+    def add_services(xml, services):
+        for service in services:
+            service_elem = etree.SubElement(xml, 'service')
+            for install in service.get('install', []):
+                install_elem = etree.SubElement(service_elem, 'install')
+                for field in Install.fields:
+                    if field in install:
+                        install_elem.set(field, install[field])
+            for execute in service.get('execute', []):
+                execute_elem = etree.SubElement(service_elem, 'execute')
+                for field in Execute.fields:
+                    if field in execute:
+                        execute_elem.set(field, execute[field])
+            for login in service.get('login', []):
+                login_elem = etree.SubElement(service_elem, 'login')
+                for field in Login.fields:
+                    if field in login:
+                        login_elem.set(field, login[field]) 
+
+              
+    @staticmethod
+    def get_services(xml):
+        services = []
+        for services_elem in xml.xpath(PGv2Services.elements['services'].path):
+            service = Services(services_elem.attrib, services_elem)
+            
+            # get install elements
+            service['install'] = []
+            for install_elem in services_elem.xpath(PGv2Services.elements['install'].path):
+                install = Install(install_elem.attrib, install_elem)
+                service['install'].append(install)
+            
+            # get execute elements
+            service['execute'] = []
+            for execute_elem in services_elem.xpath(PGv2Services.elements['execute'].path):
+                execute = Execute(execute_elem.attrib, execute_elem)
+                service['execute'].append(execute)
+
+            # get login elements
+            service['login'] = []
+            for login_elem in services_elem.xpath(PGv2Services.elements['login'].path):
+                login = Login(login_elem.attrib, login_elem)
+                service['login'].append(login)             
+
+            services.append(service)  
+        return services
+
diff --git a/sfa/rspecs/elements/versions/pgv2SliverType.py b/sfa/rspecs/elements/versions/pgv2SliverType.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/sfa/rspecs/elements/versions/sfav1Network.py b/sfa/rspecs/elements/versions/sfav1Network.py
new file mode 100644 (file)
index 0000000..b529ad5
--- /dev/null
@@ -0,0 +1,32 @@
+
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.network import Network
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+
+class SFAv1Network:
+    elements = {
+        'network': RSpecElement(RSpecElements.NETWORK, '//network'),
+    }
+
+    @staticmethod
+    def add_network(xml, network):
+        found = False
+        network_objs = SFAv1Network.get_networks(xml)
+        for network_obj in network_objs:
+            if network_obj['name'] == network['name']:
+                found = True
+                network_elem = network_obj.element
+        if not found:
+            network_elem = etree.SubElement(xml, 'network', name = network['name'])
+        return network_elem  
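+
+    # illustrative usage (assumed, not part of the original file):
+    #   network_elem = SFAv1Network.add_network(rspec.xml.root, {'name': 'plc'})
+    #   networks = SFAv1Network.get_networks(rspec.xml)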
+    
+    @staticmethod
+    def get_networks(xml):
+        networks = []
+        network_elems = xml.xpath(SFAv1Network.elements['network'].path)
+        for network_elem in network_elems:
+            network = Network({'name': network_elem.attrib.get('name', None)}, network_elem)
+            networks.append(network)
+        return networks
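SFAv1Network.add_network above is effectively reuse-or-create: it returns the existing <network> element with the same name and only appends a new child when none matches. A standalone sketch of that pattern with plain lxml (the helper name is hypothetical):

    from lxml import etree

    def ensure_network(root, name):
        # reuse an existing <network name="..."> if present, otherwise create one
        for net in root.findall('network'):
            if net.get('name') == name:
                return net
        return etree.SubElement(root, 'network', name=name)

    rspec = etree.Element('RSpec', type='SFA')
    assert ensure_network(rspec, 'plc') is ensure_network(rspec, 'plc')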
diff --git a/sfa/rspecs/elements/versions/sfav1Node.py b/sfa/rspecs/elements/versions/sfav1Node.py
new file mode 100644 (file)
index 0000000..6cdf995
--- /dev/null
@@ -0,0 +1,142 @@
+
+from lxml import etree
+from sfa.util.plxrn import PlXrn
+from sfa.util.xrn import Xrn
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.network import Network 
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pl_tag import PLTag
+from sfa.rspecs.rspec_elements import RSpecElement, RSpecElements
+from sfa.rspecs.elements.versions.sfav1Network import SFAv1Network
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+class SFAv1Node:
+
+    elements = {
+        'node': RSpecElement(RSpecElements.NODE, '//default:node | //node'),
+        'sliver': RSpecElement(RSpecElements.SLIVER, './default:sliver | ./sliver'),
+        'interface': RSpecElement(RSpecElements.INTERFACE, './default:interface | ./interface'),
+        'location': RSpecElement(RSpecElements.LOCATION, './default:location | ./location'),
+        'bw_limit': RSpecElement(RSpecElements.BWLIMIT, './default:bw_limit | ./bw_limit'),
+    }
+    
+    @staticmethod
+    def add_nodes(xml, nodes):
+        network_elems = SFAv1Network.get_networks(xml)
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_elem = SFAv1Network.add_network(xml.root, {'name': nodes[0]['component_manager_id']})
+            
+
+        node_elems = []       
+        for node in nodes:
+            node_elem = etree.SubElement(network_elem, 'node')
+            node_elems.append(node_elem)
+            network = None 
+            if 'component_manager_id' in node and node['component_manager_id']:
+                node_elem.set('component_manager_id', node['component_manager_id'])
+                network = Xrn(node['component_manager_id']).get_hrn()
+            if 'component_id' in node and node['component_id']:
+                node_elem.set('component_id', node['component_id'])
+                xrn = Xrn(node['component_id'])
+                node_elem.set('component_name', xrn.get_leaf())
+                hostname_tag = etree.SubElement(node_elem, 'hostname').text = xrn.get_leaf()
+            if 'authority_id' in node and node['authority_id']:
+                node_elem.set('site_id', node['authority_id'])
+            if 'boot_state' in node and node['boot_state']:
+                node_elem.set('boot_state', node['boot_state'])
+            if 'location' in node and node['location']:
+                location_elem = etree.SubElement(node_elem, 'location')
+                for field in Location.fields:
+                    if field in node['location'] and node['location'][field]:
+                        location_elem.set(field, node['location'][field])
+            if 'interfaces' in node and node['interfaces']:
+                i = 0
+                for interface in node['interfaces']:
+                    if 'bwlimit' in interface and interface['bwlimit']:
+                        bwlimit = etree.SubElement(node_elem, 'bw_limit', units='kbps').text = str(interface['bwlimit']/1000)
+                    comp_id = PlXrn(auth=network, interface='node%s:eth%s' % (interface['node_id'], i)).get_urn()
+                    ipaddr = interface['ipv4']
+                    interface_elem = etree.SubElement(node_elem, 'interface', component_id=comp_id, ipv4=ipaddr)
+                    i+=1
+            if 'bw_unallocated' in node and node['bw_unallocated']:
+                bw_unallocated = etree.SubElement(node_elem, 'bw_unallocated', units='kbps').text = str(int(node['bw_unallocated'])/1000)
+
+            if node.get('services'):
+                PGv2Services.add_services(node_elem, node.get('services'))
+
+            if 'tags' in node:
+                for tag in node['tags']:
+                   # expose this hard-wired list of tags, plus the ones that are marked 'sfa' in their category
+                   if tag['name'] in ['fcdistro', 'arch']:
+                        tag_element = etree.SubElement(node_elem, tag['name']).text=tag['value']
+
+            if node.get('slivers'):
+                for sliver in node['slivers']:
+                    sliver_elem = etree.SubElement(node_elem, 'sliver')
+                    if sliver.get('sliver_id'): 
+                        sliver_id_leaf = Xrn(sliver.get('sliver_id')).get_leaf()
+                        sliver_id_parts = sliver_id_leaf.split(':')
+                        name = sliver_id_parts[0] 
+                        sliver_elem.set('name', name) 
+
+    @staticmethod 
+    def add_slivers(xml, slivers):
+        pass
+
+    @staticmethod
+    def get_nodes(xml):
+        nodes = []
+        node_elems = xml.xpath(SFAv1Node.elements['node'].path)
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            if 'site_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['site_id']
+            if 'authority_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['authority_id']
+            # set the location
+            location_elems = node_elem.xpath(SFAv1Node.elements['location'].path, xml.namespaces)
+            if len(location_elems) > 0:
+                node['location'] = Location(location_elems[0].attrib, location_elems[0])
+            
+            # set the bwlimit
+            bwlimit_elems = node_elem.xpath(SFAv1Node.elements['bw_limit'].path, xml.namespaces)
+            if len(bwlimit_elems) > 0:
+                bwlimit = BWlimit(bwlimit_elems[0].attrib, bwlimit_elems[0])
+                node['bwlimit'] = bwlimit
+
+            # set the interfaces
+            interface_elems = node_elem.xpath(SFAv1Node.elements['interface'].path, xml.namespaces)
+            node['interfaces'] = []
+            for interface_elem in interface_elems:
+                node['interfaces'].append(Interface(interface_elem.attrib, interface_elem))
+            
+            # set the slivers
+            sliver_elems = node_elem.xpath(SFAv1Node.elements['sliver'].path, xml.namespaces)
+            node['slivers'] = []
+            for sliver_elem in sliver_elems:
+                node['slivers'].append(Sliver(sliver_elem.attrib, sliver_elem))
+            
+            # set tags
+            node['tags'] = [] 
+            for child in node_elem.iterchildren():
+                if child.tag not in SFAv1Node.elements:
+                    tag = PLTag({'name': child.tag, 'value': child.text}, child)  
+                    node['tags'].append(tag) 
+            nodes.append(node)
+        return nodes
+        
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        nodes = SFAv1Node.get_nodes(xml)
+        nodes_with_slivers = [node for node in nodes if node['slivers']]
+        return nodes_with_slivers
+    
+             
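In SFAv1Node.get_nodes above, any child element whose tag is not one of the recognized keys in SFAv1Node.elements ('node', 'sliver', 'interface', 'location', 'bw_limit') is collected as a PlanetLab tag. A standalone sketch of that pattern with plain lxml (tag names are illustrative):

    from lxml import etree

    node_elem = etree.fromstring(
        '<node><location country="us"/><fcdistro>f14</fcdistro><arch>x86_64</arch></node>')
    known = set(['node', 'sliver', 'interface', 'location', 'bw_limit'])
    tags = [(child.tag, child.text) for child in node_elem.iterchildren()
            if child.tag not in known]
    # tags == [('fcdistro', 'f14'), ('arch', 'x86_64')]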
diff --git a/sfa/rspecs/elements/versions/sfav1Sliver.py b/sfa/rspecs/elements/versions/sfav1Sliver.py
new file mode 100644 (file)
index 0000000..f12c977
--- /dev/null
@@ -0,0 +1,18 @@
+
+from lxml import etree
+
+from sfa.rspecs.elements.sliver import Sliver
+
+from sfa.util.xrn import Xrn
+from sfa.util.plxrn import PlXrn
+class SFAv1Sliver:
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        for sliver in slivers:
+            sliver_elem = etree.SubElement(xml, 'sliver')
+            if sliver.get('component_id'):
+                name_full = Xrn(sliver.get('component_id')).get_leaf()
+                name = name_full.split(':')[0]
+                sliver_elem.set('name', name)
+                     
index 3226f58..ce3cac7 100644 (file)
@@ -1,18 +1,26 @@
 from sfa.util.enumeration import Enum
 
 # recognized top level rspec elements
-RSpecElements = Enum(NETWORK='NETWORK', 
-                     COMPONENT_MANAGER='COMPONENT_MANAGER', 
-                     SLIVER='SLIVER', 
-                     SLIVER_TYPE='SLIVER_TYPE', 
-                     NODE='NODE', 
-                     INTERFACE='INTERFACE', 
-                     INTERFACE_REF='INTERFACE_REF', 
-                     LINK='LINK', 
-                     LINK_TYPE='LINK_TYPE', 
-                     SERVICE='SERVICE',
-                     PROPERTY='PROPERTY'
-                )
+RSpecElements = Enum(
+    AVAILABLE='AVAILABLE',
+    BWLIMIT='BWLIMIT',
+    EXECUTE='EXECUTE',
+    NETWORK='NETWORK', 
+    COMPONENT_MANAGER='COMPONENT_MANAGER',
+    HARDWARE_TYPE='HARDWARE_TYPE', 
+    INSTALL='INSTALL', 
+    INTERFACE='INTERFACE', 
+    INTERFACE_REF='INTERFACE_REF',
+    LOCATION='LOCATION', 
+    LOGIN='LOGIN', 
+    LINK='LINK', 
+    LINK_TYPE='LINK_TYPE', 
+    NODE='NODE', 
+    PROPERTY='PROPERTY',
+    SERVICES='SERVICES',
+    SLIVER='SLIVER', 
+    SLIVER_TYPE='SLIVER_TYPE', 
+)
 
 class RSpecElement:
     def __init__(self, element_type, path):
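The paths registered with these element types routinely take the form '//default:node | //node' so that one query matches documents both with and without a default namespace (SFAv1 rspecs carry none, PGv2 ones do). A minimal lxml illustration of why both branches are needed (the namespace URI below is only an example):

    from lxml import etree

    ns = {'default': 'http://www.protogeni.net/resources/rspec/2'}
    with_ns = etree.fromstring(
        '<rspec xmlns="http://www.protogeni.net/resources/rspec/2"><node/></rspec>')
    without_ns = etree.fromstring('<rspec><node/></rspec>')
    path = '//default:node | //node'
    assert len(with_ns.xpath(path, namespaces=ns)) == 1
    assert len(without_ns.xpath(path, namespaces=ns)) == 1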
index e7340e1..90fcaf4 100644 (file)
@@ -3,6 +3,7 @@ from sfa.util.xrn import hrn_to_urn
 from sfa.server.interface import Interfaces, Interface
 from sfa.util.config import Config     
 
+# this truly is a server-side object
 class Aggregate(SfaServer):
 
     ##
@@ -15,9 +16,10 @@ class Aggregate(SfaServer):
     def __init__(self, ip, port, key_file, cert_file):
         SfaServer.__init__(self, ip, port, key_file, cert_file,'aggregate')
 
-##
+#
 # Aggregates is a dictionary of aggregate connections keyed on the aggregate hrn
-
+# as such it's more of a client-side thing for aggregate servers to reach their peers
+#
 class Aggregates(Interfaces):
 
     default_dict = {'aggregates': {'aggregate': [Interfaces.default_fields]}}
index 3958c5f..9baa6c2 100644 (file)
@@ -17,7 +17,6 @@ except ImportError:
 ##
 # Component is a SfaServer that serves component operations.
 
-# set SFA_GENERIC_FLAVOUR=plcm to get a PlcComponentApi instance in the request handler 
 class Component(SfaServer):
     ##
     # Create a new registry object.
@@ -28,5 +27,4 @@ class Component(SfaServer):
     # @param cert_file certificate filename containing public key (could be a GID file)
 
     def __init__(self, ip, port, key_file, cert_file):
-        SfaServer.__init__(self, ip, port, key_file, cert_file)
-        self.server.interface = 'component'
+        SfaServer.__init__(self, ip, port, key_file, cert_file, interface='component')
index 7d2beef..3866a45 100644 (file)
@@ -1,6 +1,6 @@
 #from sfa.util.faults import *
-from sfa.util.storage import XmlStorage
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+from sfa.util.xml import XML
 
 # GeniLight client support is optional
 try:
@@ -25,13 +25,13 @@ class Interface:
         url =  "http://%s" %  "/".join(address_parts)
         return url
 
-    def get_server(self, key_file, cert_file, timeout=30):
+    def server_proxy(self, key_file, cert_file, timeout=30):
         server = None 
         if  self.client_type ==  'geniclientlight' and GeniClientLight:
             # xxx url and self.api are undefined
             server = GeniClientLight(url, self.api.key_file, self.api.cert_file)
         else:
-            server = xmlrpcprotocol.get_server(self.get_url(), key_file, cert_file, timeout) 
+            server = xmlrpcprotocol.server_proxy(self.get_url(), key_file, cert_file, timeout) 
  
         return server       
 ##
@@ -57,25 +57,20 @@ class Interfaces(dict):
     def __init__(self, conf_file):
         dict.__init__(self, {})
         # load config file
-        self.interface_info = XmlStorage(conf_file, self.default_dict)
-        self.interface_info.load()
-        records = self.interface_info.values()[0]
-        if not isinstance(records, list):
-            records = [records]
-        
-        required_fields = self.default_fields.keys()
-        for record in records:
-            if not record or not set(required_fields).issubset(record.keys()):
-                continue
-            # port is appended onto the domain, before the path. Should look like:
-            # http://domain:port/path
-            hrn, address, port = record['hrn'], record['addr'], record['port']
-            # sometime this is called at a very early stage with no config loaded
-            # avoid to remember this instance in such a case
-            if not address or not port:
-                continue
-            interface = Interface(hrn, address, port) 
-            self[hrn] = interface
+        required_fields = set(self.default_fields.keys())
+        self.interface_info = XML(conf_file).todict()
+        for value in self.interface_info.values():
+            if isinstance(value, list):
+                for record in value:
+                    if isinstance(record, dict) and \
+                      required_fields.issubset(record.keys()):
+                        hrn, address, port = record['hrn'], record['addr'], record['port']
+                        # sometimes this is called at a very early stage with no config loaded;
+                        # avoid remembering this instance in such a case
+                        if not address or not port:
+                            continue     
+                        interface = Interface(hrn, address, port)
+                        self[hrn] = interface   
 
-    def get_server(self, hrn, key_file, cert_file, timeout=30):
-        return self[hrn].get_server(key_file, cert_file, timeout)
+    def server_proxy(self, hrn, key_file, cert_file, timeout=30):
+        return self[hrn].server_proxy(key_file, cert_file, timeout)
index 8638ce9..515b083 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.plcsfaapi import PlcSfaApi
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = PlcSfaApi(interface='aggregate')
+api = SfaApi(interface='aggregate')
 
 def handler(req):
     try:
index 115fcba..1be5480 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.plcsfaapi import PlcSfaApi
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = PlcSfaApi(interface='registry')
+api = SfaApi(interface='registry')
 
 def handler(req):
     try:
index 3de4519..cd655a7 100755 (executable)
@@ -12,10 +12,10 @@ import traceback
 import xmlrpclib
 from mod_python import apache
 
-from sfa.plc.plcsfaapi import PlcSfaApi
 from sfa.util.sfalogging import logger
+from sfa.plc.server import SfaApi
 
-api = PlcSfaApi(interface='slicemgr')
+api = SfaApi(interface='slicemgr')
 
 def handler(req):
     try:
index 2a37c22..bdad7df 100644 (file)
@@ -5,8 +5,10 @@ from sfa.server.sfaserver import SfaServer
 from sfa.server.interface import Interfaces, Interface
 from sfa.util.config import Config 
 
-##
+#
 # Registry is a SfaServer that serves registry and slice operations at PLC.
+# this truly is a server-side object
+#
 class Registry(SfaServer):
     ##
     # Create a new registry object.
@@ -19,10 +21,10 @@ class Registry(SfaServer):
     def __init__(self, ip, port, key_file, cert_file):
         SfaServer.__init__(self, ip, port, key_file, cert_file,'registry')
 
-##
-# Registries is a dictionary of registry connections keyed on the registry
-# hrn
-
+#
+# Registries is a dictionary of registry connections keyed on the registry hrn
+# as such it's more of a client-side thing for registry servers to reach their peers
+#
 class Registries(Interfaces):
     
     default_dict = {'registries': {'registry': [Interfaces.default_fields]}}
index 93fef14..0d99e98 100644 (file)
@@ -3,15 +3,18 @@
 import sys
 import os
 import traceback
+import socket
+
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol 
 from sfa.util.table import SfaTable
 from sfa.util.prefixTree import prefixTree
-from sfa.plc.plcsfaapi import PlcSfaApi
 from sfa.util.config import Config
+
+from sfa.generic import Generic
+
 from sfa.trust.certificate import Keypair
 from sfa.trust.hierarchy import Hierarchy
 from sfa.server.registry import Registries
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol 
-import socket
 
 def main():
     config = Config()
@@ -30,8 +33,8 @@ def main():
     # and a valid credential
     authority = config.SFA_INTERFACE_HRN
     url = 'http://%s:%s/' %(config.SFA_REGISTRY_HOST, config.SFA_REGISTRY_PORT)
-    registry = xmlrpcprotocol.get_server(url, key_file, cert_file)
-    sfa_api = PlcSfaApi(key_file = key_file, cert_file = cert_file, interface='registry')
+    registry = xmlrpcprotocol.server_proxy(url, key_file, cert_file)
+    sfa_api = Generic.the_flavour()
     credential = sfa_api.getCredential()
 
     # get peer registries
index 966a13e..d4a3131 100755 (executable)
@@ -34,19 +34,21 @@ component_port=12346
 import os, os.path
 import traceback
 import sys
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 from optparse import OptionParser
 
 from sfa.util.sfalogging import logger
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.config import Config
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
+
 from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.hierarchy import Hierarchy
 from sfa.trust.gid import GID
-from sfa.util.config import Config
-from sfa.plc.plcsfaapi import PlcSfaApi
+
+from sfa.server.sfaapi import SfaApi
+
 from sfa.server.registry import Registries
 from sfa.server.aggregate import Aggregates
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.sfalogging import logger
 
 # after http://www.erlenstar.demon.co.uk/unix/faq_2.html
 def daemon():
@@ -143,7 +145,7 @@ def install_peer_certs(server_key_file, server_cert_file):
     # There should be a gid file in /etc/sfa/trusted_roots for every
     # peer registry found in in the registries.xml config file. If there
     # are any missing gids, request a new one from the peer registry.
-    api = PlcSfaApi(key_file = server_key_file, cert_file = server_cert_file)
+    api = SfaApi(key_file = server_key_file, cert_file = server_cert_file)
     registries = Registries()
     aggregates = Aggregates()
     interfaces = dict(registries.items() + aggregates.items())
@@ -164,7 +166,7 @@ def install_peer_certs(server_key_file, server_cert_file):
         try:
             # get gid from the registry
             url = interfaces[new_hrn].get_url()
-            interface = interfaces[new_hrn].get_server(server_key_file, server_cert_file, timeout=30)
+            interface = interfaces[new_hrn].server_proxy(server_key_file, server_cert_file, timeout=30)
             # skip non sfa aggregates
             server_version = api.get_cached_server_version(interface)
             if 'sfa' not in server_version:
index 954ff6d..16f358e 100755 (executable)
@@ -6,7 +6,7 @@ from optparse import OptionParser
 
 from sfa.util.faults import ConnectionKeyGIDMismatch
 from sfa.util.config import Config
-import sfa.util.xmlrpcprotocol as xmlrpcprotocol
+import sfa.client.xmlrpcprotocol as xmlrpcprotocol
 from sfa.util.plxrn import hrn_to_pl_slicename, slicename_to_hrn
 
 from sfa.trust.certificate import Keypair, Certificate
@@ -28,7 +28,7 @@ def handle_gid_mismatch_exception(f):
 
     return wrapper
 
-def get_server(url=None, port=None, keyfile=None, certfile=None,verbose=False):
+def server_proxy(url=None, port=None, keyfile=None, certfile=None,verbose=False):
     """
     returns an xmlrpc connection to the service at the specified 
     address
@@ -48,7 +48,7 @@ def get_server(url=None, port=None, keyfile=None, certfile=None,verbose=False):
     if verbose:
         print "Contacting registry at: %(url)s" % locals()
 
-    server = xmlrpcprotocol.get_server(url, keyfile, certfile)
+    server = xmlrpcprotocol.server_proxy(url, keyfile, certfile)
     return server    
     
 
@@ -97,7 +97,7 @@ def get_node_key(registry=None, verbose=False):
     cert.sign()
     cert.save_to_file(certfile)
     
-    registry = get_server(url = registry, keyfile=keyfile, certfile=certfile)    
+    registry = server_proxy(url = registry, keyfile=keyfile, certfile=certfile)    
     registry.get_key()
 
 def create_server_keypair(keyfile=None, certfile=None, hrn="component", verbose=False):
@@ -145,7 +145,7 @@ def get_credential(registry=None, force=False, verbose=False):
         create_server_keypair(keyfile, certfile, hrn, verbose)
 
         # get credential from registry 
-        registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+        registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
         cert = Certificate(filename=certfile)
         cert_str = cert.save_to_string(save_parents=True)
         cred = registry.GetSelfCredential(cert_str, 'node', hrn)
@@ -172,7 +172,7 @@ def get_trusted_certs(registry=None, verbose=False):
     cred = get_credential(registry=registry, verbose=verbose)
     # make sure server key cert pair exists
     create_server_keypair(keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
-    registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+    registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
     # get the trusted certs and save them in the right place
     if verbose:
         print "Getting trusted certs from registry"
@@ -217,14 +217,15 @@ def get_gids(registry=None, verbose=False):
     cred = get_credential(registry=registry, verbose=verbose)
     # make sure server key cert pair exists
     create_server_keypair(keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
-    registry = get_server(url=registry, keyfile=keyfile, certfile=certfile)
+    registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
             
     if verbose:
         print "Getting current slices on this node"
     # get a list of slices on this node
-    from sfa.plc.plcsfaapi import PlcComponentApi
-    api = PlcComponentApi()
-    xids_tuple = api.nodemanager.GetXIDs()
+    from sfa.generic import Generic
+    generic=Generic.the_flavour()
+    api = generic.make_api(interface='component')
+    xids_tuple = api.driver.nodemanager.GetXIDs()
     slices = eval(xids_tuple[1])
     slicenames = slices.keys()
 
index 242979c..7085037 100644 (file)
@@ -1,4 +1,4 @@
-import os.path
+import os, os.path
 import datetime
 
 from sfa.util.faults import SfaAPIError
@@ -9,10 +9,10 @@ from sfa.trust.certificate import Keypair, Certificate
 from sfa.trust.credential import Credential
 from sfa.trust.rights import determine_rights
 
-# this is wrong all right, but temporary, will use generic
 from sfa.server.xmlrpcapi import XmlrpcApi
-import os
-import datetime
+
+# thgen xxx fixme this is wrong all right, but temporary, will use generic
+from sfa.util.table import SfaTable
 
 ####################
 class SfaApi (XmlrpcApi): 
@@ -20,12 +20,18 @@ class SfaApi (XmlrpcApi):
     """
     An SfaApi instance is a basic xmlrcp service
     augmented with the local cryptographic material and hrn
-    It also has the notion of neighbour sfa services 
-    as defined in /etc/sfa/{aggregates,registries}.xml
+
+    It also knows its own interface (a string describing
+    whether we run a registry, aggregate or slicemgr) and has 
+    the notion of neighbour sfa services as defined 
+    in /etc/sfa/{aggregates,registries}.xml
+
     Finally it contains a cache instance
+
     It gets augmented by the generic layer with 
     (*) an instance of manager (actually a manager module for now)
     (*) which in turn holds an instance of a testbed driver
+    For convenience api.manager.driver == api.driver
     """
 
     def __init__ (self, encoding="utf-8", methods='sfa.methods', 
@@ -63,11 +69,7 @@ class SfaApi (XmlrpcApi):
         # filled later on by generic/Generic
         self.manager=None
 
-    # tmp
-    def get_interface_manager(self, manager_base = 'sfa.managers'):
-        return self.manager
-
-    def get_server(self, interface, cred, timeout=30):
+    def server_proxy(self, interface, cred, timeout=30):
         """
         Returns a connection to the specified interface. Use the specified
         credential to determine the caller and look for the caller's key/cert 
@@ -83,7 +85,7 @@ class SfaApi (XmlrpcApi):
         auth_info = hierarchy.get_auth_info(caller_gid.get_hrn())
         key_file = auth_info.get_privkey_filename()
         cert_file = auth_info.get_gid_filename()
-        server = interface.get_server(key_file, cert_file, timeout)
+        server = interface.server_proxy(key_file, cert_file, timeout)
         return server
                
         
@@ -136,7 +138,7 @@ class SfaApi (XmlrpcApi):
         """
         from sfa.server.registry import Registries
         registries = Registries()
-        registry = registries.get_server(self.hrn, self.key_file, self.cert_file)
+        registry = registries.server_proxy(self.hrn, self.key_file, self.cert_file)
         cert_string=self.cert.save_to_string(save_parents=True)
         # get self credential
         self_cred = registry.GetSelfCredential(cert_string, self.hrn, 'authority')
@@ -156,7 +158,9 @@ class SfaApi (XmlrpcApi):
         if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
             auth_hrn = hrn
         auth_info = self.auth.get_auth_info(auth_hrn)
-        table = self.SfaTable()
+        # xxx thgen fixme - use SfaTable hardwired for now 
+        #table = self.SfaTable()
+        table = SfaTable()
         records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
         if not records:
             raise RecordNotFound
index cf2cca9..1466916 100644 (file)
@@ -76,7 +76,7 @@ class Config:
                 except: pass
              
         except IOError, e:
-            raise IOError, "Could not find the configuration file: %s" % config_file
+            raise IOError, "Could not find or load the configuration file: %s" % config_file
 
     def get_trustedroots_dir(self):
         return self.config_path + os.sep + 'trusted_roots'
diff --git a/sfa/util/defaultdict.py b/sfa/util/defaultdict.py
new file mode 100644 (file)
index 0000000..e0dd145
--- /dev/null
@@ -0,0 +1,38 @@
+# from http://code.activestate.com/recipes/523034/
+try:
+    from collections import defaultdict
+except:
+    class defaultdict(dict):
+        def __init__(self, default_factory=None, *a, **kw):
+            if (default_factory is not None and
+                not hasattr(default_factory, '__call__')):
+                raise TypeError('first argument must be callable')
+            dict.__init__(self, *a, **kw)
+            self.default_factory = default_factory
+        def __getitem__(self, key):
+            try:
+                return dict.__getitem__(self, key)
+            except KeyError:
+                return self.__missing__(key)
+        def __missing__(self, key):
+            if self.default_factory is None:
+                raise KeyError(key)
+            self[key] = value = self.default_factory()
+            return value
+        def __reduce__(self):
+            if self.default_factory is None:
+                args = tuple()
+            else:
+                args = self.default_factory,
+            return type(self), args, None, None, self.items()
+        def copy(self):
+            return self.__copy__()
+        def __copy__(self):
+            return type(self)(self.default_factory, self)
+        def __deepcopy__(self, memo):
+            import copy
+            return type(self)(self.default_factory,
+                              copy.deepcopy(self.items()))
+        def __repr__(self):
+            return 'defaultdict(%s, %s)' % (self.default_factory,
+                                            dict.__repr__(self))
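The fallback class above mirrors collections.defaultdict, which only became available in Python 2.5, so usage is identical either way:

    from sfa.util.defaultdict import defaultdict

    counts = defaultdict(int)
    for tag in ['node', 'node', 'link']:
        counts[tag] += 1
    # counts == {'node': 2, 'link': 1}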
index 424f419..7ebf379 100644 (file)
@@ -319,7 +319,7 @@ class SfaRecord(Row):
         #dict = xmlrpclib.loads(str)[0][0]
 
         record = XML(str)
-        self.load_from_dict(record.todict2())
+        self.load_from_dict(record.todict())
 
     ##
     # Dump the record to stdout
diff --git a/sfa/util/rspecHelper.py b/sfa/util/rspecHelper.py
deleted file mode 100755 (executable)
index deaa746..0000000
+++ /dev/null
@@ -1,418 +0,0 @@
-#! /usr/bin/env python
-
-import sys
-
-from copy import deepcopy
-from lxml import etree
-from StringIO import StringIO
-from optparse import OptionParser
-
-from sfa.util.faults import InvalidRSpec
-from sfa.util.sfalogging import logger
-
-def merge_rspecs(rspecs):
-    """
-    Merge merge a list of RSpecs into 1 RSpec, and return the result.
-    rspecs must be a valid RSpec string or list of RSpec strings.
-    """
-    if not rspecs or not isinstance(rspecs, list):
-        return rspecs
-
-    # ugly hack to avoid sending the same info twice, when the call graph has dags
-    known_networks={}
-    def register_network (network):
-        try:
-            known_networks[network.get('name')]=True
-        except:
-            logger.error("merge_rspecs: cannot register network with no name in rspec")
-            pass
-    def is_registered_network (network):
-        try:
-            return network.get('name') in known_networks
-        except:
-            logger.error("merge_rspecs: cannot retrieve network with no name in rspec")
-            return False
-
-    # the resulting tree
-    rspec = None
-    for input_rspec in rspecs:
-        # ignore empty strings as returned with used call_ids
-        if not input_rspec: continue
-        try:
-            tree = etree.parse(StringIO(input_rspec))
-        except etree.XMLSyntaxError:
-            # consider failing silently here
-            logger.log_exc("merge_rspecs, parse error")
-            message = str(sys.exc_info()[1]) + ' with ' + input_rspec
-            raise InvalidRSpec(message)
-
-        root = tree.getroot()
-        if not root.get("type") in ["SFA"]:
-            logger.error("merge_rspecs: unexpected type for rspec root, %s"%root.get('type'))
-            continue
-        if rspec == None:
-            # we scan the first input, register all networks
-            # in addition we remove duplicates - needed until everyone runs 1.0-10
-            rspec = root
-            for network in root.iterfind("./network"):
-                if not is_registered_network(network):
-                    register_network(network)
-                else:
-                    # duplicate in the first input - trash it
-                    root.remove(network)
-        else:
-            for network in root.iterfind("./network"):
-                if not is_registered_network(network):
-                    rspec.append(deepcopy(network))
-                    register_network(network)
-            for request in root.iterfind("./request"):
-                rspec.append(deepcopy(request))
-    return etree.tostring(rspec, xml_declaration=True, pretty_print=True)
-
-class RSpec:
-    def __init__(self, xml):
-        parser = etree.XMLParser(remove_blank_text=True)
-        tree = etree.parse(StringIO(xml), parser)
-        self.rspec = tree.getroot()
-
-        # If there is only one network in the rspec, make it the default
-        self.network = None
-        networks = self.get_network_list()
-        if len(networks) == 1:
-            self.network = networks[0]
-
-    # Thierry : need this to locate hostname even if several networks
-    def get_node_element(self, hostname, network=None):
-        if network == None and self.network:
-            network = self.network
-        if network != None:
-            names = self.rspec.iterfind("./network[@name='%s']/site/node/hostname" % network)
-        else:
-            names = self.rspec.iterfind("./network/site/node/hostname")
-        for name in names:
-            if name.text == hostname:
-                return name.getparent()
-        return None
-        
-    # Thierry : need this to return all nodes in all networks
-    def get_node_list(self, network=None):
-        if network == None and self.network:
-            network = self.network
-        if network != None:
-            return self.rspec.xpath("./network[@name='%s']/site/node/hostname/text()" % network)
-        else:
-            return self.rspec.xpath("./network/site/node/hostname/text()")
-
-    def get_network_list(self):
-        return self.rspec.xpath("./network[@name]/@name")
-
-    def get_sliver_list(self, network=None):
-        if network == None:
-            network = self.network
-        result = self.rspec.xpath("./network[@name='%s']/site/node[sliver]/hostname/text()" % network)
-        return result
-
-    def get_available_node_list(self, network=None):
-        if network == None:
-            network = self.network
-        result = self.rspec.xpath("./network[@name='%s']/site/node[not(sliver)]/hostname/text()" % network)
-        return result
-
-    def add_sliver(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        etree.SubElement(node, "sliver")
-
-    def remove_sliver(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        node.remove(node.find("sliver"))
-
-    def attributes_list(self, elem):
-        opts = []
-        if elem is not None:
-            for e in elem:
-                opts.append((e.tag, e.text))
-        return opts
-
-    def get_default_sliver_attributes(self, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        return self.attributes_list(defaults)
-
-    def get_sliver_attributes(self, hostname, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        return self.attributes_list(sliver)
-
-    def add_attribute(self, elem, name, value):
-        opt = etree.SubElement(elem, name)
-        opt.text = value
-
-    def add_default_sliver_attribute(self, name, value, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        if defaults is None:
-            defaults = etree.Element("sliver_defaults")
-            network = self.rspec.find("./network[@name='%s']" % network)
-            network.insert(0, defaults)
-        self.add_attribute(defaults, name, value)
-
-    def add_sliver_attribute(self, hostname, name, value, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        self.add_attribute(sliver, name, value)
-
-    def remove_attribute(self, elem, name, value):
-        if elem is not None:
-            opts = elem.iterfind(name)
-            if opts is not None:
-                for opt in opts:
-                    if opt.text == value:
-                        elem.remove(opt)
-
-    def remove_default_sliver_attribute(self, name, value, network=None):
-        if network == None:
-            network = self.network
-        defaults = self.rspec.find("./network[@name='%s']/sliver_defaults" % network)
-        self.remove_attribute(defaults, name, value)
-
-    def remove_sliver_attribute(self, hostname, name, value, network=None):
-        if network == None:
-            network = self.network
-        node = self.get_node_element(hostname, network)
-        sliver = node.find("sliver")
-        self.remove_attribute(sliver, name, value)
-
-    def get_site_nodes(self, siteid, network=None):
-        if network == None:
-            network = self.network
-        query = './network[@name="%s"]/site[@id="%s"]/node/hostname/text()' % (network, siteid)
-        result = self.rspec.xpath(query)
-        return result
-        
-    def get_link_list(self, network=None):
-        if network == None:
-            network = self.network
-        linklist = []
-        links = self.rspec.iterfind("./network[@name='%s']/link" % network)
-        for link in links:
-            (end1, end2) = link.get("endpoints").split()
-            name = link.find("description")
-            linklist.append((name.text, 
-                             self.get_site_nodes(end1, network), 
-                             self.get_site_nodes(end2, network)))
-        return linklist
-
-    def get_vlink_list(self, network=None):
-        if network == None:
-            network = self.network
-        vlinklist = []
-        vlinks = self.rspec.iterfind("./network[@name='%s']//vlink" % network)
-        for vlink in vlinks:
-            endpoints = vlink.get("endpoints")
-            (end1, end2) = endpoints.split()
-            query = './network[@name="%s"]//node[@id="%s"]/hostname/text()' % network
-            node1 = self.rspec.xpath(query % end1)[0]
-            node2 = self.rspec.xpath(query % end2)[0]
-            desc = "%s <--> %s" % (node1, node2) 
-            kbps = vlink.find("kbps")
-            vlinklist.append((endpoints, desc, kbps.text))
-        return vlinklist
-
-    def query_links(self, fromnode, tonode, network=None):
-        if network == None:
-            network = self.network
-        fromsite = fromnode.getparent()
-        tosite = tonode.getparent()
-        fromid = fromsite.get("id")
-        toid = tosite.get("id")
-
-        query = "./network[@name='%s']/link[@endpoints = '%s %s']" % (network, fromid, toid)
-        results = self.rspec.xpath(query)
-        if results == None:
-            query = "./network[@name='%s']/link[@endpoints = '%s %s']" % (network, toid, fromid)
-            results = self.rspec.xpath(query)
-        return results
-
-    def query_vlinks(self, endpoints, network=None):
-        if network == None:
-            network = self.network
-        query = "./network[@name='%s']//vlink[@endpoints = '%s']" % (network, endpoints)
-        results = self.rspec.xpath(query)
-        return results
-            
-    
-    def add_vlink(self, fromhost, tohost, kbps, network=None):
-        if network == None:
-            network = self.network
-        fromnode = self.get_node_element(fromhost, network)
-        tonode = self.get_node_element(tohost, network)
-        links = self.query_links(fromnode, tonode, network)
-
-        for link in links:
-            vlink = etree.SubElement(link, "vlink")
-            fromid = fromnode.get("id")
-            toid = tonode.get("id")
-            vlink.set("endpoints", "%s %s" % (fromid, toid))
-            self.add_attribute(vlink, "kbps", kbps)
-        
-
-    def remove_vlink(self, endpoints, network=None):
-        if network == None:
-            network = self.network
-        vlinks = self.query_vlinks(endpoints, network)
-        for vlink in vlinks:
-            vlink.getparent().remove(vlink)
-
-    def toxml(self):
-        return etree.tostring(self.rspec, pretty_print=True, 
-                              xml_declaration=True)
-
-    def __str__(self):
-        return self.toxml()
-
-    def save(self, filename):
-        f = open(filename, "w")
-        f.write(self.toxml())
-        f.close()
-
-
-class Commands:
-    def __init__(self, usage, description, epilog=None):
-        self.parser = OptionParser(usage=usage, description=description,
-                                   epilog=epilog)
-        self.parser.add_option("-i", "", dest="infile", metavar="FILE",
-                               help="read RSpec from FILE (default is stdin)")
-        self.parser.add_option("-o", "", dest="outfile", metavar="FILE",
-                               help="write output to FILE (default is stdout)")
-        self.nodefile = False
-        self.attributes = {}
-
-    def add_nodefile_option(self):
-        self.nodefile = True
-        self.parser.add_option("-n", "", dest="nodefile", 
-                               metavar="FILE",
-                               help="read node list from FILE"),
-
-    def add_show_attributes_option(self):
-        self.parser.add_option("-s", "--show-attributes", action="store_true", 
-                               dest="showatt", default=False, 
-                               help="show sliver attributes")
-
-    def add_attribute_options(self):
-        self.parser.add_option("", "--capabilities", action="append",
-                               metavar="<cap1,cap2,...>",
-                               help="Vserver bcapabilities")
-        self.parser.add_option("", "--codemux", action="append",
-                               metavar="<host,local-port>",
-                               help="Demux HTTP between slices using " +
-                               "localhost ports")
-        self.parser.add_option("", "--cpu-pct", action="append",
-                               metavar="<num>", 
-                               help="Reserved CPU percent (e.g., 25)")
-        self.parser.add_option("", "--cpu-share", action="append",
-                               metavar="<num>", 
-                               help="Number of CPU shares (e.g., 5)")
-        self.parser.add_option("", "--delegations", 
-                               metavar="<slice1,slice2,...>", action="append",
-                               help="List of slices with delegation authority")
-        self.parser.add_option("", "--disk-max", 
-                               metavar="<num>", action="append",
-                               help="Disk quota (1k disk blocks)")
-        self.parser.add_option("", "--initscript", 
-                               metavar="<name>", action="append",
-                               help="Slice initialization script (e.g., stork)")
-        self.parser.add_option("", "--ip-addresses", action="append",
-                               metavar="<IP addr>", 
-                               help="Add an IP address to a sliver")
-        self.parser.add_option("", "--net-i2-max-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Maximum daily network Tx limit " +
-                               "to I2 hosts.")
-        self.parser.add_option("", "--net-i2-max-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Maximum bandwidth over I2 routes")
-        self.parser.add_option("", "--net-i2-min-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Minimum bandwidth over I2 routes")
-        self.parser.add_option("", "--net-i2-share", 
-                               metavar="<num>", action="append",
-                               help="Number of bandwidth shares over I2 routes")
-        self.parser.add_option("", "--net-i2-thresh-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Limit sent to I2 hosts before warning, " +
-                               "throttling")
-        self.parser.add_option("", "--net-max-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Maximum daily network Tx limit " +
-                               "to non-I2 hosts.")
-        self.parser.add_option("", "--net-max-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Maximum bandwidth over non-I2 routes")
-        self.parser.add_option("", "--net-min-rate", 
-                               metavar="<Kbps>", action="append",
-                               help="Minimum bandwidth over non-I2 routes")
-        self.parser.add_option("", "--net-share", 
-                               metavar="<num>", action="append",
-                               help="Number of bandwidth shares over non-I2 " +
-                               "routes")
-        self.parser.add_option("", "--net-thresh-kbyte", 
-                               metavar="<KBytes>", action="append",
-                               help="Limit sent to non-I2 hosts before " +
-                               "warning, throttling")
-        self.parser.add_option("", "--vsys", 
-                               metavar="<name>", action="append",
-                               help="Vsys script (e.g., fd_fusemount)")
-        self.parser.add_option("", "--vsys-vnet", 
-                               metavar="<IP network>", action="append",
-                               help="Allocate a virtual private network")
-
-    def get_attribute_dict(self):
-        attrlist = ['capabilities','codemux','cpu_pct','cpu_share',
-                    'delegations','disk_max','initscript','ip_addresses',
-                    'net_i2_max_kbyte','net_i2_max_rate','net_i2_min_rate',
-                    'net_i2_share','net_i2_thresh_kbyte',
-                    'net_max_kbyte','net_max_rate','net_min_rate',
-                    'net_share','net_thresh_kbyte',
-                    'vsys','vsys_vnet']
-        attrdict = {}
-        for attr in attrlist:
-            value = getattr(self.opts, attr, None)
-            if value is not None:
-                attrdict[attr] = value
-        return attrdict
-
-    def prep(self):
-        (self.opts, self.args) = self.parser.parse_args()
-
-        if self.opts.infile:
-            sys.stdin = open(self.opts.infile, "r")
-        xml = sys.stdin.read()
-        self.rspec = RSpec(xml)
-            
-        if self.nodefile:
-            if self.opts.nodefile:
-                f = open(self.opts.nodefile, "r")
-                self.nodes = f.read().split()
-                f.close()
-            else:
-                self.nodes = self.args
-
-        if self.opts.outfile:
-            sys.stdout = open(self.opts.outfile, "w")
-
-
-
-
-
-
-
index 75229b3..2e4640e 100644 (file)
@@ -68,8 +68,11 @@ class _SfaLogger:
             self.logger.setLevel(logging.WARNING)
         elif verbose==1:
             self.logger.setLevel(logging.INFO)
-        elif verbose==2:
+        elif verbose>=2:
             self.logger.setLevel(logging.DEBUG)
+    # in case some other code needs a boolean
+    def getBoolVerboseFromOpt(self,verbose):
+        return verbose>=1
 
     ####################
     def info(self, msg):
diff --git a/sfa/util/soapprotocol.py b/sfa/util/soapprotocol.py
deleted file mode 100644 (file)
index de3ee96..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-# SOAP-specific code for SFA Client
-
-from httplib import HTTPSConnection
-from ZSI.client import Binding
-
-def xmlrpc_like_callable (soap_callable, *x):
-    soap_result = soap_callable(*x)
-    xmlrpc_result = soap_result['Result']
-    return xmlrpc_result
-        
-class SFACallable:
-     def __init__(self, soap_callable):
-        self.soap_callable = soap_callable
-
-     def __call__(self, *args):
-         outer_result = self.soap_callable(*args)
-         return outer_result['Result']
-
-
-class SFASoapBinding(Binding):
-    def __getattr__(self, attr):
-        soap_callable = Binding.__getattr__(self, attr)
-        return SFACallable(soap_callable)
-
-
-def get_server(url, key_file, cert_file):
-    auth = {
-        'transport' : HTTPSConnection,
-        'transdict' : {'cert_file' : cert_file, 
-                       'key_file' : key_file
-                      },
-     }
-
-    return SFASoapBinding(url=url, **auth)
-
diff --git a/sfa/util/ssl_socket.py b/sfa/util/ssl_socket.py
deleted file mode 100644 (file)
index d221da3..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-from ssl import SSLSocket
-
-import textwrap
-
-import _ssl             # if we can't import it, let the error propagate
-
-from _ssl import SSLError
-from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
-from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
-from _ssl import RAND_status, RAND_egd, RAND_add
-from _ssl import \
-     SSL_ERROR_ZERO_RETURN, \
-     SSL_ERROR_WANT_READ, \
-     SSL_ERROR_WANT_WRITE, \
-     SSL_ERROR_WANT_X509_LOOKUP, \
-     SSL_ERROR_SYSCALL, \
-     SSL_ERROR_SSL, \
-     SSL_ERROR_WANT_CONNECT, \
-     SSL_ERROR_EOF, \
-     SSL_ERROR_INVALID_ERROR_CODE
-
-from socket import socket, _fileobject
-from socket import getnameinfo as _getnameinfo
-import base64        # for DER-to-PEM translation
-
-class SSLSocket(SSLSocket, socket):
-
-    """This class implements a subtype of socket.socket that wraps
-    the underlying OS socket in an SSL context when necessary, and
-    provides read and write methods over that channel."""
-
-    def __init__(self, sock, keyfile=None, certfile=None,
-                 server_side=False, cert_reqs=CERT_NONE,
-                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
-                 do_handshake_on_connect=True,
-                 suppress_ragged_eofs=True):
-        socket.__init__(self, _sock=sock._sock)
-        # the initializer for socket trashes the methods (tsk, tsk), so...
-        self.send = lambda data, flags=0: SSLSocket.send(self, data, flags)
-        self.sendto = lambda data, addr, flags=0: SSLSocket.sendto(self, data, addr, flags)
-        self.recv = lambda buflen=1024, flags=0: SSLSocket.recv(self, buflen, flags)
-        self.recvfrom = lambda addr, buflen=1024, flags=0: SSLSocket.recvfrom(self, addr, buflen, flags)
-        self.recv_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recv_into(self, buffer, nbytes, flags)
-        self.recvfrom_into = lambda buffer, nbytes=None, flags=0: SSLSocket.recvfrom_into(self, buffer, nbytes, flags)
-
-        if certfile and not keyfile:
-            keyfile = certfile
-        # see if it's connected
-        try:
-            socket.getpeername(self)
-        except:
-            # no, no connection yet
-            self._sslobj = None
-        else:
-            # yes, create the SSL object
-            self._sslobj = _ssl.sslwrap(self._sock, server_side,
-                                        keyfile, certfile,
-                                        cert_reqs, ssl_version, ca_certs)
-            if do_handshake_on_connect:
-                timeout = self.gettimeout()
-                try:
-                    if timeout == 0:
-                        self.settimeout(None)
-                    self.do_handshake()
-                finally:
-                    self.settimeout(timeout)
-        self.keyfile = keyfile
-        self.certfile = certfile
-        self.cert_reqs = cert_reqs
-        self.ssl_version = ssl_version
-        self.ca_certs = ca_certs
-        self.do_handshake_on_connect = do_handshake_on_connect
-        self.suppress_ragged_eofs = suppress_ragged_eofs
-        self._makefile_refs = 0
-
-
diff --git a/sfa/util/topology.py b/sfa/util/topology.py
new file mode 100644 (file)
index 0000000..bd7eb18
--- /dev/null
@@ -0,0 +1,40 @@
+##
+# SFA Topology Info
+#
+# This module holds topology configuration for SFA. It is implemented as a
+# set of (site_id, site_id) tuples, one per link.
+
+import os.path
+import traceback
+from sfa.util.sfalogging import logger
+
+class Topology(set):
+    """
+    Parse the topology configuration file. 
+    """
+
+    #def __init__(self, config_file = "/etc/sfa/topology"):
+    def __init__(self, config_file = "/tmp/topology"):
+        set.__init__(self) 
+        self.config_file = None
+        self.config_path = None
+        self.load(config_file)
+
+    def load(self, config_file):
+        try:
+            
+            self.config_file = config_file
+            # path to configuration data
+            self.config_path = os.path.dirname(config_file)
+            # load the links
+            f = open(config_file, 'r')
+            for line in f:
+                ignore = line.find('#')
+                if ignore > -1:
+                    line = line[0:ignore]
+                tup = line.split()
+                if len(tup) > 1:
+                    self.add((tup[0], tup[1]))    
+        except Exception, e:
+            logger.log_exc("Could not find or load the configuration file: %s" % config_file)
+            raise
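The topology file parsed above is plain text, one link per line as a pair of whitespace-separated site ids, with '#' starting a comment; entries end up as string tuples in the set. A small usage sketch against the temporary default path used above:

    f = open('/tmp/topology', 'w')
    f.write('# one link per line: <site_id> <site_id>\n')
    f.write('1 2\n')
    f.write('2 3   # trailing comments are stripped\n')
    f.close()

    from sfa.util.topology import Topology
    links = Topology('/tmp/topology')
    assert ('1', '2') in links and ('2', '3') in links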
index d880ed3..ddb06e4 100755 (executable)
@@ -1,4 +1,5 @@
 #!/usr/bin/python 
+from types import StringTypes
 from lxml import etree
 from StringIO import StringIO
 
@@ -59,10 +60,10 @@ class XML:
             # it hard for us to write xpath queries for the default namespace because lxml 
             # won't understand a None prefix. We will just associate the default namespace 
             # with a key named 'default'.     
-            self.namespaces['default'] = self.namespaces[None]
+            self.namespaces['default'] = self.namespaces.pop(None)
+            
         else:
             self.namespaces['default'] = 'default' 
-
         # set schema 
         for key in self.root.attrib.keys():
             if key.endswith('schemaLocation'):
@@ -101,8 +102,9 @@ class XML:
         # element.attrib.update will explode if DateTimes are in the
         # dictionary.
         d=d.copy()
+        # iterate over keys() rather than iteritems(): entries get deleted as we go
         for k in d.keys():
-            if (type(d[k]) != str) and (type(d[k]) != unicode):
+            if not isinstance(d[k],StringTypes):
                 del d[k]
 
         element.attrib.update(d)
@@ -136,7 +138,7 @@ class XML:
 
     def add_element(self, name, attrs={}, parent=None, text=""):
         """
-        Generic wrapper around etree.SubElement(). Adds an element to 
+        Wrapper around etree.SubElement(). Adds an element to 
         specified parent node. Adds element to the root node if parent is 
         not specified. 
         """
@@ -216,6 +218,7 @@ class XML:
     def toxml(self):
         return etree.tostring(self.root, encoding='UTF-8', pretty_print=True)  
     
+    # XXX smbaker, for record.load_from_string
     def todict(self, elem=None):
         if elem is None:
             elem = self.root
@@ -226,19 +229,6 @@ class XML:
             if child.tag not in d:
                 d[child.tag] = []
             d[child.tag].append(self.todict(child))
-        return d
-
-    # XXX smbaker, for record.load_from_string
-    def todict2(self, elem=None):
-        if elem is None:
-            elem = self.root
-        d = {}
-        d.update(elem.attrib)
-        d['text'] = elem.text
-        for child in elem.iterchildren():
-            if child.tag not in d:
-                d[child.tag] = []
-            d[child.tag].append(self.todict2(child))
 
         if len(d)==1 and ("text" in d):
             d = d["text"]
index 9160637..ce72936 100755 (executable)
@@ -4,10 +4,9 @@ import os
 import random
 import string
 import unittest
-import sfa.util.xmlrpcprotocol as xmlrpc
+import sfa.util.xmlrpcprotocol as xmlrpcprotocol
 from unittest import TestCase
 from optparse import OptionParser
-from sfa.util.xmlrpcprotocol import ServerException
 from sfa.util.xrn import get_authority
 from sfa.util.config import *
 from sfa.trust.certificate import *
@@ -44,10 +43,10 @@ class Client:
         self.cert.save_to_file(cert_file)        
         SFI_AGGREGATE = config.SFI_SM.replace('12347', '12346')
         SFI_CM = 'http://' + options.cm_host + ':12346'
-        self.registry = xmlrpc.get_server(config.SFI_REGISTRY, key_file, cert_file)
-        self.aggregate = xmlrpc.get_server(SFI_AGGREGATE, key_file, cert_file)
-        self.sm = xmlrpc.get_server(config.SFI_SM, key_file, cert_file)
-        self.cm = xmlrpc.get_server(SFI_CM, key_file, cert_file)
+        self.registry = xmlrpcprotocol.server_proxy(config.SFI_REGISTRY, key_file, cert_file)
+        self.aggregate = xmlrpcprotocol.server_proxy(SFI_AGGREGATE, key_file, cert_file)
+        self.sm = xmlrpcprotocol.server_proxy(config.SFI_SM, key_file, cert_file)
+        self.cm = xmlrpcprotocol.server_proxy(SFI_CM, key_file, cert_file)
         self.hrn = config.SFI_USER
         # XX defaulting to user, but this should be configurable so we can
         # test from a component's perspective
@@ -171,7 +170,7 @@ class RegistryTest(BasicTestCase):
         server_exception = False 
         try:
             callable(self.credential)
-        except ServerException:
+        except xmlrpcprotocol.ServerException:
             server_exception = True
         finally:
             if self.type in ['user'] and not server_exception: