Merge branch 'master' into sqlalchemy - resolved a totally harmless conflict
author     Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
           Wed, 1 Feb 2012 17:38:27 +0000 (18:38 +0100)
committer  Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
           Wed, 1 Feb 2012 17:38:27 +0000 (18:38 +0100)
conflict in sfa/openstack/openstack_driver.py

sfa/openstack/openstack_driver.py
sfa/openstack/openstack_shell.py
sfa/openstack/osaggregate.py [new file with mode: 0644]
sfa/rspecs/elements/disk_image.py
sfa/util/osxrn.py [new file with mode: 0644]

diff --git a/sfa/openstack/openstack_driver.py b/sfa/openstack/openstack_driver.py
index 3209f2b..368b408 100644
@@ -9,9 +9,6 @@ from sfa.util.defaultdict import defaultdict
 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
 from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
 from sfa.util.cache import Cache
-
-# one would think the driver should not need to mess with the SFA db, but..
-
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
 
@@ -22,10 +19,9 @@ from sfa.rspecs.rspec import RSpec
 from sfa.managers.driver import Driver
 
 from sfa.openstack.openstack_shell import OpenstackShell
-import sfa.plc.peers as peers
-from sfa.plc.plaggregate import PlAggregate
+from sfa.openstack.osaggregate import OSAggregate
 from sfa.plc.plslices import PlSlices
-from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+from sfa.util.osxrn import OSXrn
 
 
 def list_to_dict(recs, key):
@@ -199,17 +195,16 @@ class OpenstackDriver (Driver):
         if self.cache:
             slices = self.cache.get('slices')
             if slices:
-                logger.debug("PlDriver.list_slices returns from cache")
+                logger.debug("OpenStackDriver.list_slices returns from cache")
                 return slices
     
-        # get data from db 
-        slices = self.shell.GetSlices({'peer_id': None}, ['name'])
-        slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
-        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+        # get data from db
+        slices = self.shell.project_get_all()
+        slice_urns = [OSXrn(name, 'slice').urn for name in slices]
     
         # cache the result
         if self.cache:
-            logger.debug ("PlDriver.list_slices stores value in cache")
+            logger.debug ("OpenStackDriver.list_slices stores value in cache")
             self.cache.add('slices', slice_urns) 
     
         return slice_urns
@@ -231,18 +226,18 @@ class OpenstackDriver (Driver):
         if cached_requested and self.cache and not slice_hrn:
             rspec = self.cache.get(version_string)
             if rspec:
-                logger.debug("PlDriver.ListResources: returning cached advertisement")
+                logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
                 return rspec 
     
         #panos: passing user-defined options
         #print "manager options = ",options
-        aggregate = PlAggregate(self)
+        aggregate = OSAggregate(self)
         rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version, 
                                      options=options)
     
         # cache the result
         if self.cache and not slice_hrn:
-            logger.debug("PlDriver.ListResources: stores advertisement in cache")
+            logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
             self.cache.add(version_string, rspec)
     
         return rspec
@@ -335,66 +330,33 @@ class OpenstackDriver (Driver):
         return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
 
     def delete_sliver (self, slice_urn, slice_hrn, creds, options):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename})
-        if not slices:
+        name = OSXrn(xrn=slice_urn).name
+        slice = self.shell.project_get(name)
+        if not slice:
             return 1
-        slice = slices[0]
-    
-        # determine if this is a peer slice
-        # xxx I wonder if this would not need to use PlSlices.get_peer instead 
-        # in which case plc.peers could be deprecated as this here
-        # is the only/last call to this last method in plc.peers
-        peer = peers.get_peer(self, slice_hrn)
-        try:
-            if peer:
-                self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
-            self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
-        finally:
-            if peer:
-                self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+        
+        # remove all instances in this project
+        instances = self.shell.instance_get_all_by_project(name)
+        for instance in instances:
+            self.shell.instance_destroy(instance.instance_id)
         return 1
     
     def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice = slices[0]
-        requested_time = utcparse(expiration_time)
-        record = {'expires': int(datetime_to_epoch(requested_time))}
-        try:
-            self.shell.UpdateSlice(slice['slice_id'], record)
-            return True
-        except:
-            return False
-
-    # remove the 'enabled' tag 
+        return True
+
     def start_slice (self, slice_urn, slice_hrn, creds):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice_id = slices[0]['slice_id']
-        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
-        # just remove the tag if it exists
-        if slice_tags:
-            self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
         return 1
 
-    # set the 'enabled' tag to 0
     def stop_slice (self, slice_urn, slice_hrn, creds):
-        slicename = hrn_to_pl_slicename(slice_hrn)
-        slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
-        if not slices:
-            raise RecordNotFound(slice_hrn)
-        slice_id = slices[0]['slice_id']
-        slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
-        if not slice_tags:
-            self.shell.AddSliceTag(slice_id, 'enabled', '0')
-        elif slice_tags[0]['value'] != "0":
-            tag_id = slice_tags[0]['slice_tag_id']
-            self.shell.UpdateSliceTag(tag_id, '0')
+        name = OSXrn(xrn=slice_urn).name
+        slice = self.shell.project_get(name)
+        if not slice:
+            return 1
+
+        # stop (but do not destroy) all instances in this project
+        instances = self.shell.instance_get_all_by_project(name)
+        for instance in instances:
+            self.shell.instance_stop(instance.instance_id)
         return 1
     
     def reset_slice (self, slice_urn, slice_hrn, creds):
@@ -403,7 +365,7 @@ class OpenstackDriver (Driver):
     # xxx this code is quite old and has not run for ages
     # it is obviously totally broken and needs a rewrite
     def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
-        raise SfaNotImplemented,"PlDriver.get_ticket needs a rewrite"
+        raise SfaNotImplemented,"OpenStackDriver.get_ticket needs a rewrite"
 # please keep this code for future reference
 #        slices = PlSlices(self)
 #        peer = slices.get_peer(slice_hrn)
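
For reference, a minimal sketch (not part of the commit) of the slice-naming round trip the driver now relies on: nova project names are wrapped into slice URNs on the way out and unwrapped on the way in. The 'myproject' name below is hypothetical, and the resulting HRN prefix depends on SFA_INTERFACE_HRN in the local config.

    # hypothetical illustration only
    from sfa.util.osxrn import OSXrn

    # list_slices direction: project name -> slice URN
    urn = OSXrn('myproject', 'slice').urn

    # delete_sliver / stop_slice direction: slice URN -> project name
    name = OSXrn(xrn=urn).name          # back to 'myproject'
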
diff --git a/sfa/openstack/openstack_shell.py b/sfa/openstack/openstack_shell.py
index d2c19fb..a4636ae 100644
@@ -2,9 +2,15 @@ import sys
 import xmlrpclib
 import socket
 from urlparse import urlparse
-
 from sfa.util.sfalogging import logger
-
+try:
+    from nova import flags
+    from nova import context
+    from nova import db
+    has_nova = True
+except ImportError:
+    has_nova = False
+
 class OpenstackShell:
     """
     A simple xmlrpc shell to a myplc instance
@@ -21,7 +27,6 @@ class OpenstackShell:
         url = config.SFA_PLC_URL
         # try to figure if the url is local
         hostname=urlparse(url).hostname
-        is_local=False
         if hostname == 'localhost': is_local=True
         # otherwise compare IP addresses; 
         # this might fail for any number of reasons, so let's harden that
@@ -35,27 +40,12 @@ class OpenstackShell:
             pass
 
 
-        # Openstack provides a RESTful api but it is very limited, so we will
-        # ignore it for now and always use the native openstack (nova) library.
-        # This of course will not work if sfa is not installed on the same machine
-        # as the openstack-compute package.   
-        if is_local:
-            try:
-                from nova.auth.manager import AuthManager, db, context
-                direct_access=True
-            except:
-                direct_access=False
-        if is_local and direct_access:
-            
+        if is_local and has_nova:
             logger.debug('openstack access - native')
+            # load the config
+            flags.FLAGS(['foo', '--flagfile=/etc/nova/nova.conf', 'foo', 'foo'])
             self.auth = context.get_admin_context()
-            # AuthManager isnt' really useful for much yet but it's
-            # more convenient to use than the db reference which requires
-            # a context. Lets hold onto the AuthManager reference for now.
-            #self.proxy = AuthManager()
-            self.auth_manager = AuthManager()
             self.proxy = db
-
         else:
             self.auth = None
             self.proxy = None
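
A minimal sketch (not part of the commit) of the module-level import guard this hunk introduces, and of the direct-access path it enables; the db call shown is only an assumption about nova's db API of that era.

    # hypothetical illustration only
    try:
        from nova import context, db
        has_nova = True
    except ImportError:
        has_nova = False

    if has_nova:
        admin = context.get_admin_context()
        # nova's db layer takes the context as first argument, e.g.
        # (assumed signature): instances = db.instance_get_all(admin)
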
diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
new file mode 100644
index 0000000..6296d29
--- /dev/null
@@ -0,0 +1,89 @@
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.util.xrn import Xrn
+from sfa.util.osxrn import OSXrn
+from sfa.rspecs.version_manager import VersionManager
+
+class OSAggregate:
+
+    def __init__(self, driver):
+        self.driver = driver
+
+    def instance_to_sliver(self, instance, slice_xrn=None):
+        sliver_id = None
+        name = None
+        if slice_xrn:
+            xrn = OSXrn(slice_xrn, 'slice')
+            name = xrn.name
+            sliver_id = xrn.sliver_id(instance.instance_id, "")
+
+        # should include: 
+        # * instance.image_ref
+        # * instance.kernel_id
+        # * instance.ramdisk_id 
+        name=None
+        if hasattr(instance, 'name'):
+            name = instance.name
+        elif hasattr(instance, 'display_name'):
+            name = instance.display_name 
+        sliver = Sliver({'slice_id': sliver_id,
+                         'name': name,
+                         'type': 'plos-' + instance['name'],
+                         'tags': []})
+        return sliver
+
+    def get_rspec(self, slice_xrn=None, version=None, options={}):
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+            nodes = self.get_aggregate_nodes()
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+            nodes = self.get_slice_nodes(slice_xrn)
+        
+        rspec = RSpec(version=rspec_version)
+        rspec.version.add_nodes(nodes)
+        return rspec.toxml()
+
+    def get_slice_nodes(self, slice_xrn):
+        name = OSXrn(xrn = slice_xrn).name
+        instances = self.driver.shell.instance_get_all_by_project(name)
+        rspec_nodes = []
+        for instance in instances:
+            rspec_node = Node()
+            xrn = OSXrn(instance.hostname, 'node')
+            rspec_node['component_id'] = xrn.urn
+            rspec_node['component_name'] = xrn.name
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()   
+            sliver = self.instance_to_sliver(instance) 
+            rspec_node['slivers'] = [sliver]
+            rspec_nodes.append(rspec_node)
+        return rspec_nodes
+
+    def get_aggregate_nodes(self):
+                
+        zones = self.driver.shell.zone_get_all()
+        if not zones:
+            zones = ['cloud']
+        else:
+            zones = [zone.name for zone in zones]
+
+        rspec_nodes = []
+        for zone in zones:
+            rspec_node = Node()
+            xrn = OSXrn(zone, 'node')
+            rspec_node['component_id'] = xrn.urn
+            rspec_node['component_name'] = xrn.name
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            rspec_node['exclusive'] = 'false'
+            rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
+                                                HardwareType({'name': 'pc'})]
+            instances = self.driver.shell.instance_type_get_all().values()
+            slivers = [self.instance_to_sliver(inst) for inst in instances]
+            rspec_node['slivers'] = slivers
+            rspec_nodes.append(rspec_node) 
+
+        return rspec_nodes
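
For context, the driver side of this new class is already visible in the ListResources hunk above; a minimal sketch of that call (rspec_version and options come from the caller):

    # as called from OpenstackDriver.ListResources (see hunk above)
    aggregate = OSAggregate(self)                      # self = the OpenstackDriver
    rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
                                options=options)
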
diff --git a/sfa/rspecs/elements/disk_image.py b/sfa/rspecs/elements/disk_image.py
index 3a810a5..1f530f6 100644
@@ -1,4 +1,9 @@
 from sfa.rspecs.elements.element import Element
 
 class DiskImage(Element):
-    fields = {}        
+    fields = [
+        'name',
+        'os',
+        'version',
+        'description',
+    ]        
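
A minimal sketch (not part of the commit) of an element carrying the new fields, following the Element convention used elsewhere in this commit (Sliver, HardwareType) of passing field values as a dict; the values are made up.

    # hypothetical illustration only
    from sfa.rspecs.elements.disk_image import DiskImage

    image = DiskImage({'name': 'f14-x86_64',
                       'os': 'Fedora',
                       'version': '14',
                       'description': 'stock Fedora 14 image'})
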
diff --git a/sfa/util/osxrn.py b/sfa/util/osxrn.py
new file mode 100644
index 0000000..752feec
--- /dev/null
@@ -0,0 +1,27 @@
+import re
+from sfa.util.xrn import Xrn
+from sfa.util.config import Config
+
+class OSXrn(Xrn):
+
+    def __init__(self, name=None, type=None, **kwds):
+        
+        config = Config()
+        if name is not None:
+            self.type = type
+            self.hrn = config.SFA_INTERFACE_HRN + "." + name
+            self.hrn_to_urn()
+        else:
+            Xrn.__init__(self, **kwds)   
+         
+        self.name = self.get_name() 
+    
+    def get_name(self):
+        self._normalize()
+        leaf = self.leaf
+        sliver_id_parts = leaf.split(':')
+        name = sliver_id_parts[0]
+        name = re.sub('[^a-zA-Z0-9_]', '', name)
+        return name
+
+
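
A minimal usage sketch (not part of the commit); the 'plc' interface HRN and the exact URN text are illustrative, the real values depend on SFA_INTERFACE_HRN in the local config.

    # hypothetical illustration only
    from sfa.util.osxrn import OSXrn

    # from a bare project name: prefixed with SFA_INTERFACE_HRN
    xrn = OSXrn('myproject', 'slice')
    print xrn.hrn                    # e.g. plc.myproject
    print xrn.urn                    # e.g. urn:publicid:IDN+plc+slice+myproject

    # from an existing xrn: falls back to plain Xrn parsing
    print OSXrn(xrn=xrn.urn).name    # 'myproject' (sanitized by get_name)
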