from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
from sfa.util.cache import Cache
-
-# one would think the driver should not need to mess with the SFA db, but..
-
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
from sfa.managers.driver import Driver
from sfa.openstack.openstack_shell import OpenstackShell
-import sfa.plc.peers as peers
-from sfa.plc.plaggregate import PlAggregate
+from sfa.openstack.osaggregate import OSAggregate
from sfa.plc.plslices import PlSlices
-from sfa.util.plxrn import slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, hrn_to_pl_login_base
+from sfa.util.osxrn import OSXrn
def list_to_dict(recs, key):
if self.cache:
slices = self.cache.get('slices')
if slices:
- logger.debug("PlDriver.list_slices returns from cache")
+ logger.debug("OpenStackDriver.list_slices returns from cache")
return slices
- # get data from db
- slices = self.shell.GetSlices({'peer_id': None}, ['name'])
- slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
- slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+ # get data from db
+ slices = self.shell.project_get_all()
+ slice_urns = [OSXrn(name, 'slice').urn for name in slices]
# cache the result
if self.cache:
- logger.debug ("PlDriver.list_slices stores value in cache")
+ logger.debug ("OpenStackDriver.list_slices stores value in cache")
self.cache.add('slices', slice_urns)
return slice_urns
if cached_requested and self.cache and not slice_hrn:
rspec = self.cache.get(version_string)
if rspec:
- logger.debug("PlDriver.ListResources: returning cached advertisement")
+ logger.debug("OpenStackDriver.ListResources: returning cached advertisement")
return rspec
#panos: passing user-defined options
#print "manager options = ",options
- aggregate = PlAggregate(self)
+ aggregate = OSAggregate(self)
rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
options=options)
# cache the result
if self.cache and not slice_hrn:
- logger.debug("PlDriver.ListResources: stores advertisement in cache")
+ logger.debug("OpenStackDriver.ListResources: stores advertisement in cache")
self.cache.add(version_string, rspec)
return rspec
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename})
- if not slices:
+ name = OSXrn(xrn=slice_urn).name
+ slice = self.shell.project_get(name)
+ if not slice:
return 1
- slice = slices[0]
-
- # determine if this is a peer slice
- # xxx I wonder if this would not need to use PlSlices.get_peer instead
- # in which case plc.peers could be deprecated as this here
- # is the only/last call to this last method in plc.peers
- peer = peers.get_peer(self, slice_hrn)
- try:
- if peer:
- self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
- self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
- finally:
- if peer:
- self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+
+ instances = self.shell.instance_get_all_by_project(name)
+ for instance in instances:
+ self.shell.instance_destroy(instance.instance_id)
return 1
def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice = slices[0]
- requested_time = utcparse(expiration_time)
- record = {'expires': int(datetime_to_epoch(requested_time))}
- try:
- self.shell.UpdateSlice(slice['slice_id'], record)
- return True
- except:
- return False
-
- # remove the 'enabled' tag
+ return True
+
def start_slice (self, slice_urn, slice_hrn, creds):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice_id = slices[0]['slice_id']
- slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
- # just remove the tag if it exists
- if slice_tags:
- self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
return 1
- # set the 'enabled' tag to 0
def stop_slice (self, slice_urn, slice_hrn, creds):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice_id = slices[0]['slice_id']
- slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'})
- if not slice_tags:
- self.shell.AddSliceTag(slice_id, 'enabled', '0')
- elif slice_tags[0]['value'] != "0":
- tag_id = slice_tags[0]['slice_tag_id']
- self.shell.UpdateSliceTag(tag_id, '0')
+ name = OSXrn(xrn=slice_urn).name
+ slice = self.shell.project_get(name)
+ if not slice:
+ return 1
+
+ instances = self.shell.instance_get_all_by_project(name)
+ for instance in instances:
+ self.shell.instance_stop(instance.instance_id)
return 1
def reset_slice (self, slice_urn, slice_hrn, creds):
# xxx this code is quite old and has not run for ages
# it is obviously totally broken and needs a rewrite
def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
- raise SfaNotImplemented,"PlDriver.get_ticket needs a rewrite"
+ raise SfaNotImplemented,"OpenStackDriver.get_ticket needs a rewrite"
# please keep this code for future reference
# slices = PlSlices(self)
# peer = slices.get_peer(slice_hrn)
import xmlrpclib
import socket
from urlparse import urlparse
-
from sfa.util.sfalogging import logger
-
+try:
+ from nova import flags
+ from nova import context
+ from nova import db
+ has_nova = True
+except ImportError:
+ has_nova = False
+
class OpenstackShell:
"""
A simple shell to an openstack (nova) backend
url = config.SFA_PLC_URL
# try to figure if the url is local
hostname=urlparse(url).hostname
- is_local=False
if hostname == 'localhost': is_local=True
# otherwise compare IP addresses;
# this might fail for any number of reasons, so let's harden that
pass
- # Openstack provides a RESTful api but it is very limited, so we will
- # ignore it for now and always use the native openstack (nova) library.
- # This of course will not work if sfa is not installed on the same machine
- # as the openstack-compute package.
- if is_local:
- try:
- from nova.auth.manager import AuthManager, db, context
- direct_access=True
- except:
- direct_access=False
- if is_local and direct_access:
-
+ if is_local and has_nova:
logger.debug('openstack access - native')
+ # load the config
+ flags.FLAGS(['foo', '--flagfile=/etc/nova/nova.conf', 'foo', 'foo'])
self.auth = context.get_admin_context()
- # AuthManager isnt' really useful for much yet but it's
- # more convenient to use than the db reference which requires
- # a context. Lets hold onto the AuthManager reference for now.
- #self.proxy = AuthManager()
- self.auth_manager = AuthManager()
self.proxy = db
-
else:
self.auth = None
self.proxy = None
--- /dev/null
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.util.xrn import Xrn
+from sfa.util.osxrn import OSXrn
+from sfa.rspecs.version_manager import VersionManager
+
+class OSAggregate:
+
+ def __init__(self, driver):
+ self.driver = driver
+
+ def instance_to_sliver(self, instance, slice_xrn=None):
+ sliver_id = None
+ name = None
+ if slice_xrn:
+ xrn = OSXrn(slice_xrn, 'slice')
+ name = xrn.name
+ sliver_id = xrn.sliver_id(instance.instance_id, "")
+
+ # should include:
+ # * instance.image_ref
+ # * instance.kernel_id
+ # * instance.ramdisk_id
+ name=None
+ if hasattr(instance, 'name'):
+ name = instance.name
+ elif hasattr(instance, 'display_name'):
+ name = instance.display_name
+ sliver = Sliver({'slice_id': sliver_id,
+ 'name': name,
+ 'type': 'plos-' + instance['name'],
+ 'tags': []})
+ return sliver
+
+ def get_rspec(self, slice_xrn=None, version=None, options={}):
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ if not slice_xrn:
+ rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+ nodes = self.get_aggregate_nodes()
+ else:
+ rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+ nodes = self.get_slice_nodes(slice_xrn)
+
+ rspec = RSpec(version=rspec_version)
+ rspec.version.add_nodes(nodes)
+ return rspec.toxml()
+
+ def get_slice_nodes(self, slice_xrn):
+ name = OSXrn(xrn = slice_xrn).name
+ instances = self.driver.shell.instance_get_all_by_project(name)
+ rspec_nodes = []
+ for instance in instances:
+ rspec_node = Node()
+ xrn = OSXrn(instance.hostname, 'node')
+ rspec_node['component_id'] = xrn.urn
+ rspec_node['component_name'] = xrn.name
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ sliver = self.instance_to_sliver(instance)
+ rspec_node['slivers'] = [sliver]
+ rspec_nodes.append(rspec_node)
+ return rspec_nodes
+
+ def get_aggregate_nodes(self):
+
+ zones = self.driver.shell.zone_get_all()
+ if not zones:
+ zones = ['cloud']
+ else:
+ zones = [zone.name for zone in zones]
+
+ rspec_nodes = []
+ for zone in zones:
+ rspec_node = Node()
+ xrn = OSXrn(zone, 'node')
+ rspec_node['component_id'] = xrn.urn
+ rspec_node['component_name'] = xrn.name
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ rspec_node['exclusive'] = 'false'
+ rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
+ HardwareType({'name': 'pc'})]
+ instances = self.driver.shell.instance_type_get_all().values()
+ slivers = [self.instance_to_sliver(inst) for inst in instances]
+ rspec_node['slivers'] = slivers
+ rspec_nodes.append(rspec_node)
+
+ return rspec_nodes