2to3 -f except
diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py
index 90fd003..29681a0 100644
--- a/sfa/openstack/osaggregate.py
+++ b/sfa/openstack/osaggregate.py
@@ -3,11 +3,13 @@ import os
 import socket
 import base64
 import string
-import random    
+import random
+import time    
 from collections import defaultdict
 from nova.exception import ImageNotFound
 from nova.api.ec2.cloud import CloudController
-from sfa.util.faults import SfaAPIError
+from sfa.util.faults import SliverDoesNotExist, InvalidRSpec
+from sfa.storage.model import SliverAllocation
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
 from sfa.rspecs.rspec import RSpec
 from sfa.rspecs.elements.hardware_type import HardwareType
 from sfa.rspecs.elements.node import Node
@@ -16,148 +18,249 @@ from sfa.rspecs.elements.login import Login
 from sfa.rspecs.elements.disk_image import DiskImage
 from sfa.rspecs.elements.services import Services
 from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.fw_rule import FWRule
 from sfa.util.xrn import Xrn
 from sfa.planetlab.plxrn import PlXrn 
 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
 from sfa.rspecs.version_manager import VersionManager
-from sfa.openstack.image import ImageManager
 from sfa.openstack.security_group import SecurityGroup
+from sfa.client.multiclient import MultiClient
 from sfa.util.sfalogging import logger
 
-def instance_to_sliver(instance, slice_xrn=None):
-    # should include?
-    # * instance.image_ref
-    # * instance.kernel_id
-    # * instance.ramdisk_id
-    import nova.db.sqlalchemy.models
-    name=None
-    type=None
-    sliver_id = None
-    if isinstance(instance, dict):
-        # this is an isntance type dict
-        name = instance['name']
-        type = instance['name']
-    elif isinstance(instance, nova.db.sqlalchemy.models.Instance):
-        # this is an object that describes a running instance
-        name = instance.display_name
-        type = instance.instance_type.name
-    else:
-        raise SfaAPIError("instnace must be an instance_type dict or" + \
-                           " a nova.db.sqlalchemy.models.Instance object")
-    if slice_xrn:
-        xrn = Xrn(slice_xrn, 'slice')
-        sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)
-
-    sliver = Sliver({'slice_id': sliver_id,
-                     'name': name,
-                     'type':  type,
-                     'tags': []})
-    return sliver
-    
-
-def ec2_id(id=None, type=None):
-    ec2_id = None
-    if type == 'ovf':
-        type = 'ami'   
-    if id and type:
-        ec2_id = CloudController.image_ec2_id(id, type)        
-    return ec2_id
-
+def pubkeys_to_user_data(pubkeys):
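+    # render the given public keys as a #!/bin/bash script that appends
+    # each key (followed by a blank line) to /root/.ssh/authorized_keys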
+    user_data = "#!/bin/bash\n\n"
+    for pubkey in pubkeys:
+        pubkey = pubkey.replace('\n', '')
+        user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
+        user_data += "\n"
+        user_data += "echo >> /root/.ssh/authorized_keys"
+        user_data += "\n"
+    return user_data
 
+def image_to_rspec_disk_image(image):
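+    # map a glance image onto an rspec DiskImage element; only the image
+    # name is available here, so it fills every DiskImage field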
+    img = DiskImage()
+    img['name'] = image['name']
+    img['description'] = image['name']
+    img['os'] = image['name']
+    img['version'] = image['name']    
+    return img
+    
 class OSAggregate:
 
     def __init__(self, driver):
         self.driver = driver
 
-    def get_rspec(self, slice_xrn=None, version=None, options={}):
-        version_manager = VersionManager()
-        version = version_manager.get_version(version)
-        if not slice_xrn:
-            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
-            nodes = self.get_aggregate_nodes()
-        else:
-            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
-            nodes = self.get_slice_nodes(slice_xrn)
-        rspec = RSpec(version=rspec_version, user_options=options)
-        rspec.version.add_nodes(nodes)
-        return rspec.toxml()
-
     def get_availability_zones(self):
-        try:
-            # pre essex releases 
-            zones = self.driver.shell.db.zone_get_all()
-        except:
-            # essex release
-            zones = self.driver.shell.db.dnsdomain_list()
-
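+        # nova's DNS domain list stands in for availability zones here;
+        # fall back to a single 'cloud' zone when none are defined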
+        zones = self.driver.shell.nova_manager.dns_domains.domains()
         if not zones:
             zones = ['cloud']
         else:
             zones = [zone.name for zone in zones]
         return zones
 
-    def get_slice_nodes(self, slice_xrn):
-        image_manager = ImageManager(self.driver)
+    def list_resources(self, version=None, options=None):
+        if options is None: options={}
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        rspec = RSpec(version=rspec_version, user_options=options)
+        nodes = self.get_aggregate_nodes()
+        rspec.version.add_nodes(nodes)
+        return rspec.toxml()
+
+    def describe(self, urns, version=None, options=None):
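+        # build a GENI describe() result for these urns: a manifest rspec
+        # plus one geni_sliver status dict per running instance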
+        if options is None: options={}
+        # update nova connection
+        tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        instances = self.get_instances(urns)
+        # lookup the sliver allocations
+        sliver_ids = [Xrn('%s.%s' % (self.driver.hrn, instance.id), type='sliver').urn
+                      for instance in instances]
+        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+        sliver_allocations = self.driver.api.dbsession().query(SliverAllocation).filter(constraint)
+        sliver_allocation_dict = {}
+        for sliver_allocation in sliver_allocations:
+            sliver_allocation_dict[sliver_allocation.sliver_id] = sliver_allocation
 
-        zones = self.get_availability_zones()
-        name = hrn_to_os_slicename(slice_xrn)
-        instances = self.driver.shell.db.instance_get_all_by_project(name)
+        geni_slivers = []
         rspec_nodes = []
         for instance in instances:
-            rspec_node = Node()
-            interfaces = []
-            for fixed_ip in instance.fixed_ips:
-                if_xrn = PlXrn(auth=self.driver.hrn, 
-                               interface='node%s:eth0' % (instance.hostname)) 
-                interface = Interface({'component_id': if_xrn.urn})
-                interface['ips'] =  [{'address': fixed_ip['address'],
-                                     'netmask': fixed_ip['network'].netmask,
-                                     'type': 'ipv4'}]
-                interface['floating_ips'] = []
-                for floating_ip in fixed_ip.floating_ips:
-                    interface['floating_ips'].append(floating_ip.address)
-                interfaces.append(interface)
-            if instance.availability_zone:
-                node_xrn = OSXrn(instance.availability_zone, 'node')
+            rspec_nodes.append(self.instance_to_rspec_node(instance))
+            geni_sliver = self.instance_to_geni_sliver(instance, sliver_allocation_dict)
+            geni_slivers.append(geni_sliver)
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+        rspec = RSpec(version=rspec_version, user_options=options)
+        rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
+        rspec.version.add_nodes(rspec_nodes)
+        result = {'geni_urn': Xrn(urns[0]).get_urn(),
+                  'geni_rspec': rspec.toxml(), 
+                  'geni_slivers': geni_slivers}
+        
+        return result
+
+    def get_instances(self, urns):
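+        # resolve a mix of slice and sliver urns into nova server objects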
+        # parse slice names and sliver ids
+        names = set()
+        ids = set()
+        for urn in urns:
+            xrn = OSXrn(xrn=urn)
+            if xrn.type == 'slice':
+                names.add(xrn.get_slice_name())
+            elif xrn.type == 'sliver':
+                ids.add(xrn.leaf)
+
+        # look up instances
+        instances = []
+        filter = {}
+        if names:
+            filter['name'] = names
+        if ids:
+            filter['id'] = ids   
+        servers = self.driver.shell.nova_manager.servers.findall(**filter)
+        instances.extend(servers)
+
+        return instances
+
+    def instance_to_rspec_node(self, instance):
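+        # convert a nova server into a manifest rspec node, filling in
+        # sliver, firewall, disk image, interface and login details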
+        # determine node urn
+        node_xrn = instance.metadata.get('component_id')
+        if not node_xrn:
+            node_xrn = OSXrn('cloud', type='node')
+        else:
+            node_xrn = OSXrn(xrn=node_xrn, type='node')
+
+        rspec_node = Node()
+        rspec_node['component_id'] = node_xrn.urn
+        rspec_node['component_name'] = node_xrn.name
+        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+        rspec_node['sliver_id'] = OSXrn(name=instance.name, type='slice', id=instance.id).get_urn() 
+        if instance.metadata.get('client_id'):
+            rspec_node['client_id'] = instance.metadata.get('client_id')
+
+        # get sliver details
+        flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
+        sliver = self.instance_to_sliver(flavor)
+        # get firewall rules
+        fw_rules = []
+        group_name = instance.metadata.get('security_groups')
+        if group_name:
+            group = self.driver.shell.nova_manager.security_groups.find(name=group_name)
+            for rule in group.rules:
+                port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
+                fw_rule = FWRule({'protocol': rule['ip_protocol'],
+                                  'port_range': port_range,
+                                  'cidr_ip': rule['ip_range']['cidr']})
+                fw_rules.append(fw_rule)
+        sliver['fw_rules'] = fw_rules 
+        rspec_node['slivers'] = [sliver]
+
+        # get disk image
+        image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
+        if isinstance(image, list) and len(image) > 0:
+            image = image[0]
+        disk_image = image_to_rspec_disk_image(image)
+        sliver['disk_image'] = [disk_image]
+
+        # get interfaces            
+        rspec_node['services'] = []
+        rspec_node['interfaces'] = []
+        addresses = instance.addresses
+        # HACK: public ips are stored in the list of private, but 
+        # this seems wrong. Assume pub ip is the last in the list of 
+        # private ips until openstack bug is fixed.      
+        if addresses.get('private'):
+            login = Login({'authentication': 'ssh-keys',
+                           'hostname': addresses.get('private')[-1]['addr'],
+                           'port':'22', 'username': 'root'})
+            service = Services({'login': login})
+            rspec_node['services'].append(service)    
+        
+        for if_index, private_ip in enumerate(addresses.get('private', [])):
+            if_xrn = PlXrn(auth=self.driver.hrn, 
+                           interface='node%s' % (instance.hostId)) 
+            if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" % if_index).urn
+            if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
+            interface = Interface({'component_id': if_xrn.urn,
+                                   'client_id': if_client_id,
+                                   'sliver_id': if_sliver_id})
+            interface['ips'] =  [{'address': private_ip['addr'],
+                                 #'netmask': private_ip['network'],
+                                 'type': private_ip['version']}]
+            rspec_node['interfaces'].append(interface) 
+        
+        # slivers always provide the ssh service
+        for public_ip in addresses.get('public', []):
+            login = Login({'authentication': 'ssh-keys', 
+                           'hostname': public_ip['addr'], 
+                           'port':'22', 'username': 'root'})
+            service = Services({'login': login})
+            rspec_node['services'].append(service)
+        return rspec_node
+
+
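+    # note: 'instance' below is a nova flavor (from flavors.find/flavors.list),
+    # not a server; the flavor name doubles as the sliver type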
+    def instance_to_sliver(self, instance, xrn=None):
+        sliver_id = None
+        if xrn:
+            sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+            sliver_id = Xrn(sliver_hrn, type='sliver').urn
+
+        sliver = Sliver({'sliver_id': sliver_id,
+                         'name': instance.name,
+                         'type': instance.name,
+                         'cpus': str(instance.vcpus),
+                         'memory': str(instance.ram),
+                         'storage':  str(instance.disk)})
+        return sliver   
+
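+    # build the geni_slivers entry for one nova server, mapping its
+    # SliverAllocation record and nova state to GENI allocation and
+    # operational status values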
+    def instance_to_geni_sliver(self, instance, sliver_allocations=None):
+        if sliver_allocations is None: sliver_allocations={}
+        sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+        sliver_id = Xrn(sliver_hrn, type='sliver').urn
+        # set sliver allocation and operational status; default to
+        # unallocated/unknown when there is no allocation record
+        allocation_status = 'geni_unallocated'
+        op_status = 'geni_unknown'
+        sliver_allocation = sliver_allocations.get(sliver_id)
+        if sliver_allocation:
+            allocation_status = sliver_allocation.allocation_state
+            if allocation_status == 'geni_allocated':
+                op_status = 'geni_pending_allocation'
+            elif allocation_status == 'geni_provisioned':
+                state = instance.state.lower()
+                if state == 'active':
+                    op_status = 'geni_ready'
+                elif state == 'building':
+                    op_status = 'geni_notready'
+                elif state == 'failed':
+                    op_status = 'geni_failed'
+                else:
+                    op_status = 'geni_unknown'
             else:
-                node_xrn = OSXrn('cloud', 'node')
-
-            rspec_node['component_id'] = node_xrn.urn
-            rspec_node['component_name'] = node_xrn.name
-            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()   
-            sliver = instance_to_sliver(instance)
-            disk_image = image_manager.get_disk_image(instance.image_ref)
-            sliver['disk_image'] = [disk_image.to_rspec_object()]
-            rspec_node['slivers'] = [sliver]
-            rspec_node['interfaces'] = interfaces
-            # slivers always provide the ssh service
-            rspec_node['services'] = []
-            for interface in interfaces:
-                if 'floating_ips' in interface:
-                    for hostname in interface['floating_ips']:
-                        login = Login({'authentication': 'ssh-keys', 
-                                       'hostname': hostname, 
-                                       'port':'22', 'username': 'root'})
-                        service = Services({'login': login})
-                        rspec_node['services'].append(service)
-            rspec_nodes.append(rspec_node)
-        return rspec_nodes
+                allocation_status = 'geni_unallocated'    
+        # required fields
+        geni_sliver = {'geni_sliver_urn': sliver_id, 
+                       'geni_expires': None,
+                       'geni_allocation_status': allocation_status,
+                       'geni_operational_status': op_status,
+                       'geni_error': None,
+                       'plos_created_at': datetime_to_string(utcparse(instance.created)),
+                       'plos_sliver_type': self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
+                        }
 
+        return geni_sliver
+                        
     def get_aggregate_nodes(self):
         zones = self.get_availability_zones()
         # available sliver/instance/vm types
-        instances = self.driver.shell.db.instance_type_get_all()
+        instances = self.driver.shell.nova_manager.flavors.list()
         if isinstance(instances, dict):
             instances = instances.values()
         # available images
-        image_manager = ImageManager(self.driver)
-        disk_images = image_manager.get_available_disk_images()
-        disk_image_objects = [image.to_rspec_object() \
-                               for image in disk_images]  
+        images = self.driver.shell.image_manager.get_images_detailed()
+        disk_images  = [image_to_rspec_disk_image(img) for img in images if img['container_format'] in ['ami', 'ovf']]
         rspec_nodes = []
         for zone in zones:
             rspec_node = Node()
-            xrn = OSXrn(zone, 'node')
+            xrn = OSXrn(zone, type='node')
             rspec_node['component_id'] = xrn.urn
             rspec_node['component_name'] = xrn.name
             rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
@@ -166,63 +269,44 @@ class OSAggregate:
                                                 HardwareType({'name': 'pc'})]
             slivers = []
             for instance in instances:
-                sliver = instance_to_sliver(instance)
-                sliver['disk_image'] = disk_image_objects
+                sliver = self.instance_to_sliver(instance)
+                sliver['disk_image'] = disk_images
                 slivers.append(sliver)
-        
+            rspec_node['available'] = 'true'
             rspec_node['slivers'] = slivers
             rspec_nodes.append(rspec_node) 
 
         return rspec_nodes 
 
+    def create_tenant(self, tenant_name):
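+        # return the keystone tenant for this slice, creating it on first use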
+        tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
+        if not tenants:
+            self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
+            tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
+        else:
+            tenant = tenants[0]
+        return tenant
+            
+    def create_instance_key(self, slice_hrn, user):
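+        # maintain one nova keypair per (slice, user); replace the stored
+        # keypair whenever the user's public key has changed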
+        slice_name = Xrn(slice_hrn).leaf
+        user_name = Xrn(user['urn']).leaf
+        key_name = "%s_%s" % (slice_name, user_name)
+        pubkey = user['keys'][0]
+        key_found = False
+        existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
+        for existing_key in existing_keys:
+            if existing_key.public_key != pubkey:
+                self.driver.shell.nova_manager.keypairs.delete(existing_key)
+            elif existing_key.public_key == pubkey:
+                key_found = True
 
-    def create_project(self, slicename, users, options={}):
-        """
-        Create the slice if it doesn't alredy exist. Create user
-        accounts that don't already exist   
-        """
-        from nova.exception import ProjectNotFound, UserNotFound
-        for user in users:
-            username = Xrn(user['urn']).get_leaf()
-            try:
-                self.driver.shell.auth_manager.get_user(username)
-            except UserNotFound:
-                self.driver.shell.auth_manager.create_user(username)
-            self.verify_user_keys(username, user['keys'], options)
-
-        try:
-            slice = self.driver.shell.auth_manager.get_project(slicename)
-        except ProjectNotFound:
-            # assume that the first user is the project manager
-            proj_manager = Xrn(users[0]['urn']).get_leaf()
-            self.driver.shell.auth_manager.create_project(slicename, proj_manager) 
-
-    def verify_user_keys(self, username, keys, options={}):
-        """
-        Add requested keys.
-        """
-        append = options.get('append', True)    
-        existing_keys = self.driver.shell.db.key_pair_get_all_by_user(username)
-        existing_pub_keys = [key.public_key for key in existing_keys]
-        removed_pub_keys = set(existing_pub_keys).difference(keys)
-        added_pub_keys = set(keys).difference(existing_pub_keys)
-        pubkeys = []
-        # add new keys
-        for public_key in added_pub_keys:
-            key = {}
-            key['user_id'] = username
-            key['name'] =  username
-            key['public_key'] = public_key
-            self.driver.shell.db.key_pair_create(key)
-
-        # remove old keys
-        if not append:
-            for key in existing_keys:
-                if key.public_key in removed_pub_keys:
-                    self.driver.shell.db.key_pair_destroy(username, key.name)
-
-
-    def create_security_group(self, slicename, fw_rules=[]):
+        if not key_found:
+            self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
+        return key_name       
+        
+
+    def create_security_group(self, slicename, fw_rules=None):
+        if fw_rules is None: fw_rules=[]
         # use default group by default
         group_name = 'default' 
         if isinstance(fw_rules, list) and fw_rules:
@@ -240,6 +324,11 @@ class OSAggregate:
                                              cidr_ip = rule.get('cidr_ip'), 
                                              port_range = rule.get('port_range'), 
                                              icmp_type_code = rule.get('icmp_type_code'))
+            # Open ICMP by default
+            security_group.add_rule_to_group(group_name,
+                                             protocol = "icmp",
+                                             cidr_ip = "0.0.0.0/0",
+                                             icmp_type_code = "-1:-1")
         return group_name
 
     def add_rule_to_security_group(self, group_name, **kwds):
@@ -250,97 +339,121 @@ class OSAggregate:
                                          icmp_type_code = kwds.get('icmp_type_code'))
 
  
-    def reserve_instance(self, image_id, kernel_id, ramdisk_id, \
-                         instance_type, key_name, user_data, group_name):
-        conn  = self.driver.euca_shell.get_euca_connection()
-        logger.info('Reserving an instance: image: %s, kernel: ' \
-                    '%s, ramdisk: %s, type: %s, key: %s' % \
-                    (image_id, kernel_id, ramdisk_id,
-                    instance_type, key_name))
-        try:
-            reservation = conn.run_instances(image_id=image_id,
-                                             kernel_id=kernel_id,
-                                             ramdisk_id=ramdisk_id,
-                                             instance_type=instance_type,
-                                             key_name=key_name,
-                                             user_data = user_data,
-                                             security_groups=[group_name])
-                                             #placement=zone,
-                                             #min_count=min_count,
-                                             #max_count=max_count,           
-                                              
-        except Exception, err:
-            logger.log_exc(err)
-    
-               
-    def run_instances(self, slicename, rspec, keyname, pubkeys):
-        """
-        Create the security groups and instances. 
-        """
-        # the default image to use for instnaces that dont
-        # explicitly request an image.
-        # Just choose the first available image for now.
-        image_manager = ImageManager(self.driver)
-        available_images = image_manager.get_available_disk_images()
-        default_image_id = None
-        default_aki_id  = None
-        default_ari_id = None
-        default_image = available_images[0]
-        default_image_id = ec2_id(default_image.id, default_image.container_format)  
-        default_aki_id = ec2_id(default_image.kernel_id, 'aki')  
-        default_ari_id = ec2_id(default_image.ramdisk_id, 'ari')
-
-        # get requested slivers
+
+    def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
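+        # create (or fetch) the tenant for this slice, grant the sfa admin
+        # user roles in it, reconnect nova as that tenant, then boot one
+        # server per sliver requested in the rspec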
+        #logger.debug('Reserving an instance: image: %s, flavor: ' \
+        #            '%s, key: %s, name: %s' % \
+        #            (image_id, flavor_id, key_name, slicename))
+
+        # make sure a tenant exists for this slice
+        tenant = self.create_tenant(tenant_name)  
+
+        # add the sfa admin user to this tenant and update our nova client connection
+        # to use these credentials for the rest of this session. This ensures that the instances
+        # we create will be assigned to the correct tenant.
+        sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
+        user_role = self.driver.shell.auth_manager.roles.find(name='user')
+        admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
+        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
+        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
+        self.driver.shell.nova_manager.connect(tenant=tenant.name)  
+
+        authorized_keys = "\n".join(pubkeys)
+        files = {'/root/.ssh/authorized_keys': authorized_keys}
         rspec = RSpec(rspec)
-        user_data = "\n".join(pubkeys)
         requested_instances = defaultdict(list)
+        
         # iterate over clouds/zones/nodes
+        slivers = []
         for node in rspec.version.get_nodes_with_slivers():
-            instance_types = node.get('slivers', [])
-            if isinstance(instance_types, list):
-                # iterate over sliver/instance types
-                for instance_type in instance_types:
-                    fw_rules = instance_type.get('fw_rules', [])
-                    group_name = self.create_security_group(slicename, fw_rules)
-                    ami_id = default_image_id
-                    aki_id = default_aki_id
-                    ari_id = default_ari_id
-                    req_image = instance_type.get('disk_image')
-                    if req_image and isinstance(req_image, list):
-                        req_image_name = req_image[0]['name']
-                        disk_image = image_manager.get_disk_image(name=req_image_name)
-                        if disk_image:
-                            ami_id = ec2_id(disk_image.id, disk_image.container_format)
-                            aki_id = ec2_id(disk_image.kernel_id, 'aki')
-                            ari_id = ec2_id(disk_image.ramdisk_id, 'ari')
-                    # start the instance
-                    self.reserve_instance(image_id=ami_id, 
-                                          kernel_id=aki_id, 
-                                          ramdisk_id=ari_id, 
-                                          instance_type=instance_type['name'], 
-                                          key_name=keyname, 
-                                          user_data=user_data, 
-                                          group_name=group_name)
-
-
-    def delete_instances(self, project_name):
-        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+            instances = node.get('slivers', [])
+            if not instances:
+                continue
+            for instance in instances:
+                try: 
+                    metadata = {}
+                    flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+                    image = instance.get('disk_image')
+                    if image and isinstance(image, list):
+                        image = image[0]
+                    else:
+                        raise InvalidRSpec("Must specify a disk_image for each VM")
+                    image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+                    fw_rules = instance.get('fw_rules', [])
+                    group_name = self.create_security_group(instance_name, fw_rules)
+                    metadata['security_groups'] = group_name
+                    if node.get('component_id'):
+                        metadata['component_id'] = node['component_id']
+                    if node.get('client_id'):
+                        metadata['client_id'] = node['client_id'] 
+                    server = self.driver.shell.nova_manager.servers.create(
+                                                            flavor=flavor_id,
+                                                            image=image_id,
+                                                            key_name = key_name,
+                                                            security_groups = [group_name],
+                                                            files=files,
+                                                            meta=metadata, 
+                                                            name=instance_name)
+                    slivers.append(server)
+                except Exception as err:    
+                    logger.log_exc(err)                                
+                           
+        return slivers        
+
+    def delete_instance(self, instance):
+    
+        def _delete_security_group(inst):
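+            # nova will not delete a security group that is still attached
+            # to a server, so poll until the server is gone (or the timeout
+            # expires) before removing the group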
+            security_group = inst.metadata.get('security_groups', '')
+            if security_group:
+                manager = SecurityGroup(self.driver)
+                timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
+                start_time = time.time()
+                instance_deleted = False
+                while not instance_deleted and (time.time() - start_time) < timeout:
+                    tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
+                    if not tmp_inst:
+                        instance_deleted = True
+                    time.sleep(.5)
+                manager.delete_security_group(security_group)
+
+        multiclient = MultiClient()
+        tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)  
+        self.driver.shell.nova_manager.connect(tenant=tenant.name)
+        args = {'name': instance.name,
+                'id': instance.id}
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
         security_group_manager = SecurityGroup(self.driver)
         for instance in instances:
-            # deleate this instance's security groups
-            for security_group in instance.security_groups:
-                # dont delete the default security group
-                if security_group.name != 'default': 
-                    security_group_manager.delete_security_group(security_group.name)
             # destroy instance
-            self.driver.shell.db.instance_destroy(instance.id)
+            self.driver.shell.nova_manager.servers.delete(instance)
+            # delete this instance's security group
+            multiclient.run(_delete_security_group, instance)
         return 1
 
-    def stop_instances(self, project_name):
-        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+    def stop_instances(self, instance_name, tenant_name, id=None):
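+        # 'stop' is implemented with nova pause; start_instances below
+        # resumes paused servers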
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        args = {'name': instance_name}
+        if id:
+            args['id'] = id
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
         for instance in instances:
-            self.driver.shell.db.instance_stop(instance.id)
+            self.driver.shell.nova_manager.servers.pause(instance)
         return 1
 
+    def start_instances(self, instance_name, tenant_name, id=None):
+        self.driver.shell.nova_manager.connect(tenant=tenant_name)
+        args = {'name': instance_name}
+        if id:
+            args['id'] = id
+        instances = self.driver.shell.nova_manager.servers.findall(**args)
+        for instance in instances:
+            self.driver.shell.nova_manager.servers.resume(instance)
+        return 1
+
+    def restart_instances(self, instance_name, tenant_name, id=None):
+        self.stop_instances(instance_name, tenant_name, id)
+        self.start_instances(instance_name, tenant_name, id)
+        return 1 
+
     def update_instances(self, project_name):
         pass