Merge branch 'upstreammaster'
[sfa.git] / sfa / openstack / osaggregate.py
index cf5a3b4..602cfe8 100644 (file)
@@ -1,3 +1,10 @@
+
+import os
+import socket
+import base64
+import string
+import random    
+from collections import defaultdict
 from nova.exception import ImageNotFound
 from nova.api.ec2.cloud import CloudController
 from sfa.util.faults import SfaAPIError
@@ -8,94 +15,51 @@ from sfa.rspecs.elements.sliver import Sliver
 from sfa.rspecs.elements.login import Login
 from sfa.rspecs.elements.disk_image import DiskImage
 from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.interface import Interface
 from sfa.util.xrn import Xrn
-from sfa.util.osxrn import OSXrn
+from sfa.planetlab.plxrn import PlXrn 
+from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
 from sfa.rspecs.version_manager import VersionManager
+from sfa.openstack.security_group import SecurityGroup
+from sfa.util.sfalogging import logger
 
-
-def disk_image_to_rspec_object(image):
-    img = DiskImage()
-    img['name'] = image['ami']['name']
-    img['description'] = image['ami']['name']
-    img['os'] = image['ami']['name']
-    img['version'] = image['ami']['name']
-    return img
-    
def pubkeys_to_user_data(pubkeys):
    """Return a bash user-data script that appends each public key
    (followed by a blank separator line) to root's authorized_keys."""
    lines = ["#!/bin/bash\n"]
    for key in pubkeys:
        # keys may arrive with embedded newlines; collapse to one line
        flat_key = key.replace('\n', '')
        lines.append("echo %s >> /root/.ssh/authorized_keys" % flat_key)
        lines.append("echo >> /root/.ssh/authorized_keys")
    return "\n".join(lines) + "\n"
 
def instance_to_sliver(instance, slice_xrn=None):
    """Convert a nova flavor/instance object into an rspec Sliver element.

    When slice_xrn is given, the sliver id is derived from the slice xrn
    together with the instance's project_id, hostname and id; otherwise
    the sliver carries no slice_id.
    """
    sliver_id = None
    if slice_xrn:
        slice_id_xrn = Xrn(slice_xrn, 'slice')
        sliver_id = slice_id_xrn.get_sliver_id(instance.project_id,
                                               instance.hostname,
                                               instance.id)

    return Sliver({'slice_id': sliver_id,
                   'name': instance.name,
                   'type': instance.name,
                   'cpus': str(instance.vcpus),
                   'memory': str(instance.ram),
                   'storage': str(instance.disk)})
 
def image_to_rspec_disk_image(image):
    """Build a DiskImage rspec element from a glance image record;
    all descriptive fields are populated from the image name."""
    label = image['name']
    img = DiskImage()
    for field in ('name', 'description', 'os', 'version'):
        img[field] = label
    return img
+    
 class OSAggregate:
 
    def __init__(self, driver):
        # driver: OpenStack driver whose shell (nova_manager / image_manager)
        # this aggregate uses for all API calls
        self.driver = driver
 
-    def get_machine_image_details(self, image):
-        """
-        Returns a dict that contains the ami, aki and ari details for the specified
-        ami image. 
-        """
-        disk_image = {}
-        if image['container_format'] == 'ami':
-            kernel_id = image['properties']['kernel_id']
-            ramdisk_id = image['properties']['ramdisk_id']
-            disk_image['ami'] = image
-            disk_image['aki'] = self.driver.shell.image_manager.show(kernel_id)
-            disk_image['ari'] = self.driver.shell.image_manager.show(ramdisk_id)
-        return disk_image
-        
-    def get_disk_image(self, id=None, name=None):
-        """
-        Look up a image bundle using the specifeid id or name  
-        """
-        disk_image = None    
-        try:
-            if id:
-                image = self.driver.shell.image_manager.show(image_id)
-            elif name:
-                image = self.driver.shell.image_manager.show_by_name(image_name)
-            if image['container_format'] == 'ami':
-                disk_image = self.get_machine_image_details(image)
-        except ImageNotFound:
-                pass
-        return disk_image
-
-    def get_available_disk_images(self):
-        # get image records
-        disk_images = []
-        for image in self.driver.shell.image_manager.detail():
-            if image['container_format'] == 'ami':
-                disk_images.append(self.get_machine_image_details(image))
-        return disk_images 
-
     def get_rspec(self, slice_xrn=None, version=None, options={}):
         version_manager = VersionManager()
         version = version_manager.get_version(version)
@@ -109,41 +73,82 @@ class OSAggregate:
         rspec.version.add_nodes(nodes)
         return rspec.toxml()
 
+    def get_availability_zones(self):
+        # essex release
+        zones = self.driver.shell.nova_manager.dns_domains.domains()
+
+        if not zones:
+            zones = ['cloud']
+        else:
+            zones = [zone.name for zone in zones]
+        return zones
+
     def get_slice_nodes(self, slice_xrn):
-        name = OSXrn(xrn = slice_xrn).name
-        instances = self.driver.shell.db.instance_get_all_by_project(name)
+        zones = self.get_availability_zones()
+        name = hrn_to_os_slicename(slice_xrn)
+        instances = self.driver.shell.nova_manager.servers.findall(name=name)
         rspec_nodes = []
         for instance in instances:
             rspec_node = Node()
-            xrn = OSXrn(instance.hostname, 'node')
-            rspec_node['component_id'] = xrn.urn
-            rspec_node['component_name'] = xrn.name
-            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()   
-            sliver = instance_to_sliver(instance)
-            disk_image = self.get_disk_image(instance.image_ref)
-            sliver['disk_images'] = [disk_image_to_rspec_object(disk_image)]
+            
+            #TODO: find a way to look up an instances availability zone in essex
+            #if instance.availability_zone:
+            #    node_xrn = OSXrn(instance.availability_zone, 'node')
+            #else:
+            #    node_xrn = OSXrn('cloud', 'node')
+            node_xrn = instance.metatata.get('component_id')
+            if not node_xrn:
+                node_xrn = OSXrn('cloud', 'node') 
+
+            rspec_node['component_id'] = node_xrn.urn
+            rspec_node['component_name'] = node_xrn.name
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
+            sliver = instance_to_sliver(flavor)
             rspec_node['slivers'] = [sliver]
+            image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
+            if isinstance(image, list) and len(image) > 0:
+                image = image[0]
+            disk_image = image_to_rspec_disk_image(image)
+            sliver['disk_image'] = [disk_image]
+
+            # build interfaces            
+            interfaces = []
+            addresses = instance.addresses
+            for private_ip in addresses.get('private', []):
+                if_xrn = PlXrn(auth=self.driver.hrn, 
+                               interface='node%s:eth0' % (instance.hostId)) 
+                interface = Interface({'component_id': if_xrn.urn})
+                interface['ips'] =  [{'address': private_ip['addr'],
+                                     #'netmask': private_ip['network'],
+                                     'type': private_ip['version']}]
+                interfaces.append(interface)
+            rspec_node['interfaces'] = interfaces 
+            
+            # slivers always provide the ssh service
+            rspec_node['services'] = []
+            for public_ip in addresses.get('public', []):
+                login = Login({'authentication': 'ssh-keys', 
+                               'hostname': public_ip['addr'], 
+                               'port':'22', 'username': 'root'})
+                service = Services({'login': login})
+                rspec_node['services'].append(service)
             rspec_nodes.append(rspec_node)
         return rspec_nodes
 
     def get_aggregate_nodes(self):
-                
-        zones = self.driver.shell.db.zone_get_all()
-        if not zones:
-            zones = ['cloud']
-        else:
-            zones = [zone.name for zone in zones]
-
+        zones = self.get_availability_zones()
         # available sliver/instance/vm types
-        instances = self.driver.shell.db.instance_type_get_all().values()
+        instances = self.driver.shell.nova_manager.flavors.list()
+        if isinstance(instances, dict):
+            instances = instances.values()
         # available images
-        disk_images = self.get_available_disk_images()
-        disk_image_objects = [disk_image_to_rspec_object(image) \
-                               for image in disk_images]  
+        images = self.driver.shell.image_manager.get_images_detailed()
+        disk_images  = [image_to_rspec_disk_image(img) for img in images if img['container_format'] in ['ami', 'ovf']]
         rspec_nodes = []
         for zone in zones:
             rspec_node = Node()
-            xrn = OSXrn(zone, 'node')
+            xrn = OSXrn(zone, type='node')
             rspec_node['component_id'] = xrn.urn
             rspec_node['component_name'] = xrn.name
             rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
@@ -153,7 +158,7 @@ class OSAggregate:
             slivers = []
             for instance in instances:
                 sliver = instance_to_sliver(instance)
-                sliver['disk_images'] = disk_image_objects
+                sliver['disk_image'] = disk_images
                 slivers.append(sliver)
         
             rspec_node['slivers'] = slivers
@@ -162,112 +167,107 @@ class OSAggregate:
         return rspec_nodes 
 
 
-    def create_project(self, slicename, users, options={}):
-        """
-        Create the slice if it doesn't alredy exist  
-        """
-        import nova.exception.ProjectNotFound
-        try:
-            slice = self.driver.shell.auth_manager.get_project(slicename)
-        except nova.exception.ProjectNotFound:
-            # convert urns to user names
-            usernames = [Xrn(user['urn']).get_leaf() for user in users]
-            # assume that the first user is the project manager
-            proj_manager = usernames[0] 
-            self.driver.shell.auth_manager.create_project(slicename, proj_manager)
+    def create_instance_key(self, slice_hrn, user):
+        key_name = "%s:%s" (slice_name, Xrn(user['urn']).get_hrn())
+        pubkey = user['keys'][0]
+        key_found = False
+        existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
+        for existing_key in existing_keys:
+            if existing_key.public_key != pubkey:
+                self.driver.shell.nova_manager.keypairs.delete(existing_key)
+            elif existing_key.public_key == pubkey:
+                key_found = True
 
-    def create_project_users(self, slicename, users, options={}):
-        """
-        Add requested users to the specified slice.  
-        """
-        
-        # There doesn't seem to be an effcient way to 
-        # look up all the users of a project, so lets not  
-        # attempt to remove stale users . For now lets just
-        # ensure that the specified users exist     
-        for user in users:
-            username = Xrn(user['urn']).get_leaf()
-            try:
-                self.driver.shell.auth_manager.get_user(username)
-            except nova.exception.UserNotFound:
-                self.driver.shell.auth_manager.create_user(username)
-            self.verify_user_keys(username, user['keys'], options)
+        if not key_found:
+            self.driver.shll.nova_manager.keypairs.create(key_name, pubkey)
+        return key_name       
         
 
-    def verify_user_keys(self, username, keys, options={}):
-        """
-        Add requested keys.
-        """
-        append = options.get('append', True)    
-        existing_keys = self.driver.shell.db.key_pair_get_all_by_user(username)
-        existing_pub_keys = [key.public_key for key in existing_keys]
-        removed_pub_keys = set(existing_pub_keys).difference(keys)
-        added_pub_keys = set(keys).difference(existing_pub_keys)
-        pubkeys = []
-        # add new keys
-        for public_key in added_pub_keys:
-            key = {}
-            key['user_id'] = username
-            key['name'] =  username
-            key['public'] = public_key
-            self.driver.shell.db.key_pair_create(key)
+    def create_security_group(self, slicename, fw_rules=[]):
+        # use default group by default
+        group_name = 'default' 
+        if isinstance(fw_rules, list) and fw_rules:
+            # Each sliver get's its own security group.
+            # Keep security group names unique by appending some random
+            # characters on end.
+            random_name = "".join([random.choice(string.letters+string.digits)
+                                           for i in xrange(6)])
+            group_name = slicename + random_name 
+            security_group = SecurityGroup(self.driver)
+            security_group.create_security_group(group_name)
+            for rule in fw_rules:
+                security_group.add_rule_to_group(group_name, 
+                                             protocol = rule.get('protocol'), 
+                                             cidr_ip = rule.get('cidr_ip'), 
+                                             port_range = rule.get('port_range'), 
+                                             icmp_type_code = rule.get('icmp_type_code'))
+        return group_name
 
-        # remove old keys
-        if not append:
-            for key in existing_keys:
-                if key.public_key in removed_pub_keys:
-                    self.driver.shell.db.key_pair_destroy(username, key.name)
-    
-    def reserve_instance(self, image_id, kernel_id, ramdisk_id, \
-                         instance_type, key_name, user_data):
-        conn  = self.driver.euca_shell
-        logger.info('Reserving an instance: image: %s, kernel: ' \
-                    '%s, ramdisk: %s, type: %s, key: %s' % \
-                    (image_id, kernel_id, ramdisk_id,
-                    instance_type, key_name))
-        try:
-            reservation = conn.run_instances(image_id=image_id,
-                                             kernel_id=kernel_id,
-                                             ramdisk_id=ramdisk_id,
-                                             instance_type=instance_type,
-                                             key_name=key_name,
-                                             user_data = user_data)
-        except EC2ResponseError, ec2RespError:
-            logger.log_exc(ec2RespError)
-               
-    def run_instances(self, slicename, rspec, keyname, pubkeys):
-        """
-        Create the instances thats requested in the rspec 
-        """
-        # the default image to use for instnaces that dont
-        # explicitly request an image.
-        # Just choose the first available image for now.
-        available_images = self.get_available_disk_images()
-        default_image = self.get_disk_images()[0]    
-        default_ami_id = CloudController.image_ec2_id(default_image['ami']['id'])  
-        default_aki_id = CloudController.image_ec2_id(default_image['aki']['id'])  
-        default_ari_id = CloudController.image_ec2_id(default_image['ari']['id'])
+    def add_rule_to_security_group(self, group_name, **kwds):
+        security_group = SecurityGroup(self.driver)
+        security_group.add_rule_to_group(group_name=group_name, 
+                                         protocol=kwds.get('protocol'), 
+                                         cidr_ip =kwds.get('cidr_ip'), 
+                                         icmp_type_code = kwds.get('icmp_type_code'))
+
+
+    def run_instances(self, slicename, rspec, key_name, pubkeys):
+        #logger.debug('Reserving an instance: image: %s, flavor: ' \
+        #            '%s, key: %s, name: %s' % \
+        #            (image_id, flavor_id, key_name, slicename))
 
-        # get requested slivers
+        authorized_keys = "\n".join(pubkeys)
+        files = {'/root/.ssh/authorized_keys': authorized_keys}
         rspec = RSpec(rspec)
         requested_instances = defaultdict(list)
         # iterate over clouds/zones/nodes
         for node in rspec.version.get_nodes_with_slivers():
-            instance_types = node.get('slivers', [])
-            if isinstance(instance_types, list):
-                # iterate over sliver/instance types
-                for instance_type in instance_types:
-                    ami_id = default_ami_id
-                    aki_id = default_aki_id
-                    ari_id = default_ari_id
-                    req_image = instance_type.get('disk_images')
-                    if req_image and isinstance(req_image, list):
-                        req_image_name = req_image[0]['name']
-                        disk_image = self.get_disk_image(name=req_image_name)
-                        if disk_image:
-                            ami_id = CloudController.image_ec2_id(disk_image['ami']['id'])
-                            aki_id = CloudController.image_ec2_id(disk_image['aki']['id'])
-                            ari_id = CloudController.image_ec2_id(disk_image['ari']['id'])
-                    # start the instance
-                    self.reserve_instance(ami_id, aki_id, ari_id, \
-                                          instance_type['name'], keyname, pubkeys) 
+            instances = node.get('slivers', [])
+            if not instances:
+                continue
+            for instance in instances:
+                metadata = {}
+                flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+                image = instance.get('disk_image')
+                if image and isinstance(image, list):
+                    image = image[0]
+                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+                fw_rules = instance.get('fw_rules', [])
+                group_name = self.create_security_group(slicename, fw_rules)
+                metadata['security_groups'] = [group_name]
+                metadata['component_id'] = node['component_id']
+                try: 
+                    self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+                                                            image=image_id,
+                                                            key_name = key_name,
+                                                            security_group = group_name,
+                                                            files=files,
+                                                            meta=metadata, 
+                                                            name=slicename)
+                except Exception, err:    
+                    logger.log_exc(err)                                
+                           
+
+
+    def delete_instances(self, instance_name):
+        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+        security_group_manager = SecurityGroup(self.driver)
+        for instance in instances:
+            # deleate this instance's security groups
+            for security_group in instance.metadata.get('security_groups', []):
+                # dont delete the default security group
+                if security_group != 'default': 
+                    security_group_manager.delete_security_group(security_group)
+            # destroy instance
+            self.driver.shell.nova_manager.servers.delete(instance)
+        return 1
+
+    def stop_instances(self, instance_name):
+        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+        for instance in instances:
+            self.driver.shell.nova_manager.servers.pause(instance)
+        return 1
+
    def update_instances(self, project_name):
        # TODO: not implemented -- updating already-running instances of a
        # project is currently a no-op
        pass