This tree was mixed up, with an old version of the EC2 Observer. This change copies across the version demo'd at the OpenCloud developer meeting.

author    Sapan Bhatia <gwsapan@gmail.com>
          Tue, 22 Jul 2014 02:53:58 +0000 (22:53 -0400)
committer Sapan Bhatia <gwsapan@gmail.com>
          Wed, 23 Jul 2014 20:23:12 +0000 (16:23 -0400)

planetstack/ec2_observer/steps/__init__.py
planetstack/ec2_observer/steps/sync_deployments.py [new file with mode: 0644]
planetstack/ec2_observer/steps/sync_images.py
planetstack/ec2_observer/steps/sync_nodes.py [new file with mode: 0644]
planetstack/ec2_observer/steps/sync_site_deployments.py
planetstack/ec2_observer/steps/sync_sites.py
planetstack/ec2_observer/steps/sync_slivers.py
planetstack/ec2_observer/steps/sync_users.py

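Every step touched by this commit follows the same observer contract: fetch_pending(deletion) returns the model objects that still need to be pushed to EC2, and sync_record() pushes a single one of them. The SyncStep base class in ec2_observer/syncstep.py is not part of this diff, so the sketch below only illustrates the presumed calling convention; FakeDeployment, FakeStep and run_once are stand-ins invented for the example, not names from the repository.

    # Minimal, self-contained sketch of the presumed SyncStep life cycle.
    # Real steps return Django model objects; the stand-ins below let the
    # example run without Django or AWS credentials.
    class FakeDeployment(object):
        def __init__(self, name):
            self.name = name

        def save(self):
            print("saved deployment %s" % self.name)

    class FakeStep(object):
        requested_interval = 86400  # seconds between runs, as in sync_deployments

        def fetch_pending(self, deletion):
            # Return whatever still needs syncing; nothing when deleting.
            return [] if deletion else [FakeDeployment("Amazon EC2")]

        def sync_record(self, record):
            record.save()

    def run_once(step, deletion=False):
        for pending in step.fetch_pending(deletion):
            step.sync_record(pending)

    run_once(FakeStep())
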
diff --git a/planetstack/ec2_observer/steps/__init__.py b/planetstack/ec2_observer/steps/__init__.py
index eabf46c..e56c4b1 100644 (file)
@@ -1,15 +1,15 @@
 #from .sync_external_routes import SyncExternalRoutes
-from .sync_network_slivers import SyncNetworkSlivers
-from .sync_networks import SyncNetworks
-from .sync_network_deployments import SyncNetworkDeployments
-from .sync_site_privileges import SyncSitePrivileges
-from .sync_sites import SyncSites
-from .sync_slice_memberships import SyncSliceMemberships
-from .sync_slices import SyncSlices
+#from .sync_network_slivers import SyncNetworkSlivers
+#from .sync_networks import SyncNetworks
+#from .sync_network_deployments import SyncNetworkDeployments
+#from .sync_site_privileges import SyncSitePrivileges
+#from .sync_slice_memberships import SyncSliceMemberships
+#from .sync_slices import SyncSlices
 #from .sync_sliver_ips import SyncSliverIps
-from .sync_slivers import SyncSlivers
-from .sync_users import SyncUsers
-from .sync_roles import SyncRoles
-from .sync_nodes import SyncNodes
-from .sync_images import SyncImages
-from .garbage_collector import GarbageCollector
+#from .sync_slivers import SyncSlivers
+#from .sync_users import SyncUsers
+#from .sync_roles import SyncRoles
+#from .sync_nodes import SyncNodes
+#from .sync_images import SyncImages
+#from .garbage_collector import GarbageCollector
+
diff --git a/planetstack/ec2_observer/steps/sync_deployments.py b/planetstack/ec2_observer/steps/sync_deployments.py
new file mode 100644 (file)
index 0000000..8a258f1
--- /dev/null
@@ -0,0 +1,22 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from ec2_observer.syncstep import SyncStep
+from core.models.site import *
+
+class SyncDeployments(SyncStep):
+    requested_interval=86400
+    provides=[Deployment]
+
+    def fetch_pending(self,deletion):
+        deployments = Deployment.objects.filter(Q(name="Amazon EC2"))
+        if (not deployments):
+            deployments = [Deployment(name="Amazon EC2")]
+        else:
+            deployments = []
+
+        return deployments
+
+    def sync_record(self, deployment):
+        deployment.save()
diff --git a/planetstack/ec2_observer/steps/sync_images.py b/planetstack/ec2_observer/steps/sync_images.py
index 32b3363..3a15b33 100644 (file)
@@ -2,30 +2,59 @@ import os
 import base64
 from django.db.models import F, Q
 from planetstack.config import Config
-from observer.syncstep import SyncStep
+from ec2_observer.syncstep import SyncStep
 from core.models.image import Image
-from awslib import *
+from ec2_observer.awslib import *
 
-class SyncImages(OpenStackSyncStep):
+
+class SyncImages(SyncStep):
     provides=[Image]
     requested_interval=3600
 
-    def fetch_pending(self):
+    def fetch_pending(self,deletion):
         images = Image.objects.all()
         image_names = [image.name for image in images]
        
         new_images = []
 
-               aws_images = aws_run('ec2 describe-images')
+        try:
+            aws_images = json.loads(open('/opt/planetstack/aws-images').read())
+        except:
+            aws_images = aws_run('ec2 describe-images --owner 099720109477')
+            open('/opt/planetstack/aws-images','w').write(json.dumps(aws_images))
+
+        
+
+        aws_images=aws_images['Images']
+        aws_images=filter(lambda x:x['ImageType']=='machine',aws_images)[:50]
 
 
+        names = set([])
         for aws_image in aws_images:
-            if aws_image not in image_names:
-                image = Image(image_id=image_id,
-                              name=aws_image['name'],
-                              disk_format='XXX'
-                              container_format='XXX'
-                new_images.append(image)   
+            desc_ok = True
+
+            try:
+                desc = aws_image['Description']
+            except:
+                try:
+                    desc = aws_image['ImageLocation']
+                except:
+                    desc_ok = False
+            
+            if (desc_ok):
+                try:
+                    desc_ok =  desc and desc not in names and desc not in image_names and '14.04' in desc
+                except KeyError:
+                    desc_ok = False
+
+            if desc_ok and aws_image['ImageType']=='machine':
+                image = Image(disk_format=aws_image['ImageLocation'],
+                                name=desc,
+                                container_format=aws_image['Hypervisor'],
+                                path=aws_image['ImageId'])
+                new_images.append(image)
+                names.add(image.name)
+
+        #print new_images
         return new_images
 
     def sync_record(self, image):
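
The images step above depends on aws_run() from ec2_observer/awslib.py, which this commit does not include; it also caches the describe-images output in /opt/planetstack/aws-images and keeps only machine images whose description mentions 14.04 (owner 099720109477 is Canonical's AMI account). Below is a minimal sketch of what such a helper could look like, assuming it simply shells out to the AWS CLI and parses JSON output; the body is an assumption, not the repository's implementation.

    import json
    import subprocess

    def aws_run(command):
        # Assumed behaviour: run "aws <command>" with JSON output and parse it.
        # Requires an installed and configured AWS CLI.
        out = subprocess.check_output(['aws', '--output', 'json'] + command.split())
        return json.loads(out.decode('utf-8'))

    # Mirrors the call in fetch_pending above (do not run without credentials):
    # images = aws_run('ec2 describe-images --owner 099720109477')['Images']
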
diff --git a/planetstack/ec2_observer/steps/sync_nodes.py b/planetstack/ec2_observer/steps/sync_nodes.py
new file mode 100644 (file)
index 0000000..b66eb2e
--- /dev/null
@@ -0,0 +1,48 @@
+import os
+import base64
+import random
+import time
+from datetime import datetime 
+from django.db.models import F, Q
+from planetstack.config import Config
+from ec2_observer.syncstep import SyncStep
+from core.models.node import Node
+from core.models.site import *
+from ec2_observer.awslib import *
+import pdb
+
+class SyncNodes(SyncStep):
+       provides=[Node]
+       requested_interval=0
+
+       def fetch_pending(self, deletion):
+               deployment = Deployment.objects.filter(Q(name="Amazon EC2"))[0]
+               current_site_deployments = SiteDeployments.objects.filter(Q(deployment=deployment))
+
+               zone_ret = aws_run('ec2 describe-availability-zones')
+               zones = zone_ret['AvailabilityZones']
+
+               # collect local nodes
+               instance_types = 'm1.small | m1.medium | m1.large | m1.xlarge | m3.medium | m3.large | m3.xlarge | m3.2xlarge'.split(' | ')
+
+               all_new_nodes = []
+               for sd in current_site_deployments:
+                       s = sd.site
+                       current_fqns = [n.name for n in s.nodes.all()]
+                       all_fqns = ['.'.join([n,s.name]) for n in instance_types]
+                       new_node_names = list(set(all_fqns) - set(current_fqns))
+
+                       new_nodes = []
+                       for node_name in new_node_names:
+                               node = Node(name=node_name,
+                                                       site=s,deployment=deployment)
+                               new_nodes.append(node)
+
+                       all_new_nodes.extend(new_nodes)
+
+               return all_new_nodes
+                                
+
+       def sync_record(self, node):
+               node.save()
+                 
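
sync_nodes never queries EC2 for machines; it fabricates one Node per supported instance type per site and encodes both facts in the node name as <instance-type>.<site-name>. The standalone round trip below uses an example zone name and shows how sync_slivers later recovers the instance type with rsplit.

    # Node-name round trip; 'us-east-1a' is an example zone-derived site name.
    instance_types = 'm1.small | m1.medium | m1.large | m1.xlarge | m3.medium | m3.large | m3.xlarge | m3.2xlarge'.split(' | ')
    site_name = 'us-east-1a'
    node_names = ['.'.join([t, site_name]) for t in instance_types]

    print(node_names[0])                     # m1.small.us-east-1a
    print(node_names[0].rsplit('.', 1)[0])   # m1.small, the EC2 instance type
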
diff --git a/planetstack/ec2_observer/steps/sync_site_deployments.py b/planetstack/ec2_observer/steps/sync_site_deployments.py
index a996c85..30fdf97 100644 (file)
@@ -2,27 +2,46 @@ import os
 import base64
 from django.db.models import F, Q
 from planetstack.config import Config
-from observer.openstacksyncstep import OpenStackSyncStep
+from ec2_observer.syncstep import SyncStep
 from core.models.site import *
+from ec2_observer.awslib import *
+import pdb
 
-class SyncSiteDeployments(OpenStackSyncStep):
-    requested_interval=0
-    provides=[Site, SiteDeployments]
-
-    def fetch_pending(self):
-        return SiteDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
-
-    def sync_record(self, site_deployment):
-        if not site_deployment.tenant_id:
-            driver = self.driver.admin_driver(deployment=site_deployment.deployment.name)
-            tenant = driver.create_tenant(tenant_name=site_deployment.site.login_base,
-                                               description=site_deployment.site.name,
-                                               enabled=site_deployment.site.enabled)
-            site_deployment.tenant_id = tenant.id
-            site_deployment.save()
-        elif site_deployment.site.id and site_deployment.tenant_id:
-            driver = self.driver.admin_driver(deployment=site_deployment.name)
-            driver.update_tenant(site_deployment.tenant_id,
-                                 description=site_deployment.site.name,
-                                 enabled=site_deployment.site.enabled)
-            
+class SyncSiteDeployments(SyncStep):
+       requested_interval=86400
+       provides=[SiteDeployments]
+
+       def fetch_pending(self, deletion):
+               zone_ret = aws_run('ec2 describe-availability-zones')
+               zones = zone_ret['AvailabilityZones']
+               available_sites = [zone['ZoneName'] for zone in zones]
+
+               current_sites = []
+               for s in available_sites:
+                       site = Site.objects.filter(Q(name=s))
+                       if (site):
+                               current_sites.append(site[0])
+
+               # OK not to catch exception
+               # The syncstep should catch it
+               # At any rate, we should not run if there are no deployments
+               deployment = Deployment.objects.filter(Q(name="Amazon EC2"))[0]
+               current_site_deployments = SiteDeployments.objects.filter(Q(deployment=deployment))
+               site_dict = {}
+
+               for sd in current_site_deployments:
+                       site_dict[sd.site]=sd
+
+               updated_site_deployments = []
+               for site in current_sites:
+                       try:
+                               site_record = site_dict[site]
+                       except KeyError:
+                               sd = SiteDeployments(site=site,deployment=deployment,tenant_id=base64.urlsafe_b64encode(os.urandom(12)))
+                               updated_site_deployments.append(sd)
+
+               return updated_site_deployments
+
+
+       def sync_record(self, site_deployment):
+               site_deployment.save()
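
EC2 has no tenant concept, so the step above stores a random placeholder in SiteDeployments.tenant_id. The expression is pure standard library and can be checked on its own:

    import os
    import base64

    # Same expression as in fetch_pending above: 12 random bytes, URL-safe
    # base64 encoded, gives a 16-character placeholder with no padding.
    tenant_id = base64.urlsafe_b64encode(os.urandom(12))
    print(tenant_id)
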
diff --git a/planetstack/ec2_observer/steps/sync_sites.py b/planetstack/ec2_observer/steps/sync_sites.py
index 5771aef..7a5c0dc 100644 (file)
@@ -2,20 +2,27 @@ import os
 import base64
 from django.db.models import F, Q
 from planetstack.config import Config
-from observer.syncstep import SyncStep
-from core.models.site import Site
+from ec2_observer.syncstep import SyncStep
+from core.models.site import *
 from ec2_observer.awslib import *
+import pdb
 
 class SyncSites(SyncStep):
-    provides=[Site]
-    requested_interval=3600
+       provides=[Site]
+       requested_interval=3600
+
+       def fetch_pending(self, deletion):
+
+               deployment = Deployment.objects.filter(Q(name="Amazon EC2"))[0]
+               current_site_deployments = SiteDeployments.objects.filter(Q(deployment=deployment))
+
+               zone_ret = aws_run('ec2 describe-availability-zones')
+               zones = zone_ret['AvailabilityZones']
 
-    def fetch_pending(self):
-               current_sites = Site.objects.all()
-               zones = aws_run('ec2 describe-availability-zones')
                available_sites = [zone['ZoneName'] for zone in zones]
+               site_names = [sd.site.name for sd in current_site_deployments]
 
-               new_site_names = list(set(available_sites) - set(zones))
+               new_site_names = list(set(available_sites) - set(site_names))
 
                new_sites = []
                for s in new_site_names:
@@ -27,8 +34,8 @@ class SyncSites(SyncStep):
                                                abbreviated_name=s)
                        new_sites.append(site)
                
-        return new_sites
+               return new_sites
 
-    def sync_record(self, site):
-        site.save()
+       def sync_record(self, site):
+               site.save()
 
diff --git a/planetstack/ec2_observer/steps/sync_slivers.py b/planetstack/ec2_observer/steps/sync_slivers.py
index b576bbc..2079859 100644 (file)
 import os
+import json
 import base64
 from django.db.models import F, Q
 from planetstack.config import Config
-from observer.openstacksyncstep import OpenStackSyncStep
+from ec2_observer.syncstep import SyncStep
 from core.models.sliver import Sliver
 from core.models.slice import SlicePrivilege, SliceDeployments
 from core.models.network import Network, NetworkSlice, NetworkDeployments
 from util.logger import Logger, logging
+from ec2_observer.awslib import *
+from core.models.site import *
+from core.models.slice import *
+import pdb
 
 
-class SyncSlivers(OpenStackSyncStep):
-    provides=[Sliver]
-    requested_interval=0
+class SyncSlivers(SyncStep):
+       provides=[Sliver]
+       requested_interval=0
 
 
-        return Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+       def fetch_pending(self, deletion):
+               all_slivers = Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+               my_slivers = [] 
 
 
-        logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
-        metadata_update = {}
-        if ("numberCores" in sliver.changed_fields):
-            metadata_update["cpu_cores"] = str(sliver.numberCores)
+               for sliver in all_slivers:
+                       sd = SliceDeployments.objects.filter(Q(slice=sliver.slice))
+                       if (sd):
+                               if (sd.deployment.name=='Amazon EC2'):
+                                       my_slivers.append(sliver)
+                       if (sliver.node.deployment.name=='Amazon EC2'):
+                               my_slivers.append(sliver)
+               return my_slivers
 
 
-            if tag.name.startswith("sysctl-"):
-                metadata_update[tag.name] = tag.value
+       def sync_record(self, sliver):
+               logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
 
 
-            driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)
-            # public keys
-            slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
-            pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
-            if sliver.creator.public_key:
-                pubkeys.append(sliver.creator.public_key)
-            if sliver.slice.creator.public_key:
-                pubkeys.append(sliver.slice.creator.public_key) 
-            # netowrks
-            # include all networks available to the slice and/or associated network templates
-            nics = []
-            networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]   
-            network_deployments = NetworkDeployments.objects.filter(network__in=networks, 
-                                                                    deployment=sliver.node.deployment)
-            # Gather private networks first. This includes networks with a template that has
-            # visibility = private and translation = none
-            for network_deployment in network_deployments:
-                if network_deployment.network.template.visibility == 'private' and \
-                   network_deployment.network.template.translation == 'none': 
-                    nics.append({'net-id': network_deployment.net_id})
-    
-            # now include network template
-            network_templates = [network.template.sharedNetworkName for network in networks \
-                                 if network.template.sharedNetworkName]
-            for net in driver.shell.quantum.list_networks()['networks']:
-                if net['name'] in network_templates: 
-                    nics.append({'net-id': net['id']}) 
+               if not sliver.instance_id:
+                       # public keys
+                       slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+                       pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
 
-            file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
-         
-            # look up image id
-            deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
-            image_id = None
-            images = deployment_driver.shell.glance.get_images()
-            for image in images:
-                if image['name'] == sliver.image.name:
-                    image_id = image['id']
-                    
-            # look up key name at the deployment
-            # create/fetch keypair
-            keyname = None
-            if sliver.creator.public_key:
-                keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
-                          sliver.slice.name
-                key_fields =  {'name': keyname,
-                               'public_key': sliver.creator.public_key}
-                driver.create_keypair(**key_fields)       
-            instance = driver.spawn_instance(name=sliver.name,
-                                key_name = keyname,
-                                image_id = image_id,
-                                hostname = sliver.node.name,
-                                pubkeys = pubkeys,
-                                nics = nics )
-            sliver.instance_id = instance.id
-            sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
-            sliver.save()    
+                       if sliver.creator.public_key:
+                               pubkeys.append(sliver.creator.public_key)
 
-        if sliver.instance_id and metadata_update:
-            driver.update_instance_metadata(sliver.instance_id, metadata_update)
+                       if sliver.slice.creator.public_key:
+                               pubkeys.append(sliver.slice.creator.public_key) 
+
+                       # networks
+                       # include all networks available to the slice and/or associated network templates
+                       #nics = []
+                       #networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]      
+                       #network_deployments = NetworkDeployments.objects.filter(network__in=networks, 
+                                                                                                                                       #deployment=sliver.node.deployment)
+                       # Gather private networks first. This includes networks with a template that has
+                       # visibility = private and translation = none
+                       #for network_deployment in network_deployments:
+                       #       if network_deployment.network.template.visibility == 'private' and \
+                       #          network_deployment.network.template.translation == 'none': 
+                       #               nics.append({'net-id': network_deployment.net_id})
+       
+                       # now include network template
+                       #network_templates = [network.template.sharedNetworkName for network in networks \
+                       #                                        if network.template.sharedNetworkName]
+                       #for net in driver.shell.quantum.list_networks()['networks']:
+                       #       if net['name'] in network_templates: 
+                       #               nics.append({'net-id': net['id']}) 
+                       # look up image id
+
+                       instance_type = sliver.node.name.rsplit('.',1)[0]
+
+                       # Bail out if we don't have a key
+                       key_name = sliver.creator.email.lower().replace('@', 'AT').replace('.', '')
+                       key_sig = aws_run('ec2 describe-key-pairs')
+                       ec2_keys = key_sig['KeyPairs']
+                       key_found = False
+                       for key in ec2_keys:
+                               if (key['KeyName']==key_name):
+                                       key_found = True
+                                       break
+
+                       if (not key_found):
+                               # set backend_status
+                               raise Exception('Will not sync sliver without key')
+
+                       image_id = sliver.image.path
+                       instance_sig = aws_run('ec2 run-instances --image-id %s --instance-type %s --count 1 --key-name %s --placement AvailabilityZone=%s'%(image_id,instance_type,key_name,sliver.node.site.name))
+                       sliver.instance_id = instance_sig['Instances'][0]['InstanceId']
+                       sliver.save()
+                       state = instance_sig['Instances'][0]['State']['Code']
+                       if (state==16):
+                               sliver.ip = instance_sig['Instances'][0]['PublicIpAddress']
+                               sliver.save()
+                       else:
+                               # This status message should go into backend_status
+                               raise Exception('Waiting for instance to start')
+               else:
+                       ret = aws_run('ec2 describe-instances --instance-ids %s'%sliver.instance_id)
+                       state = ret['Reservations'][0]['Instances'][0]['State']['Code']
+                       if (state==16):
+                               sliver.ip = ret['Reservations'][0]['Instances'][0]['PublicIpAddress']
+                               sliver.save()
 
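sync_record drives EC2 through the CLI: it refuses to create an instance unless the creator's key pair is already registered, then issues ec2 run-instances and stores the returned InstanceId, filling in the public IP only once the instance reports state code 16 (EC2's 'running' state). The snippet below merely assembles the command string that would be handed to aws_run, with example values for every field; it does not contact AWS.

    # Rebuild the run-instances command from sync_record with example values.
    image_id = 'ami-0123abcd'                    # example Image.path value
    node_name = 'm1.small.us-east-1a'            # example Node.name
    instance_type = node_name.rsplit('.', 1)[0]  # -> m1.small
    key_name = 'jdoeATexampleorg'                # derived from the creator's email
    zone = 'us-east-1a'                          # sliver.node.site.name

    cmd = ('ec2 run-instances --image-id %s --instance-type %s '
           '--count 1 --key-name %s --placement AvailabilityZone=%s'
           % (image_id, instance_type, key_name, zone))
    print(cmd)
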
diff --git a/planetstack/ec2_observer/steps/sync_users.py b/planetstack/ec2_observer/steps/sync_users.py
index 71f9c0f..d742186 100644 (file)
@@ -1,20 +1,43 @@
 import os
 import base64
-import hashlib
+import random
+import time
+from datetime import datetime 
 from django.db.models import F, Q
 from planetstack.config import Config
-from observer.openstacksyncstep import OpenStackSyncStep
-from core.models.user import User, UserDeployments
+from ec2_observer.syncstep import SyncStep
+from core.models.user import User
+from core.models.site import *
+from ec2_observer.awslib import *
+import pdb
 
-class SyncUsers(OpenStackSyncStep):
-    provides=[User]
-    requested_interval=0
+class SyncUsers(SyncStep):
+       provides=[User]
+       requested_interval=0
 
-    def fetch_pending(self):
-        return User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+       def fetch_pending(self, deletion):
+               users = User.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+               if (users):
+                       key_sig = aws_run('ec2 describe-key-pairs')
+                       ec2_keys = key_sig['KeyPairs']
+               else:
+                       ec2_keys = []
 
-    def sync_record(self, user):
-        for user_deployment in UserDeployments.objects.filter(user=user):
-            # bump the 'updated' field so user account are updated across 
-            # deployments.
-            user_deployment.save()
+               for user in users:
+                       if (user.public_key): 
+                               key_name = user.email.lower().replace('@', 'AT').replace('.', '')
+                               key_found = False
+
+                               for key in ec2_keys:
+                                       if (key['KeyName']==key_name):
+                                               key_found = True
+                                               break
+
+                               if (not key_found):
+                                       aws_run('ec2 import-key-pair --key-name %s --public-key-material "%s"'%(key_name, user.public_key))
+                                       
+               return users
+
+       def sync_record(self, node):
+               node.save()
+
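
sync_users matches users to EC2 key pairs purely by a name derived from the e-mail address and imports any missing public key; sync_record then saves whatever record it is handed (despite the node parameter name, these are User objects). The derivation is the same one sync_slivers relies on when deciding whether a sliver may be created, and it can be checked standalone; the address below is an example.

    def ec2_key_name(email):
        # Same transformation as in sync_users/sync_slivers above:
        # lowercase, '@' becomes 'AT', dots are dropped.
        return email.lower().replace('@', 'AT').replace('.', '')

    assert ec2_key_name('J.Doe@Example.org') == 'jdoeATexampleorg'
    print(ec2_key_name('J.Doe@Example.org'))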