changes from alpha site
author Scott Baker <smbaker@gmail.com>
Mon, 12 May 2014 17:40:25 +0000 (10:40 -0700)
committer Scott Baker <smbaker@gmail.com>
Mon, 12 May 2014 17:40:25 +0000 (10:40 -0700)
planetstack/observer/sync_image_deployments.py [new file with mode: 0644]
planetstack/observer/sync_roles.py [new file with mode: 0644]
planetstack/observer/sync_slice_deployments.py [new file with mode: 0644]
planetstack/observer/sync_slice_memberships.py [new file with mode: 0644]
planetstack/observer/sync_slivers.py [new file with mode: 0644]
planetstack/observer/syncstep.py

diff --git a/planetstack/observer/sync_image_deployments.py b/planetstack/observer/sync_image_deployments.py
new file mode 100644 (file)
index 0000000..3522eca
--- /dev/null
@@ -0,0 +1,60 @@
+import os
+import base64
+from collections import defaultdict
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.deployment import Deployment
+from core.models.image import Image, ImageDeployments
+
+class SyncImageDeployments(OpenStackSyncStep):
+    provides=[ImageDeployments]
+    requested_interval=0
+
+    def fetch_pending(self):
+        # ensure images are available across all deployments
+        image_deployments = ImageDeployments.objects.all()
+        image_deploy_lookup = defaultdict(list)
+        for image_deployment in image_deployments:
+            image_deploy_lookup[image_deployment.image].append(image_deployment.deployment)
+        
+        all_deployments = Deployment.objects.all() 
+        for image in Image.objects.all():
+            expected_deployments = all_deployments
+            for expected_deployment in expected_deployments:
+                if image not in image_deploy_lookup or \
+                  expected_deployment not in image_deploy_lookup[image]:
+                    img_deployment = ImageDeployments(image=image, deployment=expected_deployment)
+                    img_deployment.save()
+            
+        # now we return all images that need to be enacted
+        return ImageDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
+                      
+    def sync_record(self, image_deployment):
+        driver = self.driver.admin_driver(deployment=image_deployment.deployment.name)
+        images = driver.shell.glance.get_images()
+        glance_image = None
+        for image in images:
+            if image['name'] == image_deployment.image.name:
+                glance_image = image
+                break
+        if glance_image:
+            image_deployment.glance_image_id = glance_image['id']
+        elif image_deployment.image.path:
+            image = {
+                'name': image_deployment.image.name,
+                'is_public': True,
+                'disk_format': 'raw', 
+                'container_format': 'bare',
+                'file': image_deployment.image.path, 
+            }  
+            glance_image = driver.shell.glanceclient.images.create(name=image_deployment.image.name,
+                                                                   is_public=True,
+                                                                   disk_format='raw',
+                                                                   container_format='bare')
+            glance_image.update(data=open(image_deployment.image.path, 'rb'))
+            if not glance_image or not glance_image.get('id'): 
+                raise Exception("Add image failed at deployment %s" % image_deployment.deployment.name)
+            image_deployment.glance_image_id = glance_image['id']
+        image_deployment.save()
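
All of these sync steps select pending records with the same enacted/updated query. A minimal standalone sketch of that idiom, assuming a Django model with 'enacted' and 'updated' DateTimeFields like the core models above:

    # Sketch only: how fetch_pending() decides what still needs syncing.
    from django.db.models import F, Q

    def pending(model):
        # a record is pending if it has never been enacted, or has been
        # updated since the last time it was enacted
        return model.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))

    # e.g. pending(ImageDeployments) reproduces the query used in fetch_pending() above.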
diff --git a/planetstack/observer/sync_roles.py b/planetstack/observer/sync_roles.py
new file mode 100644 (file)
index 0000000..ca85d57
--- /dev/null
@@ -0,0 +1,38 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.role import Role
+from core.models.site import SiteRole
+from core.models.slice import SliceRole
+from core.models.deployment import Deployment, DeploymentRole
+
+class SyncRoles(OpenStackSyncStep):
+    provides=[Role]
+    requested_interval=0
+
+    def fetch_pending(self):
+        site_roles = SiteRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+        slice_roles = SliceRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+        deployment_roles = DeploymentRole.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+        roles = []
+        for site_role in site_roles:
+            roles.append(site_role)
+        for slice_role in slice_roles:
+            roles.append(slice_role)
+        for deployment_role in deployment_roles:
+            roles.append(deployment_role)
+
+        return roles
+
+
+    def sync_record(self, role):
+        if not role.enacted:
+            deployments = Deployment.objects.all()
+            for deployment in deployments:
+                driver = self.driver.admin_driver(deployment=deployment.name)
+                driver.create_role(role.role)
+            role.save()
+    
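
fetch_pending() above concatenates three querysets by hand; an equivalent sketch using itertools.chain, assuming the imports from sync_roles.py:

    # Sketch: collect pending roles of all three kinds in one pass.
    from itertools import chain
    from django.db.models import F, Q

    def fetch_pending_roles():
        pending = Q(enacted__lt=F('updated')) | Q(enacted=None)
        return list(chain(SiteRole.objects.filter(pending),
                          SliceRole.objects.filter(pending),
                          DeploymentRole.objects.filter(pending)))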
diff --git a/planetstack/observer/sync_slice_deployments.py b/planetstack/observer/sync_slice_deployments.py
new file mode 100644 (file)
index 0000000..b40eb6b
--- /dev/null
@@ -0,0 +1,106 @@
+import os
+import base64
+from collections import defaultdict
+from netaddr import IPAddress, IPNetwork
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.deployment import Deployment
+from core.models.site import SiteDeployments
+from core.models.slice import Slice, SliceDeployments
+from core.models.user import UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliceDeployments(OpenStackSyncStep):
+    provides=[SliceDeployments]
+    requested_interval=0
+
+    def fetch_pending(self):
+        # slice deployments are not visible to users. We must ensure
+        # slices are deployed at all deployments available to their site.
+        site_deployments = SiteDeployments.objects.all()
+        site_deploy_lookup = defaultdict(list)
+        for site_deployment in site_deployments:
+            site_deploy_lookup[site_deployment.site].append(site_deployment.deployment)
+        
+        slice_deployments = SliceDeployments.objects.all()
+        slice_deploy_lookup = defaultdict(list)
+        for slice_deployment in slice_deployments:
+            slice_deploy_lookup[slice_deployment.slice].append(slice_deployment.deployment)
+        
+        all_deployments = Deployment.objects.all() 
+        for slice in Slice.objects.all():
+            # slices are added to all deployments for now
+            expected_deployments = all_deployments
+            #expected_deployments = site_deploy_lookup[slice.site]
+            for expected_deployment in expected_deployments:
+                if slice not in slice_deploy_lookup or \
+                   expected_deployment not in slice_deploy_lookup[slice]:
+                    sd = SliceDeployments(slice=slice, deployment=expected_deployment)
+                    sd.save()
+
+        # now we can return all slice deployments that need to be enacted   
+        return SliceDeployments.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def get_next_subnet(self, deployment=None):
+        # limit ourself to 10.0.x.x for now
+        valid_subnet = lambda net: net.startswith('10.0')
+        driver = self.driver.admin_driver(deployment=deployment)
+        subnets = driver.shell.quantum.list_subnets()['subnets']
+        ints = [int(IPNetwork(subnet['cidr']).ip) for subnet in subnets \
+                if valid_subnet(subnet['cidr'])]
+        ints.sort()
+        if ints:
+            last_ip = IPAddress(ints[-1])
+        else:
+            last_ip = IPAddress('10.0.0.1')
+        last_network = IPNetwork(str(last_ip) + "/24")
+        next_network = IPNetwork(str(IPAddress(last_network) + last_network.size) + "/24")
+        return next_network
+
+
+    def sync_record(self, slice_deployment):
+        logger.info("sync'ing slice deployment %s" % slice_deployment)
+        if not slice_deployment.tenant_id:
+            nova_fields = {'tenant_name': slice_deployment.slice.name,
+                   'description': slice_deployment.slice.description,
+                   'enabled': slice_deployment.slice.enabled}
+            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+            tenant = driver.create_tenant(**nova_fields)
+            slice_deployment.tenant_id = tenant.id
+
+            # XXX give caller an admin role at the tenant they've created
+            deployment_users = UserDeployments.objects.filter(user=slice_deployment.slice.creator,
+                                                             deployment=slice_deployment.deployment)            
+            if not deployment_users:
+                logger.info("slice createor %s has not accout at deployment %s" % (slice_deployment.slice.creator, slice_deployment.deployment.name))
+            else:
+                deployment_user = deployment_users[0]
+                # lookup user id at this deployment
+                kuser= driver.shell.keystone.users.find(email=slice_deployment.slice.creator.email)
+
+                # add required roles at the slice's tenant 
+                driver.add_user_role(kuser.id, tenant.id, 'admin')
+                    
+                # refresh credentials using this tenant
+                client_driver = self.driver.client_driver(caller=deployment_user.user,
+                                                          tenant=tenant.name, 
+                                                          deployment=slice_deployment.deployment.name)
+
+
+        if slice_deployment.id and slice_deployment.tenant_id:
+            # update existing tenant
+            driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+            driver.update_tenant(slice_deployment.tenant_id,
+                                 description=slice_deployment.slice.description,
+                                 enabled=slice_deployment.slice.enabled)  
+
+        if slice_deployment.tenant_id:
+            # update slice/tenant quota
+            driver = self.driver.client_driver(deployment=slice_deployment.deployment.name, tenant=slice_deployment.slice.name)
+            driver.shell.nova.quotas.update(tenant_id=slice_deployment.tenant_id, instances=int(slice_deployment.slice.max_slivers)) 
+
+        slice_deployment.save()
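
get_next_subnet() above always allocates the /24 immediately after the highest 10.0.x.x subnet already registered in Quantum. A rough illustration of just the address arithmetic, with no OpenStack calls (the helper name and the .first-based step are mine, not part of the commit):

    # Sketch of the next-subnet arithmetic used by get_next_subnet().
    from netaddr import IPAddress, IPNetwork

    def next_subnet(existing_cidrs):
        tens = sorted(int(IPNetwork(c).ip) for c in existing_cidrs
                      if c.startswith('10.0'))
        last_ip = IPAddress(tens[-1]) if tens else IPAddress('10.0.0.1')
        last_network = IPNetwork(str(last_ip) + "/24")
        # step forward by one /24 (256 addresses)
        return IPNetwork(str(IPAddress(last_network.first + last_network.size)) + "/24")

    # e.g. next_subnet(['10.0.3.0/24', '10.0.5.0/24']) -> IPNetwork('10.0.6.0/24')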
diff --git a/planetstack/observer/sync_slice_memberships.py b/planetstack/observer/sync_slice_memberships.py
new file mode 100644 (file)
index 0000000..38bd26c
--- /dev/null
@@ -0,0 +1,33 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.slice import *
+from core.models.user import UserDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSliceMemberships(OpenStackSyncStep):
+    requested_interval=0
+    provides=[SlicePrivilege]
+
+    def fetch_pending(self):
+        return SlicePrivilege.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, slice_memb):
+        # sync slice memberships at all slice deployments 
+        logger.info("syncing slice privilege: %s %s" % (slice_memb.slice.name, slice_memb.user.email))
+        slice_deployments = SliceDeployments.objects.filter(slice=slice_memb.slice)
+        for slice_deployment in slice_deployments:
+            if not slice_deployment.tenant_id:
+                continue
+            user_deployments = UserDeployments.objects.filter(deployment=slice_deployment.deployment,
+                                                              user=slice_memb.user)
+            if user_deployments:
+                kuser_id  = user_deployments[0].kuser_id
+                driver = self.driver.admin_driver(deployment=slice_deployment.deployment.name)
+                driver.add_user_role(kuser_id,
+                                     slice_deployment.tenant_id,
+                                     slice_memb.role.role)
diff --git a/planetstack/observer/sync_slivers.py b/planetstack/observer/sync_slivers.py
new file mode 100644 (file)
index 0000000..a794ccf
--- /dev/null
@@ -0,0 +1,92 @@
+import os
+import base64
+from django.db.models import F, Q
+from planetstack.config import Config
+from observer.openstacksyncstep import OpenStackSyncStep
+from core.models.sliver import Sliver
+from core.models.slice import Slice, SlicePrivilege, SliceDeployments
+from core.models.network import Network, NetworkSlice, NetworkDeployments
+from util.logger import Logger, logging
+
+logger = Logger(level=logging.INFO)
+
+class SyncSlivers(OpenStackSyncStep):
+    provides=[Sliver]
+    requested_interval=0
+
+    def fetch_pending(self):
+        return Sliver.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
+
+    def sync_record(self, sliver):
+        logger.info("sync'ing sliver:%s deployment:%s " % (sliver, sliver.node.deployment))
+        metadata_update = {}
+        if ("numberCores" in sliver.changed_fields):
+            metadata_update["cpu_cores"] = str(sliver.numberCores)
+
+        for tag in sliver.slice.tags.all():
+            if tag.name.startswith("sysctl-"):
+                metadata_update[tag.name] = tag.value
+
+        if not sliver.instance_id:
+            driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, deployment=sliver.deploymentNetwork.name)
+            # public keys
+            slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
+            pubkeys = [sm.user.public_key for sm in slice_memberships if sm.user.public_key]
+            if sliver.creator.public_key:
+                pubkeys.append(sliver.creator.public_key)
+            if sliver.slice.creator.public_key:
+                pubkeys.append(sliver.slice.creator.public_key) 
+            # networks
+            # include all networks available to the slice and/or associated network templates
+            nics = []
+            networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]   
+            network_deployments = NetworkDeployments.objects.filter(network__in=networks, 
+                                                                    deployment=sliver.node.deployment)
+            # Gather private networks first. This includes networks with a template that has
+            # visibility = private and translation = none
+            for network_deployment in network_deployments:
+                if network_deployment.network.template.visibility == 'private' and \
+                   network_deployment.network.template.translation == 'none': 
+                    nics.append({'net-id': network_deployment.net_id})
+    
+            # now include network template
+            network_templates = [network.template.sharedNetworkName for network in networks \
+                                 if network.template.sharedNetworkName]
+            #logger.info("%s %s %s %s" % (driver.shell.quantum.username, driver.shell.quantum.password, driver.shell.quantum.tenant, driver.shell.quantum.url))
+            for net in driver.shell.quantum.list_networks()['networks']:
+                if net['name'] in network_templates: 
+                    nics.append({'net-id': net['id']}) 
+
+            file("/tmp/scott-manager","a").write("slice: %s\nreq: %s\n" % (str(sliver.slice.name), str(nics)))
+         
+            # look up image id
+            deployment_driver = self.driver.admin_driver(deployment=sliver.deploymentNetwork.name)
+            image_id = None
+            images = deployment_driver.shell.glance.get_images()
+            for image in images:
+                if image['name'] == sliver.image.name:
+                    image_id = image['id']
+                    
+            # look up key name at the deployment
+            # create/fetch keypair
+            keyname = None
+            if sliver.creator.public_key:
+                keyname = sliver.creator.email.lower().replace('@', 'AT').replace('.', '') +\
+                          sliver.slice.name
+                key_fields =  {'name': keyname,
+                               'public_key': sliver.creator.public_key}
+                driver.create_keypair(**key_fields)       
+            instance = driver.spawn_instance(name=sliver.name,
+                                key_name = keyname,
+                                image_id = image_id,
+                                hostname = sliver.node.name,
+                                pubkeys = pubkeys,
+                                nics = nics )
+            sliver.instance_id = instance.id
+            sliver.instance_name = getattr(instance, 'OS-EXT-SRV-ATTR:instance_name')
+            sliver.save()    
+
+        if sliver.instance_id and metadata_update:
+            driver.update_instance_metadata(sliver.instance_id, metadata_update)
+
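The keypair name built in sync_record() above is derived from the creator's email and the slice name. A small sketch of that derivation (the helper name is illustrative, not part of the commit):

    # Sketch of the keypair naming convention used in sync_slivers.
    def make_keyname(email, slice_name):
        # e.g. make_keyname('alice@example.org', 'mysite_slice1')
        #   -> 'aliceATexampleorgmysite_slice1'
        return email.lower().replace('@', 'AT').replace('.', '') + slice_name
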
diff --git a/planetstack/observer/syncstep.py b/planetstack/observer/syncstep.py
index fb8c77a..c41628a 100644 (file)
@@ -46,7 +46,8 @@ class SyncStep:
     
     def check_dependencies(self, obj, failed):
         for dep in self.dependencies:
-            peer_object = getattr(obj, dep.lower())
+            peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
+            peer_object = getattr(obj, peer_name)
             if (peer_object.pk==failed.pk):
                 raise FailedDependency
 
@@ -60,7 +61,7 @@ class SyncStep:
                 o.enacted = datetime.now() # Is this the same timezone? XXX
                 o.save(update_fields=['enacted'])
             except:
-                logger.log_exc("sync step failed!")
+                logger.log_exc("sync step %s failed!" % self.__name__)
                 failed.append(o)
 
         return failed
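
The check_dependencies() change lowercases only the first letter of the dependency name instead of the whole string. A minimal before/after sketch (the example class name is assumed):

    # Sketch of the attribute-name derivation fixed in check_dependencies().
    dep = 'SliceDeployments'              # example dependency class name

    old_name = dep.lower()                # 'slicedeployments'  -- wrong attribute name
    new_name = dep[0].lower() + dep[1:]   # 'sliceDeployments'  -- first letter lowered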