Merge branch 'master' of ssh://git.planet-lab.org/git/plstackapi into observer3.0
author Sapan Bhatia <gwsapan@gmail.com>
Wed, 16 Jul 2014 04:17:33 +0000 (00:17 -0400)
committer Sapan Bhatia <gwsapan@gmail.com>
Wed, 16 Jul 2014 04:17:33 +0000 (00:17 -0400)
Conflicts:
planetstack/core/fixtures/demo_data.json
planetstack/core/fixtures/initial_data.json
planetstack/core/models/plcorebase.py
planetstack/observer/event_loop.py
planetstack/observer/event_manager.py
planetstack/observer/steps/garbage_collector.py
planetstack/observer/syncstep.py

planetstack/core/models/plcorebase.py
planetstack/core/models/user.py
planetstack/observer/event_loop.py
planetstack/observer/event_manager.py
planetstack/observer/syncstep.py

@@@ -1,4 -1,6 +1,6 @@@
+ import datetime
  import os
+ import sys
  from django.db import models
  from django.forms.models import model_to_dict
  from django.core.urlresolvers import reverse
@@@ -8,32 -10,24 +10,37 @@@ try
      # This is a no-op if observer_disabled is set to 1 in the config file
      from observer import *
  except:
-     print "import of observer failed! printing traceback and disabling observer:"
+     print >> sys.stderr, "import of observer failed! printing traceback and disabling observer:"
      import traceback
      traceback.print_exc()
  
      # guard against something failing
-     def notify_observer():
+     def notify_observer(*args, **kwargs):
          pass
  
 -class PlCoreBase(models.Model):
 +# This manager will be inherited by all subclasses because
 +# the core model is abstract.
 +class PlCoreBaseManager(models.Manager):
 +    def get_query_set(self):
 +        return super(PlCoreBaseManager, self).get_query_set().filter(deleted=False)
  
 +class PlCoreBase(models.Model):
 +    objects = PlCoreBaseManager()
+     # default values for created and updated are only there to keep evolution
+     # from failing.
+     # (pass the callable, not its result, so the value is computed at save
+     # time instead of once at import time)
+     created = models.DateTimeField(auto_now_add=True, default=datetime.datetime.now)
+     updated = models.DateTimeField(auto_now=True, default=datetime.datetime.now)
      enacted = models.DateTimeField(null=True, default=None)
 +    backend_status = models.CharField(max_length=140,
 +                                      default="Provisioning in progress")
 +    deleted = models.BooleanField(default=False)
  
      class Meta:
 +        # Changing abstract to False would require the managers of subclasses of
 +        # PlCoreBase to be customized individually.
          abstract = True
          app_label = "core"
  
  
      def delete(self, *args, **kwds):
          # so we have something to give the observer
 -        pk = self.pk
 -        model_dict = model_to_dict(self)
 -        for (k,v) in model_dict.items():
 -            # things like datetime are not JSON serializable
 -            model_dict[k] = str(v)
 +        # pop 'purge' so it is not forwarded to Django's delete(); soft
 +        # delete is the default, and only the observer passes purge=True
 +        purge = kwds.pop('purge', False)
 +        if (observer_disabled or purge):
 +            super(PlCoreBase, self).delete(*args, **kwds)
 +        else:
 +            self.deleted = True
 +            self.enacted=None
 +            self.save(update_fields=['enacted','deleted'])
  
 -        super(PlCoreBase, self).delete(*args, **kwds)
 -
 -        # This is a no-op if observer_disabled is set
 -        notify_observer(model=self, delete=True, pk=pk, model_dict=model_dict)
  
      def save(self, *args, **kwargs):
          super(PlCoreBase, self).save(*args, **kwargs)
 -        
 +
          # This is a no-op if observer_disabled is set
          notify_observer()
  
@@@ -3,16 -3,14 +3,17 @@@ import datetim
  from collections import defaultdict
  from django.db import models
  from django.db.models import F, Q
- from core.models import PlCoreBase,Site
- from core.models.deployment import Deployment
+ from core.models import PlCoreBase,Site, DashboardView
+ from core.models.site import Deployment
  from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
  from timezones.fields import TimeZoneField
+ from operator import itemgetter, attrgetter
  
  # Create your models here.
  class UserManager(BaseUserManager):
 +    def get_query_set(self):
 +        return super(UserManager, self).get_query_set().filter(deleted=False)
 +
      def create_user(self, email, firstname, lastname, password=None):
          """
          Creates and saves a User with the given email, date of
@@@ -77,12 -75,11 +78,14 @@@ class User(AbstractBaseUser)
      created = models.DateTimeField(auto_now_add=True)
      updated = models.DateTimeField(auto_now=True)
      enacted = models.DateTimeField(null=True, default=None)
 +    backend_status = models.CharField(max_length=140,
 +                                      default="Provisioning in progress")
 +    deleted = models.BooleanField(default=False)
  
      timezone = TimeZoneField()
  
+     dashboards = models.ManyToManyField('DashboardView', through='UserDashboardView', blank=True)
      objects = UserManager()
  
      USERNAME_FIELD = 'email'
      def is_superuser(self):
          return False
  
+     def get_dashboards(self):
+         DEFAULT_DASHBOARDS=["Tenant"]
+         dashboards = sorted(list(self.dashboardViews.all()), key=attrgetter('order'))
+         dashboards = [x.dashboardView for x in dashboards]
+         if not dashboards:
+             for dashboardName in DEFAULT_DASHBOARDS:
+                 dbv = DashboardView.objects.filter(name=dashboardName)
+                 if dbv:
+                     dashboards.append(dbv[0])
+         return dashboards
  #    def get_roles(self):
  #        from core.models.site import SitePrivilege
  #        from core.models.slice import SliceMembership
              qs = User.objects.filter(Q(site__in=sites) | Q(id__in=user_ids))
          return qs            
  
-              
-     
- class UserDeployments(PlCoreBase):
-     user = models.ForeignKey(User)
-     deployment = models.ForeignKey(Deployment)
-     kuser_id = models.CharField(null=True, blank=True, max_length=200, help_text="Keystone user id")
-     def __unicode__(self):  return u'%s %s' % (self.user, self.deployment.name)
-     @staticmethod
-     def select_by_user(user):
-         if user.is_admin:
-             qs = UserDeployments.objects.all()
-         else:
-             users = Users.select_by_user(user)
-             qs = Usereployments.objects.filter(user__in=slices)
-         return qs 
+ class UserDashboardView(PlCoreBase):
+      user = models.ForeignKey(User, related_name="dashboardViews")
+      dashboardView = models.ForeignKey(DashboardView, related_name="dashboardViews")
+      order = models.IntegerField(default=0)
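
The dashboard hunk replaces the removed UserDeployments bookkeeping with a many-to-many between User and DashboardView, ordered through UserDashboardView.order. A hypothetical usage sketch (the user record and the "Tenant" fixture are assumptions; get_dashboards comes from the hunk above):

    u = User.objects.get(email="admin@example.com")   # hypothetical user
    for dbv in u.get_dashboards():                    # sorted by UserDashboardView.order
        print dbv.name
    # A user with no UserDashboardView rows falls back to the DashboardView
    # named "Tenant", provided such a fixture exists.
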
@@@ -16,10 -16,9 +16,10 @@@ from openstack.driver import OpenStackD
  from util.logger import Logger, logging, logger
  #from timeout import timeout
  from planetstack.config import Config
- from observer.steps import *
+ #from observer.steps import *
  from syncstep import SyncStep
  from toposort import toposort
 +from observer.error_mapper import ErrorMapper
  
  debug_mode = False
  
@@@ -33,278 -32,264 +33,278 @@@ class NoOpDriver
           self.enabled = True
  
  class PlanetStackObserver:
 -    #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
 -    sync_steps = []
 -
 -    def __init__(self):
 -        # The Condition object that gets signalled by Feefie events
 -        self.step_lookup = {}
 -        self.load_sync_step_modules()
 -        self.load_sync_steps()
 -        self.event_cond = threading.Condition()
 -
 -
 -        self.driver_kind = getattr(Config(), "observer_driver", "openstack")
 -        if self.driver_kind=="openstack":
 -            self.driver = OpenStackDriver()
 -        else:
 -            self.driver = NoOpDriver()
 -
 -    def wait_for_event(self, timeout):
 -        logger.info('Waiting for event')
 -        self.event_cond.acquire()
 -        self.event_cond.wait(timeout)
 -        self.event_cond.release()
 -
 -    def wake_up(self):
 -        logger.info('Wake up routine called. Event cond %r'%self.event_cond)
 -        self.event_cond.acquire()
 -        self.event_cond.notify()
 -        self.event_cond.release()
 -
 -    def load_sync_step_modules(self, step_dir=None):
 -        if step_dir is None:
 -            if hasattr(Config(), "observer_steps_dir"):
 -                step_dir = Config().observer_steps_dir
 -            else:
 -                step_dir = "/opt/planetstack/observer/steps"
 -
 -        for fn in os.listdir(step_dir):
 -            pathname = os.path.join(step_dir,fn)
 -            if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
 -                module = imp.load_source(fn[:-3],pathname)
 -                for classname in dir(module):
 -                    c = getattr(module, classname, None)
 -
 -                    # make sure 'c' is a descendent of SyncStep and has a
 -                    # provides field (this eliminates the abstract base classes
 -                    # since they don't have a provides)
 -
 -                    if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
 -                        self.sync_steps.append(c)
 -        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
 -        # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
 -
 -    def load_sync_steps(self):
 -        dep_path = Config().observer_dependency_graph
 -        logger.info('Loading model dependency graph from %s' % dep_path)
 -        try:
 -            # This contains dependencies between records, not sync steps
 -            self.model_dependency_graph = json.loads(open(dep_path).read())
 -        except Exception,e:
 -            raise e
 -
 -        try:
 -            backend_path = Config().observer_pl_dependency_graph
 -            logger.info('Loading backend dependency graph from %s' % backend_path)
 -            # This contains dependencies between backend records
 -            self.backend_dependency_graph = json.loads(open(backend_path).read())
 -        except Exception,e:
 -            logger.info('Backend dependency graph not loaded')
 -            # We can work without a backend graph
 -            self.backend_dependency_graph = {}
 -
 -        provides_dict = {}
 -        for s in self.sync_steps:
 -            self.step_lookup[s.__name__] = s 
 -            for m in s.provides:
 -                try:
 -                    provides_dict[m.__name__].append(s.__name__)
 -                except KeyError:
 -                    provides_dict[m.__name__]=[s.__name__]
 -
 -                
 -        step_graph = {}
 -        for k,v in self.model_dependency_graph.iteritems():
 -            try:
 -                for source in provides_dict[k]:
 -                    for m in v:
 -                        try:
 -                            for dest in provides_dict[m]:
 -                                # no deps, pass
 -                                try:
 -                                    if (dest not in step_graph[source]):
 -                                        step_graph[source].append(dest)
 -                                except:
 -                                    step_graph[source]=[dest]
 -                        except KeyError:
 -                            pass
 -                    
 -            except KeyError:
 -                pass
 -                # no dependencies, pass
 -        
 -        #import pdb
 -        #pdb.set_trace()
 -        if (self.backend_dependency_graph):
 -            backend_dict = {}
 -            for s in self.sync_steps:
 -                for m in s.serves:
 -                    backend_dict[m]=s.__name__
 -                    
 -            for k,v in backend_dependency_graph.iteritems():
 -                try:
 -                    source = backend_dict[k]
 -                    for m in v:
 -                        try:
 -                            dest = backend_dict[m]
 -                        except KeyError:
 -                            # no deps, pass
 -                            pass
 -                        step_graph[source]=dest
 -                        
 -                except KeyError:
 -                    pass
 -                    # no dependencies, pass
 -
 -        dependency_graph = step_graph
 -
 -        self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
 -        print "Order of steps=",self.ordered_steps
 -        self.load_run_times()
 -        
 -
 -    def check_duration(self, step, duration):
 -        try:
 -            if (duration > step.deadline):
 -                logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
 -        except AttributeError:
 -            # S doesn't have a deadline
 -            pass
 -
 -    def update_run_time(self, step):
 -        self.last_run_times[step.__name__]=time.time()
 -
 -    def check_schedule(self, step):
 -        time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
 -        try:
 -            if (time_since_last_run < step.requested_interval):
 -                raise StepNotReady
 -        except AttributeError:
 -            logger.info('Step %s does not have requested_interval set'%step.__name__)
 -            raise StepNotReady
 -    
 -    def load_run_times(self):
 -        try:
 -            jrun_times = open('/tmp/observer_run_times').read()
 -            self.last_run_times = json.loads(jrun_times)
 -        except:
 -            self.last_run_times={}
 -            for e in self.ordered_steps:
 -                self.last_run_times[e]=0
 -
 -
 -    def save_run_times(self):
 -        run_times = json.dumps(self.last_run_times)
 -        open('/tmp/observer_run_times','w').write(run_times)
 -
 -    def check_class_dependency(self, step, failed_steps):
 -        step.dependenices = []
 -        for obj in step.provides:
 -            step.dependenices.extend(self.model_dependency_graph.get(obj.__name__, []))
 -        for failed_step in failed_steps:
 -            if (failed_step in step.dependencies):
 -                raise StepNotReady
 -
 -
 -    def run_steps(self):
 -        try:
 -            logger.info('Observer run steps')
 -
 -            # Set of whole steps that failed
 -            failed_steps = []
 -
 -            # Set of individual objects within steps that failed
 -            failed_step_objects = set()
 -
 -            for S in self.ordered_steps:
 -                step = self.step_lookup[S]
 -                start_time=time.time()
 -                
 -                sync_step = step(driver=self.driver)
 -                sync_step.__name__ = step.__name__
 -                sync_step.dependencies = []
 -                try:
 -                    mlist = sync_step.provides
 -                    
 -                    for m in mlist:
 -                        sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
 -                except KeyError:
 -                    pass
 -                sync_step.debug_mode = debug_mode
 -
 -                should_run = False
 -                try:
 -                    # Various checks that decide whether
 -                    # this step runs or not
 -                    self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
 -                    self.check_schedule(sync_step) # dont run sync_network_routes if time since last run < 1 hour
 -                    should_run = True
 -                except StepNotReady:
 -                    logger.info('Step not ready: %s'%sync_step.__name__)
 -                    failed_steps.append(sync_step)
 -                except:
 -                    logger.info('Exception when checking schedule: %s'%sync_step.__name__)
 -                    failed_steps.append(sync_step)
 -
 -                if (should_run):
 -                    try:
 -                        duration=time.time() - start_time
 -
 -                        logger.info('Executing step %s' % sync_step.__name__)
 -
 -                        # ********* This is the actual sync step
 -                        #import pdb
 -                        #pdb.set_trace()
 -                        failed_objects = sync_step(failed=list(failed_step_objects))
 -
 -
 -                        self.check_duration(sync_step, duration)
 -                        if failed_objects:
 -                            failed_step_objects.update(failed_objects)
 -                        self.update_run_time(sync_step)
 -                    except:
 -                        logger.log_exc('Failure in step: %s'%sync_step.__name__)
 -                        failed_steps.append(S)
 -            self.save_run_times()
 -        except:
 -            logger.log_exc("Exception in observer run loop")
 -            traceback.print_exc()
 -
 -    def run(self):
 -        try:
 -            logger.info('Observer start run loop')
 -            if not self.driver.enabled:
 -                return
 -            if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
 -                return
 -
 -            while True:
 -                try:  
 -                    self.wait_for_event(timeout=30)       
 -                except: 
 -                    logger.log_exc("Exception in observer wait for event") 
 -                    traceback.print_exc()
 -
 -                try: 
 -                    self.run_steps()            
 -                except: 
 -                    logger.log_exc("Exception in observer run steps")
 -                    traceback.print_exc()
 -        except:
 -            logger.log_exc("Exception in observer run loop")
 -            traceback.print_exc()
 +      #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
 +      sync_steps = []
 +
 +      def __init__(self):
 +              # The Condition object that gets signalled by Feefie events
 +              self.step_lookup = {}
 +              self.load_sync_step_modules()
 +              self.load_sync_steps()
 +              self.event_cond = threading.Condition()
 +
 +              self.driver_kind = getattr(Config(), "observer_driver", "openstack")
 +              if self.driver_kind=="openstack":
 +                      self.driver = OpenStackDriver()
 +              else:
 +                      self.driver = NoOpDriver()
 +
 +      def wait_for_event(self, timeout):
 +              self.event_cond.acquire()
 +              self.event_cond.wait(timeout)
 +              self.event_cond.release()
 +
 +      def wake_up(self):
 +              logger.info('Wake up routine called. Event cond %r'%self.event_cond)
 +              self.event_cond.acquire()
 +              self.event_cond.notify()
 +              self.event_cond.release()
 +
 +      def load_sync_step_modules(self, step_dir=None):
 +              if step_dir is None:
 +                      if hasattr(Config(), "observer_steps_dir"):
 +                              step_dir = Config().observer_steps_dir
 +                      else:
 +                              step_dir = "/opt/planetstack/observer/steps"
 +
 +              for fn in os.listdir(step_dir):
 +                      pathname = os.path.join(step_dir,fn)
 +                      if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
 +                              module = imp.load_source(fn[:-3],pathname)
 +                              for classname in dir(module):
 +                                      c = getattr(module, classname, None)
 +
 +                                      # make sure 'c' is a descendent of SyncStep and has a
 +                                      # provides field (this eliminates the abstract base classes
 +                                      # since they don't have a provides)
 +
 +                                      if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
 +                                              self.sync_steps.append(c)
 +              logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
 +              # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
 +
 +      def load_sync_steps(self):
 +              dep_path = Config().observer_dependency_graph
 +              logger.info('Loading model dependency graph from %s' % dep_path)
 +              try:
 +                      # This contains dependencies between records, not sync steps
 +                      self.model_dependency_graph = json.loads(open(dep_path).read())
 +              except Exception,e:
 +                      raise e
 +
 +              try:
 +                      backend_path = Config().observer_pl_dependency_graph
 +                      logger.info('Loading backend dependency graph from %s' % backend_path)
 +                      # This contains dependencies between backend records
 +                      self.backend_dependency_graph = json.loads(open(backend_path).read())
 +              except Exception,e:
 +                      logger.info('Backend dependency graph not loaded')
 +                      # We can work without a backend graph
 +                      self.backend_dependency_graph = {}
 +
 +              provides_dict = {}
 +              for s in self.sync_steps:
 +                      self.step_lookup[s.__name__] = s 
 +                      for m in s.provides:
 +                              try:
 +                                      provides_dict[m.__name__].append(s.__name__)
 +                              except KeyError:
 +                                      provides_dict[m.__name__]=[s.__name__]
 +
 +                              
 +              step_graph = {}
 +              for k,v in self.model_dependency_graph.iteritems():
 +                      try:
 +                              for source in provides_dict[k]:
 +                                      for m in v:
 +                                              try:
 +                                                      for dest in provides_dict[m]:
 +                                                              # no deps, pass
 +                                                              try:
 +                                                                      if (dest not in step_graph[source]):
 +                                                                              step_graph[source].append(dest)
 +                                                              except:
 +                                                                      step_graph[source]=[dest]
 +                                              except KeyError:
 +                                                      pass
 +                                      
 +                      except KeyError:
 +                              pass
 +                              # no dependencies, pass
 +              
 +              #import pdb
 +              #pdb.set_trace()
 +              if (self.backend_dependency_graph):
 +                      backend_dict = {}
 +                      for s in self.sync_steps:
 +                              for m in s.serves:
 +                                      backend_dict[m]=s.__name__
 +                                      
 +                      for k,v in self.backend_dependency_graph.iteritems():
 +                              try:
 +                                      source = backend_dict[k]
 +                                      for m in v:
 +                                              try:
 +                                                      dest = backend_dict[m]
 +                                              except KeyError:
 +                                                      # no step serves this record; skip it
 +                                                      continue
 +                                              try:
 +                                                      step_graph[source].append(dest)
 +                                              except KeyError:
 +                                                      step_graph[source]=[dest]
 +                                              
 +                              except KeyError:
 +                                      pass
 +                                      # no dependencies, pass
 +
 +              dependency_graph = step_graph
 +
 +              self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
 +              print "Order of steps=",self.ordered_steps
 +              self.load_run_times()
 +              
 +
 +      def check_duration(self, step, duration):
 +              try:
 +                      if (duration > step.deadline):
 +                              logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.__name__,duration))
 +              except AttributeError:
 +                      # this step doesn't have a deadline set
 +                      pass
 +
 +      def update_run_time(self, step, deletion):
 +              if (not deletion):
 +                      self.last_run_times[step.__name__]=time.time()
 +              else:
 +                      self.last_deletion_run_times[step.__name__]=time.time()
 +
 +
 +      def check_schedule(self, step, deletion):
 +              last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
 +
 +              time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
 +              try:
 +                      if (time_since_last_run < step.requested_interval):
 +                              raise StepNotReady
 +              except AttributeError:
 +                      logger.info('Step %s does not have requested_interval set'%step.__name__)
 +                      raise StepNotReady
 +      
 +      def load_run_times(self):
 +              try:
 +                      jrun_times = open('/tmp/observer_run_times').read()
 +                      self.last_run_times = json.loads(jrun_times)
 +              except:
 +                      self.last_run_times={}
 +                      for e in self.ordered_steps:
 +                              self.last_run_times[e]=0
 +              try:
 +                      jrun_times = open('/tmp/observer_deletion_run_times').read()
 +                      self.last_deletion_run_times = json.loads(jrun_times)
 +              except:
 +                      self.last_deletion_run_times={}
 +                      for e in self.ordered_steps:
 +                              self.last_deletion_run_times[e]=0
 +
 +
 +
 +      def save_run_times(self):
 +              run_times = json.dumps(self.last_run_times)
 +              open('/tmp/observer_run_times','w').write(run_times)
 +
 +              deletion_run_times = json.dumps(self.last_deletion_run_times)
 +              open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
 +
 +      def check_class_dependency(self, step, failed_steps):
 +              step.dependencies = []
 +              for obj in step.provides:
 +                      step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
 +              for failed_step in failed_steps:
 +                      if (failed_step in step.dependencies):
 +                              raise StepNotReady
 +
 +      def run(self):
 +              if not self.driver.enabled:
 +                      return
 +              if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
 +                      return
 +
 +              while True:
 +                      try:
 +                              error_map_file = getattr(Config(), "error_map_path", "/opt/planetstack/error_map.txt")
 +                              error_mapper = ErrorMapper(error_map_file)
 +
 +                              logger.info('Waiting for event')
 +                              tBeforeWait = time.time()
 +                              self.wait_for_event(timeout=30)
 +                              logger.info('Observer woke up')
 +
 +                              # Two passes. One for sync, the other for deletion.
 +                              for deletion in (False,True):
 +                                      logger.info('Deletion pass...' if deletion else 'Creation pass...')
 +                                      # Set of whole steps that failed
 +                                      failed_steps = []
 +
 +                                      # Set of individual objects within steps that failed
 +                                      failed_step_objects = set()
 +
 +                                      ordered_steps = self.ordered_steps if not deletion else reversed(self.ordered_steps)
 +
 +                                      for S in ordered_steps:
 +                                              step = self.step_lookup[S]
 +                                              start_time=time.time()
 +                                              
 +                                              sync_step = step(driver=self.driver,error_map=error_mapper)
 +                                              sync_step.__name__ = step.__name__
 +                                              sync_step.dependencies = []
 +                                              try:
 +                                                      mlist = sync_step.provides
 +                                                      
 +                                                      for m in mlist:
 +                                                              sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
 +                                              except KeyError:
 +                                                      pass
 +                                              sync_step.debug_mode = debug_mode
 +
 +                                              should_run = False
 +                                              try:
 +                                                      # Various checks that decide whether
 +                                                      # this step runs or not
 +                                                      self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
 +                                                      self.check_schedule(sync_step,deletion) # dont run sync_network_routes if time since last run < 1 hour
 +                                                      should_run = True
 +                                              except StepNotReady:
 +                                                      logging.info('Step not ready: %s'%sync_step.__name__)
 +                                                      failed_steps.append(sync_step)
 +                                              except Exception,e:
 +                                                      logging.error('%r',e)
 +                                                      logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
 +                                                      failed_steps.append(sync_step)
 +
 +                                              if (should_run):
 +                                                      try:
 +                                                              logger.info('Executing step %s' % sync_step.__name__)
 +
 +                                                              # ********* This is the actual sync step
 +                                                              #import pdb
 +                                                              #pdb.set_trace()
 +                                                              failed_objects = sync_step(failed=list(failed_step_objects), deletion=deletion)
 +
 +                                                              # measure the duration after the step has actually run
 +                                                              duration=time.time() - start_time
 +                                                              self.check_duration(sync_step, duration)
 +                                                              if failed_objects:
 +                                                                      failed_step_objects.update(failed_objects)
 +
 +                                                              self.update_run_time(sync_step, deletion)
 +                                                      except Exception,e:
 +                                                              logging.error('Model step failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
 +                                                              logger.log_exc(e)
 +                                                              failed_steps.append(S)
 +                              self.save_run_times()
 +                      except Exception, e:
 +                              logging.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
 +                              logger.log_exc("Exception in observer run loop")
 +                              traceback.print_exc()
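
To make the graph construction in load_sync_steps concrete, here is a toy run under two hedged assumptions: the dependency JSON maps each model to the models it depends on, and toposort (imported above) orders dependencies before dependents:

    model_deps = {"Slice": ["Site"], "Sliver": ["Slice"]}        # toy model graph
    provides_dict = {"Site": ["SyncSites"], "Slice": ["SyncSlices"],
                     "Sliver": ["SyncSlivers"]}                  # model -> providing steps

    step_graph = {}
    for model, deps in model_deps.iteritems():
        for source in provides_dict.get(model, []):
            for dep in deps:
                for dest in provides_dict.get(dep, []):
                    step_graph.setdefault(source, []).append(dest)

    print step_graph  # {'SyncSlices': ['SyncSites'], 'SyncSlivers': ['SyncSlices']}

toposort then yields SyncSites, SyncSlices, SyncSlivers for the creation pass; the deletion pass in run() walks the same list reversed, so dependents are torn down before the things they depend on.
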
@@@ -2,6 -2,7 +2,6 @@@ import threadin
  import requests, json
  
  from planetstack.config import Config
 +from observer.deleter import Deleter
  
  import uuid
  import os
+ import imp          # needed by EventListener.load_deleter_modules below
+ import inspect      # needed by EventListener.load_deleter_modules below
@@@ -12,83 -13,149 +12,149 @@@ from fofum import Fofu
  import json
  import traceback
  
- # decorator that marks dispatachable event methods    
- def event(func):
-     setattr(func, 'event', func.__name__)
-     return func         
- class EventHandler:
-     # This code is currently not in use.
-     def __init__(self):
-         pass 
-     @staticmethod
-     def get_events():
-         events = []
-         for name in dir(EventHandler):
-             attribute = getattr(EventHandler, name)
-             if hasattr(attribute, 'event'):
-                 events.append(getattr(attribute, 'event'))
-         return events
+ random_client_id=None
+ def get_random_client_id():
+     global random_client_id
  
-     def dispatch(self, event, *args, **kwds):
-         if hasattr(self, event):
-             return getattr(self, event)(*args, **kwds)
- class EventSender:
-     def __init__(self,user=None,clientid=None):
+     if (random_client_id is None) and os.path.exists("/opt/planetstack/random_client_id"):
+         # try to use the last one we used, if we saved it
          try:
-             user = Config().feefie_client_user
+             random_client_id = open("/opt/planetstack/random_client_id","r").readline().strip()
+             print "get_random_client_id: loaded %s" % random_client_id
          except:
-             user = 'pl'
+             print "get_random_client_id: failed to read /opt/planetstack/random_client_id"
  
+     if random_client_id is None:
+         random_client_id = base64.urlsafe_b64encode(os.urandom(12))
+         print "get_random_client_id: generated new id %s" % random_client_id
+         # try to save it for later (XXX: could race with another client here)
          try:
-             clid = Config().feefie_client_id
+             open("/opt/planetstack/random_client_id","w").write("%s\n" % random_client_id)
          except:
-             clid = self.random_client_id()
-             
+             print "get_random_client_id: failed to write /opt/planetstack/random_client_id"
+     return random_client_id
+ # decorator that marks dispatchable event methods
+ def event(func):
+       setattr(func, 'event', func.__name__)
+       return func
  
-         self.fofum = Fofum(user=user)
-         self.fofum.make(clid)
+ class EventHandler:
+       # This code is currently not in use.
+       def __init__(self):
+               pass
+       @staticmethod
+       def get_events():
+               events = []
+               for name in dir(EventHandler):
+                       attribute = getattr(EventHandler, name)
+                       if hasattr(attribute, 'event'):
+                               events.append(getattr(attribute, 'event'))
+               return events
+       def dispatch(self, event, *args, **kwds):
+               if hasattr(self, event):
+                       return getattr(self, event)(*args, **kwds)
+                       
  
-     def fire(self,**kwargs):
+ class EventSender:
+       def __init__(self,user=None,clientid=None):
+               try:
+                       user = Config().feefie_client_user
+               except:
+                       user = 'pl'
+               try:
+                       clid = Config().feefie_client_id
+               except:
+                       clid = get_random_client_id()
+                       print "EventSender: no feefie_client_id configured. Using random id %s" % clid
+               self.fofum = Fofum(user=user)
+               self.fofum.make(clid)
+       def fire(self,**kwargs):
                  kwargs["uuid"] = str(uuid.uuid1())
 -              self.fofum.fire(json.dumps(kwargs))
 +        self.fofum.fire(json.dumps(kwargs))
  
  class EventListener:
-     def __init__(self,wake_up=None):
-         self.handler = EventHandler()
-         self.wake_up = wake_up
-     def handle_event(self, payload):
-         payload_dict = json.loads(payload)
-         if (self.wake_up):
-             self.wake_up()
-     def random_client_id(self):
-         try:
-             return self.client_id
-         except AttributeError:
-             self.client_id = base64.urlsafe_b64encode(os.urandom(12))
-             return self.client_id
-     def run(self):
-         # This is our unique client id, to be used when firing and receiving events
-         # It needs to be generated once and placed in the config file
-         try:
-             user = Config().feefie_client_user
-         except:
-             user = 'pl'
-         try:
-             clid = Config().feefie_client_id
-         except:
-             clid = self.random_client_id()
-         f = Fofum(user=user)
-         
-         listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
-         listener_thread.start()
+       def __init__(self,wake_up=None):
+               self.handler = EventHandler()
+               self.wake_up = wake_up
+               self.deleters = {}
+               self.load_deleter_modules()
+
+       def load_deleter_modules(self, deleter_dir=None):
+               if deleter_dir is None:
+                       if hasattr(Config(), "observer_deleters_dir"):
+                               deleter_dir = Config().observer_deleters_dir
+                       else:
+                               deleter_dir = "/opt/planetstack/observer/deleters"
+               for fn in os.listdir(deleter_dir):
+                       pathname = os.path.join(deleter_dir,fn)
+                       if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
+                               module = imp.load_source(fn[:-3],pathname)
+                               for classname in dir(module):
+                                       c = getattr(module, classname, None)
+                                       # make sure 'c' is a descendent of Deleter and has a
+                                       # model field (this eliminates the abstract base classes
+                                       # since they don't have a model)
+                                       if inspect.isclass(c) and issubclass(c, Deleter) and hasattr(c,"model") and c.model!=None:
+                                               modelName = c.model
+                                               if not modelName in self.deleters:
+                                                       self.deleters[modelName] = []
+                                               if not (c in self.deleters[modelName]):
+                                                       self.deleters[modelName].append(c)
+               print 'loaded deleters: %s' % ",".join(self.deleters.keys())
+
+       def handle_event(self, payload):
+               payload_dict = json.loads(payload)
+               try:
+                       deletion = payload_dict.get('delete_flag', False)
+                       if (deletion):
+                               model = payload_dict['model']
+                               pk = payload_dict['pk']
+                               model_dict = payload_dict['model_dict']
+                               for deleter in self.deleters[model]:
+                                       try:
+                                               deleter()(pk, model_dict)
+                                       except:
+                                               # something is silently eating these
+                                               # exceptions...
+                                               traceback.print_exc()
+                                               raise
+               except:
+                       deletion = False
+               if (not deletion and self.wake_up):
+                       self.wake_up()
+
+       def run(self):
+               # This is our unique client id, to be used when firing and receiving events
+               # It needs to be generated once and placed in the config file
+               try:
+                       user = Config().feefie_client_user
+               except:
+                       user = 'pl'
+               try:
+                       clid = Config().feefie_client_id
+               except:
+                       clid = get_random_client_id()
+                       print "EventListener: no feefie_client_id configured. Using random id %s" % clid
+               f = Fofum(user=user)
+
+               listener_thread = threading.Thread(target=f.listen_for_event,args=(clid,self.handle_event))
+               listener_thread.start()
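
A hedged sketch of the round trip between EventSender and EventListener (assumes a reachable Feefie service, a populated deleters directory, and that Fofum delivers fired payloads to listeners sharing the client id):

    def wake_up():
        print "event received; the observer will run its sync steps"

    listener = EventListener(wake_up=wake_up)
    listener.run()                      # spawns the Fofum listener thread

    sender = EventSender()
    sender.fire(delete_flag=False)      # a plain wake-up: no deleters run
    # A deletion event instead carries the soft-deleted object's identity,
    # e.g. sender.fire(delete_flag=True, model="Site", pk=7, model_dict={...}),
    # and handle_event dispatches it to every Deleter registered for "Site".
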
@@@ -3,7 -3,6 +3,7 @@@ import base6
  from datetime import datetime
  from planetstack.config import Config
  from util.logger import Logger, logging
 +from observer.steps import *
  
  logger = Logger(level=logging.INFO)
  
@@@ -34,8 -33,6 +34,8 @@@ class SyncStep
          """
          dependencies = []
          self.driver = args.get('driver')
 +        self.error_map = args.get('error_map')
 +
          try:
              self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
          except:
  
          return
  
 -    def fetch_pending(self):
 +    def fetch_pending(self, deletion=False):
          return []
          #return Sliver.objects.filter(ip=None)
      
      def check_dependencies(self, obj, failed):
          for dep in self.dependencies:
-             peer_object = getattr(obj, dep.lower())
+             peer_name = dep[0].lower() + dep[1:]    # django names are camelCased with the first letter lower
+             peer_object = getattr(obj, peer_name)
              if (peer_object.pk==failed.pk):
                  raise FailedDependency
  
 -    def call(self, failed=[]):
 -        pending = self.fetch_pending()
 +    def call(self, failed=[], deletion=False):
 +        pending = self.fetch_pending(deletion)
          for o in pending:
              try:
                  for f in failed:
                      self.check_dependencies(o,f) # Raises exception if failed
 -                self.sync_record(o)
 -                o.enacted = datetime.now() # Is this the same timezone? XXX
 -                o.save(update_fields=['enacted'])
 -            except:
 -                logger.log_exc("sync step %s failed!" % self.__name__)
 +                if (deletion):
 +                    self.delete_record(o)
 +                    o.delete(purge=True)
 +                else:
 +                    self.sync_record(o)
 +                    o.enacted = datetime.now() # Is this the same timezone? XXX
 +                    o.backend_status = "OK"
 +                    o.save(update_fields=['enacted','backend_status'])
 +            except Exception,e:
 +                try:
 +                    o.backend_status = self.error_map.map(str(e))
 +                except:
 +                    o.backend_status = str(e)
 +
 +                o.save(update_fields=['backend_status'])
 +
 +                logger.log_exc("sync step failed!")
                  failed.append(o)
  
          return failed
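
A minimal SyncStep subclass sketch showing how the new hooks fit together. Only the fetch_pending/sync_record/delete_record hooks and the call() contract come from the diff; the Site queries and driver calls are assumptions, and deleted_objects is a hypothetical manager, since the default one above filters deleted=False and would hide exactly the rows a deletion pass needs:

    from observer.syncstep import SyncStep
    from core.models import Site

    class SyncSites(SyncStep):
        provides = [Site]
        requested_interval = 0              # eligible on every pass

        def fetch_pending(self, deletion=False):
            if deletion:
                # hypothetical manager that exposes soft-deleted rows
                return Site.deleted_objects.all()
            return Site.objects.filter(enacted=None)

        def sync_record(self, site):        # creation pass
            self.driver.create_site(site)   # hypothetical driver call

        def delete_record(self, site):      # runs before o.delete(purge=True)
            self.driver.delete_site(site)   # hypothetical driver call

On success, call() stamps enacted and sets backend_status="OK"; on failure it stores the mapped error in backend_status, so per-object provisioning state can be surfaced in the UI.
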