Merge branch 'master' of ssh://git.planet-lab.org/git/plstackapi into observer3.0
authorSapan Bhatia <gwsapan@gmail.com>
Wed, 16 Jul 2014 04:17:33 +0000 (00:17 -0400)
committerSapan Bhatia <gwsapan@gmail.com>
Wed, 16 Jul 2014 04:17:33 +0000 (00:17 -0400)
Conflicts:
planetstack/core/fixtures/demo_data.json
planetstack/core/fixtures/initial_data.json
planetstack/core/models/plcorebase.py
planetstack/observer/event_loop.py
planetstack/observer/event_manager.py
planetstack/observer/steps/garbage_collector.py
planetstack/observer/syncstep.py

planetstack/core/models/plcorebase.py
planetstack/core/models/user.py
planetstack/observer/error_mapper.py [new file with mode: 0644]
planetstack/observer/event_loop.py
planetstack/observer/event_manager.py
planetstack/observer/syncstep.py

index ec79419..34af4a4 100644 (file)
@@ -18,16 +18,29 @@ except:
     def notify_observer(*args, **kwargs):
         pass
 
-class PlCoreBase(models.Model):
+# This manager will be inherited by all subclasses because
+# the core model is abstract.
+class PlCoreBaseManager(models.Manager):
+    def get_query_set(self):
+        return super(PlCoreBaseManager, self).get_query_set().filter(deleted=False)
 
+class PlCoreBase(models.Model):
+    objects = PlCoreBaseManager()
+    created = models.DateTimeField(auto_now_add=True)
+    updated = models.DateTimeField(auto_now=True)
     # default values for created and updated are only there to keep evolution
     # from failing.
 
     created = models.DateTimeField(auto_now_add=True, default=datetime.datetime.now())
     updated = models.DateTimeField(auto_now=True, default=datetime.datetime.now())
     enacted = models.DateTimeField(null=True, default=None)
+    backend_status = models.CharField(max_length=140,
+                                      default="Provisioning in progress")
+    deleted = models.BooleanField(default=False)
 
     class Meta:
+        # Changing abstract to False would require the managers of subclasses of
+        # PlCoreBase to be customized individually.
         abstract = True
         app_label = "core"
 
@@ -62,20 +75,18 @@ class PlCoreBase(models.Model):
 
     def delete(self, *args, **kwds):
         # so we have something to give the observer
-        pk = self.pk
-        model_dict = model_to_dict(self)
-        for (k,v) in model_dict.items():
-            # things like datetime are not JSON serializable
-            model_dict[k] = str(v)
+        purge = kwds.get('purge',True)
+        if (observer_disabled or purge):
+            super(PlCoreBase, self).delete(*args, **kwds)
+        else:
+            self.deleted = True
+            self.enacted=None
+            self.save(update_fields=['enacted','deleted'])
 
-        super(PlCoreBase, self).delete(*args, **kwds)
-
-        # This is a no-op if observer_disabled is set
-        notify_observer(model=self, delete=True, pk=pk, model_dict=model_dict)
 
     def save(self, *args, **kwargs):
         super(PlCoreBase, self).save(*args, **kwargs)
-        
+
         # This is a no-op if observer_disabled is set
         notify_observer()
 
index 3c4d377..0272661 100644 (file)
@@ -11,6 +11,9 @@ from operator import itemgetter, attrgetter
 
 # Create your models here.
 class UserManager(BaseUserManager):
+    def get_query_set(self):
+        return super(UserManager, self).get_query_set().filter(deleted=False)
+
     def create_user(self, email, firstname, lastname, password=None):
         """
         Creates and saves a User with the given email, date of
@@ -75,6 +78,9 @@ class User(AbstractBaseUser):
     created = models.DateTimeField(auto_now_add=True)
     updated = models.DateTimeField(auto_now=True)
     enacted = models.DateTimeField(null=True, default=None)
+    backend_status = models.CharField(max_length=140,
+                                      default="Provisioning in progress")
+    deleted = models.BooleanField(default=False)
 
     timezone = TimeZoneField()
 
diff --git a/planetstack/observer/error_mapper.py b/planetstack/observer/error_mapper.py
new file mode 100644 (file)
index 0000000..a7daa59
--- /dev/null
@@ -0,0 +1,25 @@
+from planetstack.config import Config
+from util.logger import Logger, logging, logger
+
+class ErrorMapper:
+       def __init__(self, error_map_file):
+               self.error_map = {}
+               try:
+                       error_map_lines = open(error_map_file).read().splitlines()
+                       for l in error_map_lines:
+                               if (not l.startswith('#')):
+                                       splits = l.split('->')
+                       k,v = map(lambda i:i.strip(),splits)
+                                       self.error_map[k]=v
+               except:
+                       logging.info('Could not read error map')
+
+
+       def map(self, error):
+               return self.error_map[error]
+
+
+
+
+
+
index 6c19215..ec49dd7 100644 (file)
@@ -19,6 +19,7 @@ from planetstack.config import Config
 #from observer.steps import *
 from syncstep import SyncStep
 from toposort import toposort
+from observer.error_mapper import ErrorMapper
 
 debug_mode = False
 
@@ -32,264 +33,278 @@ class NoOpDriver:
          self.enabled = True
 
 class PlanetStackObserver:
-    #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
-    sync_steps = []
-
-    def __init__(self):
-        # The Condition object that gets signalled by Feefie events
-        self.step_lookup = {}
-        self.load_sync_step_modules()
-        self.load_sync_steps()
-        self.event_cond = threading.Condition()
-
-
-        self.driver_kind = getattr(Config(), "observer_driver", "openstack")
-        if self.driver_kind=="openstack":
-            self.driver = OpenStackDriver()
-        else:
-            self.driver = NoOpDriver()
-
-    def wait_for_event(self, timeout):
-        logger.info('Waiting for event')
-        self.event_cond.acquire()
-        self.event_cond.wait(timeout)
-        self.event_cond.release()
-
-    def wake_up(self):
-        logger.info('Wake up routine called. Event cond %r'%self.event_cond)
-        self.event_cond.acquire()
-        self.event_cond.notify()
-        self.event_cond.release()
-
-    def load_sync_step_modules(self, step_dir=None):
-        if step_dir is None:
-            if hasattr(Config(), "observer_steps_dir"):
-                step_dir = Config().observer_steps_dir
-            else:
-                step_dir = "/opt/planetstack/observer/steps"
-
-        for fn in os.listdir(step_dir):
-            pathname = os.path.join(step_dir,fn)
-            if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
-                module = imp.load_source(fn[:-3],pathname)
-                for classname in dir(module):
-                    c = getattr(module, classname, None)
-
-                    # make sure 'c' is a descendent of SyncStep and has a
-                    # provides field (this eliminates the abstract base classes
-                    # since they don't have a provides)
-
-                    if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
-                        self.sync_steps.append(c)
-        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
-        # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
-
-    def load_sync_steps(self):
-        dep_path = Config().observer_dependency_graph
-        logger.info('Loading model dependency graph from %s' % dep_path)
-        try:
-            # This contains dependencies between records, not sync steps
-            self.model_dependency_graph = json.loads(open(dep_path).read())
-        except Exception,e:
-            raise e
-
-        try:
-            backend_path = Config().observer_pl_dependency_graph
-            logger.info('Loading backend dependency graph from %s' % backend_path)
-            # This contains dependencies between backend records
-            self.backend_dependency_graph = json.loads(open(backend_path).read())
-        except Exception,e:
-            logger.info('Backend dependency graph not loaded')
-            # We can work without a backend graph
-            self.backend_dependency_graph = {}
-
-        provides_dict = {}
-        for s in self.sync_steps:
-            self.step_lookup[s.__name__] = s 
-            for m in s.provides:
-                try:
-                    provides_dict[m.__name__].append(s.__name__)
-                except KeyError:
-                    provides_dict[m.__name__]=[s.__name__]
-
-                
-        step_graph = {}
-        for k,v in self.model_dependency_graph.iteritems():
-            try:
-                for source in provides_dict[k]:
-                    for m in v:
-                        try:
-                            for dest in provides_dict[m]:
-                                # no deps, pass
-                                try:
-                                    if (dest not in step_graph[source]):
-                                        step_graph[source].append(dest)
-                                except:
-                                    step_graph[source]=[dest]
-                        except KeyError:
-                            pass
-                    
-            except KeyError:
-                pass
-                # no dependencies, pass
-        
-        #import pdb
-        #pdb.set_trace()
-        if (self.backend_dependency_graph):
-            backend_dict = {}
-            for s in self.sync_steps:
-                for m in s.serves:
-                    backend_dict[m]=s.__name__
-                    
-            for k,v in backend_dependency_graph.iteritems():
-                try:
-                    source = backend_dict[k]
-                    for m in v:
-                        try:
-                            dest = backend_dict[m]
-                        except KeyError:
-                            # no deps, pass
-                            pass
-                        step_graph[source]=dest
-                        
-                except KeyError:
-                    pass
-                    # no dependencies, pass
-
-        dependency_graph = step_graph
-
-        self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
-        print "Order of steps=",self.ordered_steps
-        self.load_run_times()
-        
-
-    def check_duration(self, step, duration):
-        try:
-            if (duration > step.deadline):
-                logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
-        except AttributeError:
-            # S doesn't have a deadline
-            pass
-
-    def update_run_time(self, step):
-        self.last_run_times[step.__name__]=time.time()
-
-    def check_schedule(self, step):
-        time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
-        try:
-            if (time_since_last_run < step.requested_interval):
-                raise StepNotReady
-        except AttributeError:
-            logger.info('Step %s does not have requested_interval set'%step.__name__)
-            raise StepNotReady
-    
-    def load_run_times(self):
-        try:
-            jrun_times = open('/tmp/observer_run_times').read()
-            self.last_run_times = json.loads(jrun_times)
-        except:
-            self.last_run_times={}
-            for e in self.ordered_steps:
-                self.last_run_times[e]=0
-
-
-    def save_run_times(self):
-        run_times = json.dumps(self.last_run_times)
-        open('/tmp/observer_run_times','w').write(run_times)
-
-    def check_class_dependency(self, step, failed_steps):
-        step.dependenices = []
-        for obj in step.provides:
-            step.dependenices.extend(self.model_dependency_graph.get(obj.__name__, []))
-        for failed_step in failed_steps:
-            if (failed_step in step.dependencies):
-                raise StepNotReady
-
-
-    def run_steps(self):
-        try:
-            logger.info('Observer run steps')
-
-            # Set of whole steps that failed
-            failed_steps = []
-
-            # Set of individual objects within steps that failed
-            failed_step_objects = set()
-
-            for S in self.ordered_steps:
-                step = self.step_lookup[S]
-                start_time=time.time()
-                
-                sync_step = step(driver=self.driver)
-                sync_step.__name__ = step.__name__
-                sync_step.dependencies = []
-                try:
-                    mlist = sync_step.provides
-                    
-                    for m in mlist:
-                        sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
-                except KeyError:
-                    pass
-                sync_step.debug_mode = debug_mode
-
-                should_run = False
-                try:
-                    # Various checks that decide whether
-                    # this step runs or not
-                    self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
-                    self.check_schedule(sync_step) # dont run sync_network_routes if time since last run < 1 hour
-                    should_run = True
-                except StepNotReady:
-                    logger.info('Step not ready: %s'%sync_step.__name__)
-                    failed_steps.append(sync_step)
-                except:
-                    logger.info('Exception when checking schedule: %s'%sync_step.__name__)
-                    failed_steps.append(sync_step)
-
-                if (should_run):
-                    try:
-                        duration=time.time() - start_time
-
-                        logger.info('Executing step %s' % sync_step.__name__)
-
-                        # ********* This is the actual sync step
-                        #import pdb
-                        #pdb.set_trace()
-                        failed_objects = sync_step(failed=list(failed_step_objects))
-
-
-                        self.check_duration(sync_step, duration)
-                        if failed_objects:
-                            failed_step_objects.update(failed_objects)
-                        self.update_run_time(sync_step)
-                    except:
-                        logger.log_exc('Failure in step: %s'%sync_step.__name__)
-                        failed_steps.append(S)
-            self.save_run_times()
-        except:
-            logger.log_exc("Exception in observer run loop")
-            traceback.print_exc()
-
-    def run(self):
-        try:
-            logger.info('Observer start run loop')
-            if not self.driver.enabled:
-                return
-            if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
-                return
-
-            while True:
-                try:  
-                    self.wait_for_event(timeout=30)       
-                except: 
-                    logger.log_exc("Exception in observer wait for event") 
-                    traceback.print_exc()
-
-                try: 
-                    self.run_steps()            
-                except: 
-                    logger.log_exc("Exception in observer run steps")
-                    traceback.print_exc()
-        except:
-            logger.log_exc("Exception in observer run loop")
-            traceback.print_exc()
+       #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
+       sync_steps = []
+
+       def __init__(self):
+               # The Condition object that gets signalled by Feefie events
+               self.step_lookup = {}
+               self.load_sync_step_modules()
+               self.load_sync_steps()
+               self.event_cond = threading.Condition()
+
+               self.driver_kind = getattr(Config(), "observer_driver", "openstack")
+               if self.driver_kind=="openstack":
+                       self.driver = OpenStackDriver()
+               else:
+                       self.driver = NoOpDriver()
+
+       def wait_for_event(self, timeout):
+               self.event_cond.acquire()
+               self.event_cond.wait(timeout)
+               self.event_cond.release()
+
+       def wake_up(self):
+               logger.info('Wake up routine called. Event cond %r'%self.event_cond)
+               self.event_cond.acquire()
+               self.event_cond.notify()
+               self.event_cond.release()
+
+       def load_sync_step_modules(self, step_dir=None):
+               if step_dir is None:
+                       if hasattr(Config(), "observer_steps_dir"):
+                               step_dir = Config().observer_steps_dir
+                       else:
+                               step_dir = "/opt/planetstack/observer/steps"
+
+               for fn in os.listdir(step_dir):
+                       pathname = os.path.join(step_dir,fn)
+                       if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
+                               module = imp.load_source(fn[:-3],pathname)
+                               for classname in dir(module):
+                                       c = getattr(module, classname, None)
+
+                                       # make sure 'c' is a descendent of SyncStep and has a
+                                       # provides field (this eliminates the abstract base classes
+                                       # since they don't have a provides)
+
+                                       if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
+                                               self.sync_steps.append(c)
+               logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
+               # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
+
+       def load_sync_steps(self):
+               dep_path = Config().observer_dependency_graph
+               logger.info('Loading model dependency graph from %s' % dep_path)
+               try:
+                       # This contains dependencies between records, not sync steps
+                       self.model_dependency_graph = json.loads(open(dep_path).read())
+               except Exception,e:
+                       raise e
+
+               try:
+                       backend_path = Config().observer_pl_dependency_graph
+                       logger.info('Loading backend dependency graph from %s' % backend_path)
+                       # This contains dependencies between backend records
+                       self.backend_dependency_graph = json.loads(open(backend_path).read())
+               except Exception,e:
+                       logger.info('Backend dependency graph not loaded')
+                       # We can work without a backend graph
+                       self.backend_dependency_graph = {}
+
+               provides_dict = {}
+               for s in self.sync_steps:
+                       self.step_lookup[s.__name__] = s 
+                       for m in s.provides:
+                               try:
+                                       provides_dict[m.__name__].append(s.__name__)
+                               except KeyError:
+                                       provides_dict[m.__name__]=[s.__name__]
+
+                               
+               step_graph = {}
+               for k,v in self.model_dependency_graph.iteritems():
+                       try:
+                               for source in provides_dict[k]:
+                                       for m in v:
+                                               try:
+                                                       for dest in provides_dict[m]:
+                                                               # no deps, pass
+                                                               try:
+                                                                       if (dest not in step_graph[source]):
+                                                                               step_graph[source].append(dest)
+                                                               except:
+                                                                       step_graph[source]=[dest]
+                                               except KeyError:
+                                                       pass
+                                       
+                       except KeyError:
+                               pass
+                               # no dependencies, pass
+               
+               #import pdb
+               #pdb.set_trace()
+               if (self.backend_dependency_graph):
+                       backend_dict = {}
+                       for s in self.sync_steps:
+                               for m in s.serves:
+                                       backend_dict[m]=s.__name__
+                                       
+                       for k,v in self.backend_dependency_graph.iteritems():
+                               try:
+                                       source = backend_dict[k]
+                                       for m in v:
+                                               try:
+                                                       dest = backend_dict[m]
+                                               except KeyError:
+                                                       # no deps, pass
+                                                       pass
+                                               step_graph[source]=dest
+                                               
+                               except KeyError:
+                                       pass
+                                       # no dependencies, pass
+
+               dependency_graph = step_graph
+
+               self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
+               print "Order of steps=",self.ordered_steps
+               self.load_run_times()
+               
+
+       def check_duration(self, step, duration):
+               try:
+                       if (duration > step.deadline):
+                               logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
+               except AttributeError:
+                       # S doesn't have a deadline
+                       pass
+
+       def update_run_time(self, step, deletion):
+               if (not deletion):
+                       self.last_run_times[step.__name__]=time.time()
+               else:
+                       self.last_deletion_run_times[step.__name__]=time.time()
+
+
+       def check_schedule(self, step, deletion):
+               last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
+
+               time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
+               try:
+                       if (time_since_last_run < step.requested_interval):
+                               raise StepNotReady
+               except AttributeError:
+                       logger.info('Step %s does not have requested_interval set'%step.__name__)
+                       raise StepNotReady
+       
+       def load_run_times(self):
+               try:
+                       jrun_times = open('/tmp/observer_run_times').read()
+                       self.last_run_times = json.loads(jrun_times)
+               except:
+                       self.last_run_times={}
+                       for e in self.ordered_steps:
+                               self.last_run_times[e]=0
+               try:
+                       jrun_times = open('/tmp/observer_deletion_run_times').read()
+                       self.last_deletion_run_times = json.loads(jrun_times)
+               except:
+                       self.last_deletion_run_times={}
+                       for e in self.ordered_steps:
+                               self.last_deletion_run_times[e]=0
+
+
+
+       def save_run_times(self):
+               run_times = json.dumps(self.last_run_times)
+               open('/tmp/observer_run_times','w').write(run_times)
+
+               deletion_run_times = json.dumps(self.last_deletion_run_times)
+               open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
+
+       def check_class_dependency(self, step, failed_steps):
+               step.dependencies = []
+               for obj in step.provides:
+                       step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
+               for failed_step in failed_steps:
+                       if (failed_step in step.dependencies):
+                               raise StepNotReady
+
+       def run(self):
+               if not self.driver.enabled:
+                       return
+               if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
+                       return
+
+               while True:
+                       try:
+                               error_map_file = getattr(Config(), "error_map_path", "/opt/planetstack/error_map.txt")
+                               error_mapper = ErrorMapper(error_map_file)
+
+                               logger.info('Waiting for event')
+                               tBeforeWait = time.time()
+                               self.wait_for_event(timeout=30)
+                               logger.info('Observer woke up')
+
+                               # Two passes. One for sync, the other for deletion.
+                               for deletion in (False,True):
+                                       logger.info('Deletion pass...' if deletion else 'Creation pass...')
+                                       # Set of whole steps that failed
+                                       failed_steps = []
+
+                                       # Set of individual objects within steps that failed
+                                       failed_step_objects = set()
+
+                                       ordered_steps = self.ordered_steps if not deletion else reversed(self.ordered_steps)
+
+                                       for S in ordered_steps:
+                                               step = self.step_lookup[S]
+                                               start_time=time.time()
+                                               
+                                               sync_step = step(driver=self.driver,error_map=error_mapper)
+                                               sync_step.__name__ = step.__name__
+                                               sync_step.dependencies = []
+                                               try:
+                                                       mlist = sync_step.provides
+                                                       
+                                                       for m in mlist:
+                                                               sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
+                                               except KeyError:
+                                                       pass
+                                               sync_step.debug_mode = debug_mode
+
+                                               should_run = False
+                                               try:
+                                                       # Various checks that decide whether
+                                                       # this step runs or not
+                                                       self.check_class_dependency(sync_step, failed_steps) # dont run Slices if Sites failed
+                                                       self.check_schedule(sync_step,deletion) # dont run sync_network_routes if time since last run < 1 hour
+                                                       should_run = True
+                                               except StepNotReady:
+                                                       logging.info('Step not ready: %s'%sync_step.__name__)
+                                                       failed_steps.append(sync_step)
+                                               except Exception,e:
+                                                       logging.error('%r',e)
+                                                       logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
+                                                       failed_steps.append(sync_step)
+
+                                               if (should_run):
+                                                       try:
+                                                               duration=time.time() - start_time
+
+                                                               logger.info('Executing step %s' % sync_step.__name__)
+
+                                                               # ********* This is the actual sync step
+                                                               #import pdb
+                                                               #pdb.set_trace()
+                                                               failed_objects = sync_step(failed=list(failed_step_objects), deletion=deletion)
+
+
+                                                               self.check_duration(sync_step, duration)
+                                                               if failed_objects:
+                                                                       failed_step_objects.update(failed_objects)
+
+                                                               if (not deletion):
+                                                                       self.update_run_time(sync_step, False)
+                                                               else:
+                                                                       self.update_run_time(sync_step, True)
+                                                       except Exception,e:
+                                                               logging.error('Model step failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
+                                                               logger.log_exc(e)
+                                                               failed_steps.append(S)
+                               self.save_run_times()
+                       except Exception, e:
+                               logging.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!',e)
+                               logger.log_exc("Exception in observer run loop")
+                               traceback.print_exc()
index bd04ced..d2a53a7 100644 (file)
@@ -2,7 +2,6 @@ import threading
 import requests, json
 
 from planetstack.config import Config
-from observer.deleter import Deleter
 
 import uuid
 import os
@@ -79,7 +78,7 @@ class EventSender:
 
        def fire(self,**kwargs):
                 kwargs["uuid"] = str(uuid.uuid1())
-               self.fofum.fire(json.dumps(kwargs))
+        self.fofum.fire(json.dumps(kwargs))
 
 class EventListener:
        def __init__(self,wake_up=None):
index c41628a..fc0cb0b 100644 (file)
@@ -3,6 +3,7 @@ import base64
 from datetime import datetime
 from planetstack.config import Config
 from util.logger import Logger, logging
+from observer.steps import *
 
 logger = Logger(level=logging.INFO)
 
@@ -33,6 +34,8 @@ class SyncStep:
         """
         dependencies = []
         self.driver = args.get('driver')
+        self.error_map = args.get('error_map')
+
         try:
             self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
         except:
@@ -40,7 +43,7 @@ class SyncStep:
 
         return
 
-    def fetch_pending(self):
+    def fetch_pending(self, deletion=False):
         return []
         #return Sliver.objects.filter(ip=None)
     
@@ -51,17 +54,29 @@ class SyncStep:
             if (peer_object.pk==failed.pk):
                 raise FailedDependency
 
-    def call(self, failed=[]):
-        pending = self.fetch_pending()
+    def call(self, failed=[], deletion=False):
+        pending = self.fetch_pending(deletion)
         for o in pending:
             try:
                 for f in failed:
                     self.check_dependencies(o,f) # Raises exception if failed
-                self.sync_record(o)
-                o.enacted = datetime.now() # Is this the same timezone? XXX
-                o.save(update_fields=['enacted'])
-            except:
-                logger.log_exc("sync step %s failed!" % self.__name__)
+                if (deletion):
+                    self.delete_record(o)
+                    o.delete(purge=True)
+                else:
+                    self.sync_record(o)
+                    o.enacted = datetime.now() # Is this the same timezone? XXX
+                    o.backend_status = "OK"
+                    o.save(update_fields=['enacted','backend_status'])
+            except Exception,e:
+                try:
+                    o.backend_status = self.error_map.map(str(e))
+                except:
+                    o.backend_status = str(e)
+
+                o.save(update_fields=['backend_status'])
+
+                logger.log_exc("sync step failed!")
                 failed.append(o)
 
         return failed