class StepNotReady(Exception):
pass
-def toposort(g, steps):
+def toposort(g, steps=None):
+ if (not steps):
+ keys = set(g.keys())
+ values = set({})
+ for v in g.values():
+ values=values | set(v)
+
+ steps=list(keys|values)
reverse = {}
for k,v in g.items():
marked.append(m)
except KeyError:
pass
- order.append(n)
+ if (n in steps):
+ order.append(n)
+
+ order.reverse()
+ order.extend(set(steps)-set(order))
return order
class PlanetStackObserver:
- sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,GarbageCollector]
+ sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
def __init__(self):
# The Condition object that gets signalled by Feefie events
self.event_cond.release()
def load_sync_steps(self):
- dep_path = Config().observer_backend_dependency_graph
+ dep_path = Config().observer_dependency_graph
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
provides_dict = {}
for s in self.sync_steps:
- self.step_lookup[s.__name__] = s
+ self.step_lookup[s.__name__] = s
for m in s.provides:
try:
provides_dict[m.__name__].append(s.__name__)
dependency_graph = step_graph
- self.ordered_steps = toposort(dependency_graph, self.sync_steps)
+ self.ordered_steps = toposort(dependency_graph, map(lambda s:s.__name__,self.sync_steps))
print "Order of steps=",self.ordered_steps
self.load_run_times()
self.last_run_times[step.__name__]=time.time()
def check_schedule(self, step):
- time_since_last_run = time.time() - self.last_run_times[step.__name__]
+ time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
try:
if (time_since_last_run < step.requested_interval):
raise StepNotReady
self.last_run_times[e]=0
-
def save_run_times(self):
run_times = json.dumps(self.last_run_times)
open('/tmp/observer_run_times','w').write(run_times)
def check_class_dependency(self, step, failed_steps):
+ step.dependencies = []
+ for obj in step.provides:
+ step.dependencies.extend(self.model_dependency_graph.get(obj.__name__, []))
for failed_step in failed_steps:
- dependencies = self.model_dependency_graph.get(step.provides[0].__name__, [])
- if (failed_step in dependencies):
+ if (failed_step in step.dependencies):
raise StepNotReady
def run(self):
if not self.driver.enabled or not self.driver.has_openstack:
return
-
while True:
try:
logger.info('Waiting for event')
tBeforeWait = time.time()
- self.wait_for_event(timeout=300)
+ self.wait_for_event(timeout=30)
logger.info('Observer woke up')
# Set of whole steps that failed
start_time=time.time()
sync_step = step(driver=self.driver)
- sync_step.__name__ = step.__name__
- #sync_step.dependencies = self.dependencies[sync_step.name]
+ sync_step.__name__ = step.__name__
+ sync_step.dependencies = []
+ try:
+ mlist = sync_step.provides
+
+ for m in mlist:
+ sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
+ except KeyError:
+ pass
sync_step.debug_mode = debug_mode
should_run = False
duration=time.time() - start_time
# ********* This is the actual sync step
+ #import pdb
+ #pdb.set_trace()
failed_objects = sync_step(failed=failed_step_objects)
failed_step_objects.extend(failed_objects)
self.update_run_time(sync_step)
except:
+ raise
failed_steps.append(S)
self.save_run_times()
except: