+import os
+import imp
+import inspect
import time
import traceback
import commands
#from timeout import timeout
from planetstack.config import Config
from observer.steps import *
+from syncstep import SyncStep
debug_mode = False
-logger = Logger(logfile='observer.log', level=logging.INFO)
+logger = Logger(level=logging.INFO)
class StepNotReady(Exception):
+    """Raised when a sync step is gated by its schedule or a failed dependency."""
    pass
def toposort(g, steps=None):
- if (not steps):
- keys = set(g.keys())
- values = set({})
- for v in g.values():
- values=values | set(v)
-
- steps=list(keys|values)
+ if (not steps):
+ keys = set(g.keys())
+ values = set({})
+ for v in g.values():
+ values=values | set(v)
+
+ steps=list(keys|values)
reverse = {}
for k,v in g.items():
if (not v):
sources.append(k)
- rev_order = []
+ order = []
marked = []
while sources:
marked.append(m)
except KeyError:
pass
- if (n in steps):
- rev_order.append(n)
+ if (n in steps):
+ order.append(n)
- order = rev_order.reverse()
+ order.reverse()
order.extend(set(steps)-set(order))
return order
class PlanetStackObserver:
- sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,GarbageCollector]
+ #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivileges,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]
+ sync_steps = []
def __init__(self):
# The Condition object that gets signalled by Feefie events
self.step_lookup = {}
+ self.load_sync_step_modules()
self.load_sync_steps()
self.event_cond = threading.Condition()
self.driver = OpenStackDriver()
self.event_cond.acquire()
self.event_cond.wait(timeout)
self.event_cond.release()
-
+
    def wake_up(self):
+        """Wake the observer loop immediately by notifying its event condition."""
        logger.info('Wake up routine called. Event cond %r'%self.event_cond)
        self.event_cond.acquire()
        self.event_cond.notify()
        self.event_cond.release()
+    def load_sync_step_modules(self, step_dir=None):
+        """Discover SyncStep subclasses under step_dir and register them.
+
+        Every .py file in the directory (except __init__.py) is loaded with
+        imp.load_source; any class that derives from SyncStep and declares a
+        'provides' attribute is appended once to self.sync_steps.
+        NOTE(review): sync_steps is a class-level list, so registration is
+        shared across observer instances — presumably intentional; confirm.
+        """
+        if step_dir is None:
+            # Prefer the configured step directory; fall back to the default install path.
+            if hasattr(Config(), "step_dir"):
+                step_dir = Config().step_dir
+            else:
+                step_dir = "/opt/planetstack/observer/steps"
+
+        for fn in os.listdir(step_dir):
+            pathname = os.path.join(step_dir,fn)
+            if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
+                module = imp.load_source(fn[:-3],pathname)
+                for classname in dir(module):
+                    c = getattr(module, classname, None)
+
+                    # make sure 'c' is a descendent of SyncStep and has a
+                    # provides field (this eliminates the abstract base classes
+                    # since they don't have a provides)
+
+                    if inspect.isclass(c) and issubclass(c, SyncStep) and hasattr(c,"provides") and (c not in self.sync_steps):
+                        self.sync_steps.append(c)
+        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
+
def load_sync_steps(self):
- dep_path = Config().observer_backend_dependency_graph
+ dep_path = Config().observer_dependency_graph
+ logger.info('Loading model dependency graph from %s' % dep_path)
try:
# This contains dependencies between records, not sync steps
self.model_dependency_graph = json.loads(open(dep_path).read())
try:
backend_path = Config().observer_pl_dependency_graph
+ logger.info('Loading backend dependency graph from %s' % backend_path)
# This contains dependencies between backend records
self.backend_dependency_graph = json.loads(open(backend_path).read())
except Exception,e:
+ logger.info('Backend dependency graph not loaded')
# We can work without a backend graph
self.backend_dependency_graph = {}
self.last_run_times[step.__name__]=time.time()
def check_schedule(self, step):
- time_since_last_run = time.time() - self.last_run_times[step.__name__]
+ time_since_last_run = time.time() - self.last_run_times.get(step.__name__, 0)
try:
if (time_since_last_run < step.requested_interval):
raise StepNotReady
self.last_run_times[e]=0
-
def save_run_times(self):
run_times = json.dumps(self.last_run_times)
open('/tmp/observer_run_times','w').write(run_times)
def check_class_dependency(self, step, failed_steps):
+ step.dependenices = []
+ for obj in step.provides:
+ step.dependenices.extend(self.model_dependency_graph.get(obj.__name__, []))
for failed_step in failed_steps:
- step.dependencies = self.model_dependency_graph.get(step.provides[0].__name__, [])
if (failed_step in step.dependencies):
raise StepNotReady
failed_steps = []
# Set of individual objects within steps that failed
- failed_step_objects = []
+ failed_step_objects = set()
for S in self.ordered_steps:
step = self.step_lookup[S]
try:
duration=time.time() - start_time
+ logger.info('Executing step %s' % sync_step.__name__)
+
# ********* This is the actual sync step
- import pdb
- pdb.set_trace()
- failed_objects = sync_step(failed=failed_step_objects)
+ #import pdb
+ #pdb.set_trace()
+ failed_objects = sync_step(failed=list(failed_step_objects))
self.check_duration(sync_step, duration)
if failed_objects:
- failed_step_objects.extend(failed_objects)
+ failed_step_objects.update(failed_objects)
self.update_run_time(sync_step)
except:
- raise
failed_steps.append(S)
self.save_run_times()
except: