14 from datetime import datetime
15 from collections import defaultdict
16 from core.models import *
17 from django.db.models import F, Q
18 from django.db import connection
19 #from openstack.manager import OpenStackManager
20 from openstack.driver import OpenStackDriver
21 from util.logger import Logger, logging, logger
22 #from timeout import timeout
23 from planetstack.config import Config
24 from observer.steps import *
25 from syncstep import SyncStep
26 from toposort import toposort
27 from observer.error_mapper import *
28 from openstack_observer.openstacksyncstep import OpenStackSyncStep
# Module-level logger shared by the observer event loop below.
logger = Logger(level=logging.INFO)

# Raised by the scheduling checks (check_schedule / check_class_dependency)
# to signal that a sync step must not run in this pass.
class StepNotReady(Exception):
    # NOTE(review): the class body is missing from this excerpt; presumably
    # an empty marker exception ("pass") -- confirm against the full file.

# NOTE(review): the following line is a fragment of another definition whose
# enclosing "def __init__" is missing from this excerpt.
self.dependency_graph = None
class PlanetStackObserver:
    # Event-driven engine: discovers sync-step classes, orders them by their
    # dependency graphs, and runs them (in threads) each time it is woken up.
    #sync_steps = [SyncNetworks,SyncNetworkSlivers,SyncSites,SyncSitePrivilege,SyncSlices,SyncSliceMemberships,SyncSlivers,SyncSliverIps,SyncExternalRoutes,SyncUsers,SyncRoles,SyncNodes,SyncImages,GarbageCollector]

    # NOTE(review): the "def __init__(self):" header and the initialization
    # of self.sync_steps are missing from this excerpt.
    # The Condition object that gets signalled by Feefie events
    self.load_sync_step_modules()
    self.load_sync_steps()
    self.event_cond = threading.Condition()

    # Driver selection: OpenStack by default, a no-op driver otherwise.
    self.driver_kind = getattr(Config(), "observer_driver", "openstack")
    if self.driver_kind=="openstack":
        self.driver = OpenStackDriver()
    # NOTE(review): the "else:" branch header is missing from this excerpt.
    self.driver = NoOpDriver()
85 def wait_for_event(self, timeout):
86 self.event_cond.acquire()
87 self.event_cond.wait(timeout)
88 self.event_cond.release()
    # wake_up(): called from the event-listener thread to interrupt
    # wait_for_event() early so a new loop iteration starts immediately.
    # NOTE(review): the "def wake_up(self):" header is missing from this
    # excerpt; these four lines are its body.
    logger.info('Wake up routine called. Event cond %r'%self.event_cond)
    self.event_cond.acquire()
    self.event_cond.notify()
    self.event_cond.release()
    def load_sync_step_modules(self, step_dir=None):
        """Discover sync-step classes by importing every .py file in
        *step_dir* and appending SyncStep/OpenStackSyncStep subclasses that
        declare a 'provides' attribute to self.sync_steps."""
        # NOTE(review): the "if step_dir is None:" / "else:" guard lines are
        # missing from this excerpt; only the two branch bodies remain.
        if hasattr(Config(), "observer_steps_dir"):
            step_dir = Config().observer_steps_dir
        step_dir = "/opt/planetstack/observer/steps"

        for fn in os.listdir(step_dir):
            pathname = os.path.join(step_dir,fn)
            if os.path.isfile(pathname) and fn.endswith(".py") and (fn!="__init__.py"):
                # imp.load_source is deprecated on modern Python; presumably
                # kept for Python 2 compatibility -- confirm before porting.
                module = imp.load_source(fn[:-3],pathname)
                for classname in dir(module):
                    c = getattr(module, classname, None)

                    # make sure 'c' is a descendent of SyncStep and has a
                    # provides field (this eliminates the abstract base classes
                    # since they don't have a provides)

                    if inspect.isclass(c) and (issubclass(c, SyncStep) or issubclass(c,OpenStackSyncStep)) and hasattr(c,"provides") and (c not in self.sync_steps):
                        self.sync_steps.append(c)
        logger.info('loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps]))
        # print 'loaded sync steps: %s' % ",".join([x.__name__ for x in self.sync_steps])
    def load_sync_steps(self):
        """Build the step-level dependency graphs and the topologically
        sorted execution order from two JSON dependency files plus the
        sync-step classes loaded by load_sync_step_modules().

        Produces: self.model_dependency_graph, self.backend_dependency_graph,
        self.dependency_graph, self.deletion_dependency_graph,
        self.ordered_steps; then calls self.load_run_times().

        NOTE(review): this excerpt omits many interior lines (try/except
        headers, loop headers); gap notes below flag each hole.
        """
        dep_path = Config().observer_dependency_graph
        logger.info('Loading model dependency graph from %s' % dep_path)

        # This contains dependencies between records, not sync steps
        self.model_dependency_graph = json.loads(open(dep_path).read())
        for lst in self.model_dependency_graph.values():
            # NOTE(review): the loop body is only partially present; the
            # binding of 'k' is missing from this excerpt.
            deps = self.model_dependency_graph[k]
            self.model_dependency_graph[k] = []

        # NOTE(review): the try: wrapping the backend-graph load is missing.
        backend_path = Config().observer_pl_dependency_graph
        logger.info('Loading backend dependency graph from %s' % backend_path)
        # This contains dependencies between backend records
        self.backend_dependency_graph = json.loads(open(backend_path).read())
        # NOTE(review): the except handler header is missing here.
        logger.info('Backend dependency graph not loaded')
        # We can work without a backend graph
        self.backend_dependency_graph = {}

        # Map each provided model name to the steps that provide it.
        for s in self.sync_steps:
            self.step_lookup[s.__name__] = s
            # NOTE(review): the "for m in s.provides:" loop and try/except
            # headers are missing; these two lines are alternative branches.
            provides_dict[m.__name__].append(s.__name__)
            provides_dict[m.__name__]=[s.__name__]

        # Translate model-level dependencies into step-level edges.
        # NOTE(review): .iteritems() is Python 2 only (.items() on Python 3).
        for k,v in self.model_dependency_graph.iteritems():
            for source in provides_dict[k]:
                step_graph[source] = []
                # NOTE(review): the loop binding 'm' is missing here.
                for dest in provides_dict[m]:
                    if (dest not in step_graph[source]):
                        step_graph[source].append(dest)
                    step_graph[source]=[dest]
        # no dependencies, pass

        if (self.backend_dependency_graph):
            # NOTE(review): loop headers binding 'm' are missing below.
            for s in self.sync_steps:
                backend_dict[m]=s.__name__
            # NOTE(review): 'backend_dependency_graph' here lacks the 'self.'
            # prefix used everywhere else -- presumably a bug; confirm.
            for k,v in backend_dependency_graph.iteritems():
                source = backend_dict[k]
                dest = backend_dict[m]
                step_graph[source]=dest
        # no dependencies, pass

        self.dependency_graph = step_graph
        # Deletion passes run in the reverse direction of sync passes.
        self.deletion_dependency_graph = invert_graph(step_graph)

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(step_graph)
        self.ordered_steps = toposort(self.dependency_graph, map(lambda s:s.__name__,self.sync_steps))
        #self.ordered_steps = ['SyncRoles', 'SyncControllerSites', 'SyncControllerSitePrivileges','SyncImages', 'SyncControllerImages','SyncControllerUsers','SyncControllerUserSitePrivileges','SyncControllerSlices', 'SyncControllerSlicePrivileges', 'SyncControllerUserSlicePrivileges', 'SyncControllerNetworks','SyncSlivers']
        #self.ordered_steps = ['SyncControllerSites']

        print "Order of steps=",self.ordered_steps

        self.load_run_times()
213 def check_duration(self, step, duration):
215 if (duration > step.deadline):
216 logger.info('Sync step %s missed deadline, took %.2f seconds'%(step.name,duration))
217 except AttributeError:
218 # S doesn't have a deadline
221 def update_run_time(self, step, deletion):
223 self.last_run_times[step.__name__]=time.time()
225 self.last_deletion_run_times[step.__name__]=time.time()
228 def check_schedule(self, step, deletion):
229 last_run_times = self.last_run_times if not deletion else self.last_deletion_run_times
231 time_since_last_run = time.time() - last_run_times.get(step.__name__, 0)
233 if (time_since_last_run < step.requested_interval):
235 except AttributeError:
236 logger.info('Step %s does not have requested_interval set'%step.__name__)
239 def load_run_times(self):
241 jrun_times = open('/tmp/observer_run_times').read()
242 self.last_run_times = json.loads(jrun_times)
244 self.last_run_times={}
245 for e in self.ordered_steps:
246 self.last_run_times[e]=0
248 jrun_times = open('/tmp/observer_deletion_run_times').read()
249 self.last_deletion_run_times = json.loads(jrun_times)
251 self.last_deletion_run_times={}
252 for e in self.ordered_steps:
253 self.last_deletion_run_times[e]=0
256 def save_run_times(self):
257 run_times = json.dumps(self.last_run_times)
258 open('/tmp/observer_run_times','w').write(run_times)
260 deletion_run_times = json.dumps(self.last_deletion_run_times)
261 open('/tmp/observer_deletion_run_times','w').write(deletion_run_times)
263 def check_class_dependency(self, step, failed_steps):
264 step.dependenices = []
265 for obj in step.provides:
266 step.dependenices.extend(self.model_dependency_graph.get(obj.__name__, []))
267 for failed_step in failed_steps:
268 if (failed_step in step.dependencies):
    def sync(self, S, deletion):
        """Run one sync step (named by the string S) in its own thread:
        wait on the threading.Conditions of its dependency steps, run the
        step, then publish STEP_STATUS_OK/KO and notify waiters.

        NOTE(review): this excerpt omits many interior lines (loop and
        handler headers); gap notes below flag each hole, so statement
        grouping/indentation here is best-effort.
        """
        step = self.step_lookup[S]
        start_time=time.time()

        logger.info("Starting to work on step %s" % step.__name__)

        # Deletion passes traverse the inverted graph built in load_sync_steps.
        dependency_graph = self.dependency_graph if not deletion else self.deletion_dependency_graph

        # Wait for step dependencies to be met
        deps = self.dependency_graph[S]
        # NOTE(review): the loop over deps binding 'd' and the
        # self-dependency check are missing from this excerpt.
        logger.info(" step %s self-wait skipped" % step.__name__)
        cond = self.step_conditions[d]
        # NOTE(review): the condition acquire/wait loop is missing here.
        if (self.step_status[d] is STEP_STATUS_WORKING):
            logger.info(" step %s wait on dep %s" % (step.__name__, d))
        elif self.step_status[d] == STEP_STATUS_OK:
        # NOTE(review): both branch bodies are missing from this excerpt;
        # the lines below handle a dependency that failed (skip this step).
        print bcolors.FAIL + "Step %r skipped on %r" % (step,failed_dep) + bcolors.ENDC
        # SMBAKER: sync_step was not defined here, so I changed
        # this from 'sync_step' to 'step'. Verify.
        self.failed_steps.append(step)
        my_status = STEP_STATUS_KO

        # Instantiate the step and compute its model-level dependencies.
        sync_step = step(driver=self.driver,error_map=self.error_mapper)
        sync_step.__name__ = step.__name__
        sync_step.dependencies = []
        mlist = sync_step.provides
        # NOTE(review): the loop header over provided models (binding 'm')
        # is missing from this excerpt.
        sync_step.dependencies.extend(self.model_dependency_graph[m.__name__])
        # NOTE(review): 'debug_mode' is defined outside this excerpt.
        sync_step.debug_mode = debug_mode

        # Various checks that decide whether
        # this step runs or not
        self.check_class_dependency(sync_step, self.failed_steps) # dont run Slices if Sites failed
        self.check_schedule(sync_step, deletion) # dont run sync_network_routes if time since last run < 1 hour
        # NOTE(review): the "except StepNotReady:" header is missing here.
        logger.info('Step not ready: %s'%sync_step.__name__)
        self.failed_steps.append(sync_step)
        my_status = STEP_STATUS_KO
        # NOTE(review): a generic exception handler header (binding 'e') is
        # missing here.
        logger.error('%r' % e)
        logger.log_exc("sync step failed: %r. Deletion: %r"%(sync_step,deletion))
        self.failed_steps.append(sync_step)
        my_status = STEP_STATUS_KO

        duration=time.time() - start_time

        logger.info('Executing step %s' % sync_step.__name__)
        print bcolors.OKBLUE + "Executing step %s" % sync_step.__name__ + bcolors.ENDC
        # Failed objects are carried between passes and retried rather than
        # aborting the whole step.
        failed_objects = sync_step(failed=list(self.failed_step_objects), deletion=deletion)

        self.check_duration(sync_step, duration)
        self.failed_step_objects.update(failed_objects)
        logger.info("Step %r succeeded" % step)
        print bcolors.OKGREEN + "Step %r succeeded" % step + bcolors.ENDC
        my_status = STEP_STATUS_OK
        self.update_run_time(sync_step,deletion)
        # NOTE(review): an exception handler header (binding 'e') is missing
        # here.
        print bcolors.FAIL + "Model step %r failed" % (step) + bcolors.ENDC
        logger.error('Model step %r failed. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % (step, e))
        self.failed_steps.append(S)
        my_status = STEP_STATUS_KO
        logger.info("Step %r succeeded due to non-run" % step)
        my_status = STEP_STATUS_OK

        # Publish our status and wake any steps blocked on us; leaf steps
        # have no registered condition.
        my_cond = self.step_conditions[S]
        self.step_status[S]=my_status
        logger.info('Step %r is a leaf' % step)
        # NOTE(review): the "def run(self):" header and its opening lines are
        # missing from this excerpt; what follows is the observer's main
        # loop body, with gaps flagged below.
        if not self.driver.enabled:
        # NOTE(review): guard body missing (presumably an early return).
        if (self.driver_kind=="openstack") and (not self.driver.has_openstack):
        # NOTE(review): guard body missing (presumably an early return).

        # Build the error mapper used to translate step failures.
        error_map_file = getattr(Config(), "error_map_path", "/opt/planetstack/error_map.txt")
        self.error_mapper = ErrorMapper(error_map_file)

        # Set of whole steps that failed
        self.failed_steps = []

        # Set of individual objects within steps that failed
        self.failed_step_objects = set()

        # Set up conditions and step status
        # This is needed for steps to run in parallel
        # while obeying dependencies.
        # NOTE(review): the collection of 'providers' from the dependency
        # graph keys is missing from this excerpt.
        for v in self.dependency_graph.values():
        # NOTE(review): loop body missing.
        self.step_conditions = {}
        self.step_status = {}
        for p in list(providers):
            self.step_conditions[p] = threading.Condition()
            self.step_status[p] = STEP_STATUS_WORKING

        # Sleep (up to 30s) until wake_up() signals a new event.
        logger.info('Waiting for event')
        tBeforeWait = time.time()
        self.wait_for_event(timeout=30)
        logger.info('Observer woke up')

        # Two passes. One for sync, the other for deletion.
        for deletion in [False,True]:
            logger.info('Deletion=%r...'%deletion)
            # Deletion runs the steps in reverse dependency order.
            schedule = self.ordered_steps if not deletion else reversed(self.ordered_steps)
            # NOTE(review): the loop over schedule binding 'S' is missing
            # from this excerpt.
            thread = threading.Thread(target=self.sync, args=(S, deletion))

            logger.info('Deletion=%r...'%deletion)
            threads.append(thread)

        # Wait for all threads to finish before continuing with the run loop
        # NOTE(review): the thread start/join lines are missing here.
        self.save_run_times()
        # NOTE(review): the top-level exception handler header (binding 'e')
        # is missing from this excerpt.
        logger.error('Core error. This seems like a misconfiguration or bug: %r. This error will not be relayed to the user!' % e)
        logger.log_exc("Exception in observer run loop")
        traceback.print_exc()