maximum stack size.
"""
-#import Queue
import os
import pwd, grp
import threading
def ensure_created(self, rec, startingup = Startingup):
"""Check account type is still valid. If not, recreate sliver.
If still valid, check if running and configure/start if not."""
+ logger.log_data_in_file(rec,"/var/lib/nodemanager/%s.rec.txt"%rec['name'],
+ 'raw rec captured in ensure_created',logger.LOG_VERBOSE)
curr_class = self._get_class()
next_class = type_acct_class[rec['type']]
if next_class != curr_class:
db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
dump_requested = False
db_lock.release()
- try: tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
- except: logger.log_exc("database: failed in database.start.run")
+ try:
+ tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
+ logger.log_database(db)
+ except:
+ logger.log_exc("database.start: failed to pickle/dump")
global db
try:
f = open(DB_FILE)
# file locations used by the logger helpers below
LOG_FILE = '/var/log/nodemanager'
LOG_SLIVERS = '/var/lib/nodemanager/getslivers.txt'
LOG_DATABASE = '/var/lib/nodemanager/database.txt'
# basically define 3 levels
LOG_NONE=0
LOG_NODE=1
def log_missing_data (msg,key):
    """Record that *key* was absent from the incoming data; the event is logged and otherwise ignored."""
    warning = "%s: could not find the %s key in data (PLC connection down?) - IGNORED"%(msg,key)
    log(warning)
def log_data_in_file (data, file, message="",level=LOG_NODE):
    """Pretty-print *data* into *file*, overwriting any previous content.

    message -- tag included in the failure log entry only
    level   -- verbosity threshold; nothing is written if it exceeds LOG_LEVEL
    Best-effort: failures are logged via log_exc and never propagated.
    """
    if (level > LOG_LEVEL):
        return
    import pprint
    try:
        # 'with' guarantees the file is closed even if pprint raises
        with open(file,'w') as f:
            pp=pprint.PrettyPrinter(stream=f,indent=2)
            pp.pprint(data)
        log("logger.log_data_in_file: overwrote %s"%file)
    except Exception:
        log_exc('logger.log_data_in_file failed - file=%s - message=%r'%(file,message))
def log_slivers (data):
    """Dump the raw GetSlivers result into the LOG_SLIVERS file."""
    log_data_in_file (data, LOG_SLIVERS, message="raw GetSlivers")
def log_database (db):
    """Dump the raw in-memory database into the LOG_DATABASE file."""
    log_data_in_file (db, LOG_DATABASE, "raw database")
#################### child processes
# avoid waiting until the process returns;