def array_to_priority_map(array):
    """Create a mapping where each entry of array is given a priority equal
    to its position in the array. This is useful for subsequent use in the
    cmpMap() function.

    Duplicate entries keep the priority of their last occurrence, matching
    the original accumulation order.
    """
    # Fixes: original shadowed the builtin `map` and kept a manual counter;
    # enumerate() expresses the same position bookkeeping directly.
    priority_map = {}
    for position, entry in enumerate(array):
        priority_map[entry] = position
    return priority_map
+
def getdebug():
    """Return the global debug flag from the monitor configuration."""
    debug_flag = config.debug
    return debug_flag
+
def print_stats(key, stats):
    """Print "key : count" (key right-aligned to 20 chars) when *key* is
    present in *stats*; print nothing otherwise."""
    if key not in stats:
        return
    print("%20s : %d" % (key, stats[key]))
+
class Merge(Thread):
    """Pipeline stage that merges current 'findbad' observations with the
    'act_all' history of previous actions.

    Records are grouped by site loginbase and then streamed, one per node,
    onto the toRT queue for the next stage; a trailing None marks
    end-of-stream.
    """

    def __init__(self, l_merge, toRT):
        # Output queue consumed by the RT stage; terminated with None.
        self.toRT = toRT
        # Only nodes named in this list take part in the merge.
        self.merge_list = l_merge
        # the hostname to loginbase mapping
        self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")

        # Previous actions taken on nodes.
        self.act_all = soltesz.if_cached_else(1, "act_all", lambda : {})
        # Current per-node observations from the findbad scan.
        self.findbad = soltesz.if_cached_else(1, "findbad", lambda : {})

        # NOTE(review): deliberately a *second* copy of "act_all"; merged
        # nodes are deleted from it so that what remains afterwards are the
        # act_all nodes findbad did not report (case 3 below).
        self.cache_all = soltesz.if_cached_else(1, "act_all", lambda : {})
        # sickdb[loginbase][nodename] = findbad record (built in accumSickSites)
        self.sickdb = {}
        # mergedb[loginbase][nodename] = merged record (built in mergeActionsAndBadDB)
        self.mergedb = {}
        Thread.__init__(self)

    def run(self):
        """Thread body: accumulate sick nodes, merge with history, send to RT."""
        # populate sickdb
        self.accumSickSites()
        # read data from findbad and act_all
        self.mergeActionsAndBadDB()
        # pass node_records to RT
        self.sendToRT()

    def accumSickSites(self):
        """
        Take all nodes reported by findbad, keep those listed in
        merge_list, and insert a normalized record for each into sickdb as:

            sickdb[loginbase][nodename] = fb_record
        """
        # look at all problems reported by findbad
        l_nodes = self.findbad['nodes'].keys()
        count = 0
        for nodename in l_nodes:
            if nodename not in self.merge_list:
                continue # skip this node, since it's not wanted

            count += 1
            loginbase = self.plcdb_hn2lb[nodename]
            values = self.findbad['nodes'][nodename]['values']

            fb_record = {}
            fb_record['nodename'] = nodename
            try:
                fb_record['category'] = values['category']
            except:
                # NOTE(review): bare except -- a malformed findbad entry is
                # dumped for debugging and skipped instead of aborting.
                print values
                print nodename
                print self.findbad['nodes'][nodename]
                count -= 1
                continue
            fb_record['state'] = values['state']
            fb_record['comonstats'] = values['comonstats']
            fb_record['plcnode'] = values['plcnode']
            # reduce the full uname string down to just the kernel release
            fb_record['kernel'] = self.getKernel(values['kernel'])
            fb_record['stage'] = "findbad"
            fb_record['message'] = None
            fb_record['bootcd'] = values['bootcd']
            fb_record['args'] = None
            fb_record['info'] = None
            fb_record['time'] = time.time()
            fb_record['date_created'] = time.time()

            if loginbase not in self.sickdb:
                self.sickdb[loginbase] = {}

            self.sickdb[loginbase][nodename] = fb_record

        print "Found %d nodes" % count

    def getKernel(self, unamestr):
        """Return the third whitespace-separated token of *unamestr* (the
        release field of a `uname -a` style string), or "" if there are
        fewer than three tokens."""
        s = unamestr.split()
        if len(s) > 2:
            return s[2]
        else:
            return ""

    def mergeActionsAndBadDB(self):
        """
        - Look at the sick node_records as reported in findbad,
        - Then look at the node_records in act_all.

        There are four cases:
        1) Problem in findbad, no problem in act_all
            this ok, b/c it just means it's a new problem
        2) Problem in findbad, problem in act_all
            -Did the problem get better or worse?
            -If Same, or Worse, then continue looking for open tickets.
            -If Better, or No problem, then "back-off" penalties.
                This judgement may need to wait until 'Diagnose()'

        3) No problem in findbad, problem in act_all
            The the node is operational again according to Findbad()

        4) No problem in findbad, no problem in act_all
            There won't be a record in either db, so there's no code.
        """

        sorted_sites = self.sickdb.keys()
        sorted_sites.sort()
        # look at all problems reported by findbad
        for loginbase in sorted_sites:
            d_fb_nodes = self.sickdb[loginbase]
            sorted_nodes = d_fb_nodes.keys()
            sorted_nodes.sort()
            for nodename in sorted_nodes:
                fb_record = self.sickdb[loginbase][nodename]
                # x = current findbad record, y (below) = historical record
                x = fb_record
                if loginbase not in self.mergedb:
                    self.mergedb[loginbase] = {}

                # We must compare findbad state with act_all state
                if nodename not in self.act_all:
                    # 1) ok, b/c it's a new problem. set ticket_id to null
                    self.mergedb[loginbase][nodename] = {}
                    self.mergedb[loginbase][nodename].update(x)
                    self.mergedb[loginbase][nodename]['ticket_id'] = ""
                    self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
                else:
                    if len(self.act_all[nodename]) == 0:
                        print "len(act_all[%s]) == 0, skipping %s %s" % (nodename, loginbase, nodename)
                        continue

                    # most recent action record for this node
                    y = self.act_all[nodename][0]

                    ## skip if end-stage
                    #if 'stage' in y and "monitor-end-record" in y['stage']:
                    #    # 1) ok, b/c it's a new problem. set ticket_id to null
                    ##    self.mergedb[loginbase][nodename] = {}
                    #    self.mergedb[loginbase][nodename].update(x)
                    #    self.mergedb[loginbase][nodename]['ticket_id'] = ""
                    #    self.mergedb[loginbase][nodename]['prev_category'] = None
                    #    continue

                    ## for legacy actions
                    #if 'bucket' in y and y['bucket'][0] == 'dbg':
                    #    # Only bootcd debugs made it to the act_all db.
                    #    y['prev_category'] = "OLDBOOTCD"
                    #elif 'bucket' in y and y['bucket'][0] == 'down':
                    #    y['prev_category'] = "ERROR"
                    #elif 'bucket' not in y:
                    #    # for all other actions, just carry over the
                    #    # previous category
                    #    y['prev_category'] = y['category']
                    #else:
                    #    print "UNKNOWN state for record: %s" % y
                    #    sys.exit(1)

                    # determine through translation, if the buckets match
                    #if 'category' in y and x['category'] == y['category']:
                    #    b_match = True
                    #elif x['category'] == "OLDBOOTCD" and y['bucket'][0] == 'dbg':
                    #    b_match = True
                    #elif x['category'] == "ERROR" and y['bucket'][0] == 'down':
                    #    b_match = True
                    #else:
                    #    b_match = False

                    #if b_match:
                    #    # 2b) ok, b/c they agree that there's still a problem..
                    #    # 2b) Comon & Monitor still agree; RT ticket?
                    #else:
                    #    # 2a) mismatch, need a policy for how to resolve
                    #    #     resolution will be handled in __diagnoseNode()
                    #    #     for now just record the two categories.
                    #    #if x['category'] == "PROD" and x['state'] == "BOOT" and \
                    #    #    ( y['bucket'][0] == 'down' or y['bucket'][0] == 'dbg'):
                    #    print "FINDBAD and MONITOR have a mismatch: %s vs %s" % \
                    #            (x['category'], y['bucket'])

                    # 2) start from the historical record, then overlay the
                    #    fresh observations from findbad.
                    y['prev_category'] = y['category']
                    self.mergedb[loginbase][nodename] = {}
                    self.mergedb[loginbase][nodename].update(y)
                    self.mergedb[loginbase][nodename]['comonstats'] = x['comonstats']
                    self.mergedb[loginbase][nodename]['category'] = x['category']
                    self.mergedb[loginbase][nodename]['state'] = x['state']
                    self.mergedb[loginbase][nodename]['kernel']=x['kernel']
                    self.mergedb[loginbase][nodename]['bootcd']=x['bootcd']
                    self.mergedb[loginbase][nodename]['plcnode']=x['plcnode']
                    # delete the entry from cache_all to keep it out of case 3)
                    del self.cache_all[nodename]

        # 3) nodes that remin in cache_all were not identified by findbad.
        #    Do we keep them or not?
        #  NOTE: i think that since the categories are performed before this
        #        step now, and by a monitor-controlled agent.

        # TODO: This does not work correctly.  Do we need this?
        #for hn in self.cache_all.keys():
        #    y = self.act_all[hn][0]
        #    if 'monitor' in y['bucket']:
        #        loginbase = self.plcdb_hn2lb[hn]
        #        if loginbase not in self.sickdb:
        #            self.sickdb[loginbase] = {}
        #        self.sickdb[loginbase][hn] = y
        #    else:
        #        del self.cache_all[hn]

        print "len of cache_all: %d" % len(self.cache_all.keys())
        return

    def sendToRT(self):
        """Push every merged node record onto the toRT queue (sites in
        sorted order), then push None so the consumer knows to stop."""
        sorted_sites = self.mergedb.keys()
        sorted_sites.sort()
        # look at all problems reported by merge
        for loginbase in sorted_sites:
            d_merge_nodes = self.mergedb[loginbase]
            for nodename in d_merge_nodes.keys():
                record = self.mergedb[loginbase][nodename]
                self.toRT.put(record)

        # send signal to stop reading
        self.toRT.put(None)
        return
+
class Diagnose(Thread):
    """Pipeline stage that consumes merged node records from the fromRT
    queue, diagnoses each node/site (assigning messages, actions and
    stages), and optionally saves the results as the 'diagnose_out' db.
    """

    def __init__(self, fromRT):
        # Input queue fed by the RT stage; a None entry ends the stream.
        self.fromRT = fromRT
        # hostname -> loginbase mapping
        self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
        # findbad observations, used for per-site stats (max_slices etc.)
        self.findbad = soltesz.if_cached_else(1, "findbad", lambda : {})

        # diagnose_in[loginbase][nodename] = node_record (from the queue)
        self.diagnose_in = {}
        # diagnose_out[loginbase] = site diagnosis (built by diagnoseAll)
        self.diagnose_out = {}
        Thread.__init__(self)


    def run(self):
        """Thread body: drain the queue, diagnose everything, save results."""
        self.accumSickSites()

        print "Accumulated %d sick sites" % len(self.diagnose_in.keys())
        logger.debug("Accumulated %d sick sites" % len(self.diagnose_in.keys()))

        try:
            stats = self.diagnoseAll()
        except Exception, err:
            # NOTE(review): exits the whole process on any diagnosis error;
            # `stats` below is only reachable when no exception occurred.
            print "----------------"
            import traceback
            print traceback.print_exc()
            print err
            #if config.policysavedb:
            sys.exit(1)

        print_stats("sites_observed", stats)
        print_stats("sites_diagnosed", stats)
        print_stats("nodes_diagnosed", stats)

        if config.policysavedb:
            print "Saving Databases... diagnose_out"
            soltesz.dbDump("diagnose_out", self.diagnose_out)

    def accumSickSites(self):
        """
        Drain node_records from the fromRT queue (blocking) until a None
        sentinel arrives, grouping them by site as:

            diagnose_in[loginbase][nodename] = node_record
        """
        while 1:
            node_record = self.fromRT.get(block = True)
            if node_record == None:
                break;

            nodename = node_record['nodename']
            loginbase = self.plcdb_hn2lb[nodename]

            if loginbase not in self.diagnose_in:
                self.diagnose_in[loginbase] = {}

            self.diagnose_in[loginbase][nodename] = node_record

        return

    def diagnoseAll(self):
        """Diagnose every accumulated site and return summary counters:
        sites_observed, sites_diagnosed, nodes_diagnosed and the list of
        all site loginbases seen."""
        i_sites_observed = 0
        i_sites_diagnosed = 0
        i_nodes_diagnosed = 0
        i_nodes_actedon = 0
        i_sites_emailed = 0
        l_allsites = []

        sorted_sites = self.diagnose_in.keys()
        sorted_sites.sort()
        self.diagnose_out= {}
        for loginbase in sorted_sites:
            l_allsites += [loginbase]

            d_diag_nodes = self.diagnose_in[loginbase]
            d_act_records = self.__diagnoseSite(loginbase, d_diag_nodes)
            # store records in diagnose_out, for saving later.
            self.diagnose_out.update(d_act_records)

            if len(d_act_records[loginbase]['nodes'].keys()) > 0:
                i_nodes_diagnosed += (len(d_act_records[loginbase]['nodes'].keys()))
                i_sites_diagnosed += 1
            i_sites_observed += 1

        return {'sites_observed': i_sites_observed,
                'sites_diagnosed': i_sites_diagnosed,
                'nodes_diagnosed': i_nodes_diagnosed,
                'allsites':l_allsites}

        # NOTE(review): unreachable -- follows the return above.
        pass

    def __getDaysDown(self, diag_record, nodename):
        """Estimate how many whole days the node has been down, preferring
        comon's sshstatus, then lastcotop, then PLC's last_contact; -1 means
        unknown / never up.  (Assumes the comon counters are in seconds --
        TODO confirm.)"""
        daysdown = -1
        if diag_record['comonstats']['sshstatus'] != "null":
            daysdown = int(diag_record['comonstats']['sshstatus']) // (60*60*24)
        elif diag_record['comonstats']['lastcotop'] != "null":
            daysdown = int(diag_record['comonstats']['lastcotop']) // (60*60*24)
        else:
            now = time.time()
            last_contact = diag_record['plcnode']['last_contact']
            if last_contact == None:
                # the node has never been up, so give it a break
                daysdown = -1
            else:
                diff = now - last_contact
                daysdown = diff // (60*60*24)
        return daysdown

    def __getStrDaysDown(self, diag_record, nodename):
        """Human-readable form of __getDaysDown for email templates."""
        daysdown = self.__getDaysDown(diag_record, nodename)
        if daysdown > 0:
            return "(%d days down)"%daysdown
        else:
            return "Unknown number of days"

    def __getCDVersion(self, diag_record, nodename):
        """Return the 'CD version' string for the record.  NOTE(review):
        this actually returns the kernel release, not the bootcd field."""
        cdversion = ""
        #print "Getting kernel for: %s" % diag_record['nodename']
        cdversion = diag_record['kernel']
        return cdversion

    def __diagnoseSite(self, loginbase, d_diag_nodes):
        """
        d_diag_nodes are diagnose_in entries.

        Diagnoses each node of the site and decides the site-level
        'squeeze' and 'email' flags.  Returns:

            {loginbase: {'config': {'squeeze':..., 'email':...},
                         'nodes': {nodename: diag_record, ...}}}
        """
        d_diag_site = {loginbase : { 'config' :
                                        {'squeeze': False,
                                         'email': False
                                        },
                                     'nodes': {}
                                   }
                      }
        sorted_nodes = d_diag_nodes.keys()
        sorted_nodes.sort()
        for nodename in sorted_nodes:
            node_record = d_diag_nodes[nodename]
            diag_record = self.__diagnoseNode(loginbase, node_record)

            if diag_record != None:
                d_diag_site[loginbase]['nodes'][nodename] = diag_record

                # NOTE: improvement means, we need to act/squeeze and email.
                #print "DIAG_RECORD", diag_record
                if 'monitor-end-record' in diag_record['stage'] or \
                   'nmreset' in diag_record['stage']:
                #       print "resetting loginbase!"
                    d_diag_site[loginbase]['config']['squeeze'] = True
                    d_diag_site[loginbase]['config']['email'] = True
                #else:
                #       print "NO IMPROVEMENT!!!!"
            else:
                pass # there is nothing to do for this node.

        # NOTE: these settings can be overridden by command line arguments,
        #       or the state of a record, i.e. if already in RT's Support Queue.
        nodes_up = self.getUpAtSite(loginbase, d_diag_site)
        if nodes_up < MINUP:
            d_diag_site[loginbase]['config']['squeeze'] = True

        max_slices = self.getMaxSlices(loginbase)
        num_nodes = self.getNumNodes(loginbase)
        # NOTE: when max_slices == 0, this is either a new site (the old way)
        #       or an old disabled site from previous monitor (before site['enabled'])
        if nodes_up < num_nodes and max_slices != 0:
            d_diag_site[loginbase]['config']['email'] = True

        if len(d_diag_site[loginbase]['nodes'].keys()) > 0:
            print "SITE: %20s : %d nodes up, at most" % (loginbase, nodes_up)

        return d_diag_site

    def diagRecordByCategory(self, node_record):
        """Build a diag_record (message, args, info, log) for the node
        based on its findbad category/state.  Returns None when no action
        is warranted (e.g. down < 7 days, DEBUG state, or a pass-through
        category such as ALPHA/clock_drift/dns/filerw)."""
        nodename = node_record['nodename']
        category = node_record['category']
        state = node_record['state']
        loginbase = self.plcdb_hn2lb[nodename]
        diag_record = None

        if  "ERROR" in category:        # i.e. "DOWN"
            diag_record = {}
            diag_record.update(node_record)
            daysdown = self.__getDaysDown(diag_record, nodename)
            # give the node a week of grace before acting on it
            if daysdown < 7:
                format = "DIAG: %20s : %-40s Down only %s days  NOTHING DONE"
                print format % (loginbase, nodename, daysdown)
                return None

            s_daysdown = self.__getStrDaysDown(diag_record, nodename)
            diag_record['message'] = emailTxt.mailtxt.newdown
            diag_record['args'] = {'nodename': nodename}
            diag_record['info'] = (nodename, s_daysdown, "")

            if 'reboot_node_failed' in node_record:
                # there was a previous attempt to use the PCU.
                if node_record['reboot_node_failed'] == False:
                    # then the last attempt apparently, succeeded.
                    # But, the category is still 'ERROR'.  Therefore, the
                    # PCU-to-Node mapping is broken.
                    #print "Setting message for ERROR node to PCU2NodeMapping: %s" % nodename
                    diag_record['message'] = emailTxt.mailtxt.pcutonodemapping
                    diag_record['email_pcu'] = True

            if diag_record['ticket_id'] == "":
                diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
                    (loginbase, nodename, diag_record['info'][1:], diag_record['found_rt_ticket'])
            else:
                diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
                    (loginbase, nodename, diag_record['info'][1:], diag_record['ticket_id'])

        elif "OLDBOOTCD" in category:
            # V2 boot cds as determined by findbad
            s_daysdown = self.__getStrDaysDown(node_record, nodename)
            s_cdversion = self.__getCDVersion(node_record, nodename)
            diag_record = {}
            diag_record.update(node_record)
            #if "2.4" in diag_record['kernel'] or "v2" in diag_record['bootcd']:
            diag_record['message'] = emailTxt.mailtxt.newbootcd
            diag_record['args'] = {'nodename': nodename}
            diag_record['info'] = (nodename, s_daysdown, s_cdversion)
            if diag_record['ticket_id'] == "":
                diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
                    (loginbase, nodename, diag_record['kernel'],
                     diag_record['bootcd'], diag_record['found_rt_ticket'])
            else:
                diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
                    (loginbase, nodename, diag_record['kernel'],
                     diag_record['bootcd'], diag_record['ticket_id'])

        elif "PROD" in category:
            if "DEBUG" in state:
                # Not sure what to do with these yet.  Probably need to
                # reboot, and email.
                print "DEBG: %20s : %-40s  NOTHING DONE" % (loginbase, nodename)
                return None
            elif "BOOT" in state:
                # no action needed.
                # TODO: remove penalties, if any are applied.
                now = time.time()
                last_contact = node_record['plcnode']['last_contact']
                if last_contact == None:
                    time_diff = 0
                else:
                    time_diff = now - last_contact;

                if 'improvement' in node_record['stage']:
                    # then we need to pass this on to 'action'
                    diag_record = {}
                    diag_record.update(node_record)
                    diag_record['message'] = emailTxt.mailtxt.newthankyou
                    diag_record['args'] = {'nodename': nodename}
                    diag_record['info'] = (nodename, node_record['prev_category'],
                                           node_record['category'])
                    if diag_record['ticket_id'] == "":
                        diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
                            (loginbase, nodename, diag_record['stage'],
                             state, category, diag_record['found_rt_ticket'])
                    else:
                        diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
                            (loginbase, nodename, diag_record['stage'],
                             state, category, diag_record['ticket_id'])
                    return diag_record
                elif time_diff >= 6*SPERHOUR:
                    # NOTE(review): comment said "older than 30 min" but the
                    # threshold tested above is six hours.
                    # then reset NM.
                    #print "Possible NM problem!! %s - %s = %s" % (now, last_contact, time_diff)
                    diag_record = {}
                    diag_record.update(node_record)
                    diag_record['message'] = emailTxt.mailtxt.NMReset
                    diag_record['args'] = {'nodename': nodename}
                    diag_record['stage'] = "nmreset"
                    diag_record['info'] = (nodename,
                                           node_record['prev_category'],
                                           node_record['category'])
                    if diag_record['ticket_id'] == "":
                        diag_record['log'] = "NM  : %20s : %-40s == %20s %20s %s %s" % \
                            (loginbase, nodename, diag_record['stage'],
                             state, category, diag_record['found_rt_ticket'])
                    else:
                        diag_record['log'] = "NM  : %20s : %-40s == %20s" % \
                            (loginbase, nodename, diag_record['stage'])

                    return diag_record
                else:
                    return None
            else:
                # unknown
                pass
        elif "ALPHA"    in category:
            pass
        elif "clock_drift"      in category:
            pass
        elif "dns"      in category:
            pass
        elif "filerw"   in category:
            pass
        else:
            print "Unknown category!!!! %s" % category
            sys.exit(1)

        return diag_record

    def __diagnoseNode(self, loginbase, node_record):
        """Compare the node's current category against its previous one,
        assign/advance the record's stage, and return an act_record ready
        for the Action stage -- or None when nothing should be sent yet."""
        # TODO: change the format of the hostname in this
        #       record to something more natural.
        nodename = node_record['nodename']
        category = node_record['category']
        prev_category = node_record['prev_category']
        state = node_record['state']
        #if 'prev_category' in node_record:
        #       prev_category = node_record['prev_category']
        #else:
        #       prev_category = "ERROR"
        if node_record['prev_category'] != "NORECORD":

            val = cmpCategoryVal(category, prev_category)
            print "%s went from %s -> %s" % (nodename, prev_category, category)
            if val == 1:
                # improved
                if node_record['ticket_id'] == "" or node_record['ticket_id'] == None:
                    print "closing record with no ticket: ", node_record['nodename']
                    node_record['action'] = ['close_rt']
                    node_record['message'] = None
                    node_record['stage'] = 'monitor-end-record'
                    return node_record
                else:
                    node_record['stage'] = 'improvement'

                #if 'monitor-end-record' in node_record['stage']:
                #       # just ignore it if it's already ended.
                #       # otherwise, the status should be worse, and we won't get
                #       # here.
                #       print "monitor-end-record: ignoring ", node_record['nodename']
                #       return None
#
#                       #return None
            elif val == -1:
                # current category is worse than previous, carry on
                pass
            else:
                #values are equal, carry on.
                #print "why are we here?"
                pass

        #### COMPARE category and prev_category
        # if not_equal
        #       then assign a stage based on relative priorities
        # else equal
        #       then check category for stats.
        diag_record = self.diagRecordByCategory(node_record)
        if diag_record == None:
            #print "diag_record == None"
            return None

        #### found_RT_ticket
        # TODO: need to record time found, and maybe add a stage for acting on it...
        if 'found_rt_ticket' in diag_record and \
            diag_record['found_rt_ticket'] is not None:
            # NOTE(review): `is not 'improvement'` compares string *identity*,
            # not equality; it happens to work under CPython interning but
            # should be `!=`.
            if diag_record['stage'] is not 'improvement':
                diag_record['stage'] = 'ticket_waitforever'

        current_time = time.time()
        # take off four days, for the delay that database caused.
        # TODO: generalize delays at PLC, and prevent enforcement when there
        #       have been no emails.
        # NOTE: 7*SPERDAY exists to offset the 'bad week'
        #delta = current_time - diag_record['time'] - 7*SPERDAY
        delta = current_time - diag_record['time']

        message = diag_record['message']
        act_record = {}
        act_record.update(diag_record)

        #### DIAGNOSE STAGES
        if   'findbad' in diag_record['stage']:
            # The node is bad, and there's no previous record of it.
            act_record['email'] = TECH
            act_record['action'] = ['noop']
            act_record['message'] = message[0]
            act_record['stage'] = 'stage_actinoneweek'

        elif 'nmreset' in diag_record['stage']:
            # NOTE(review): the act_record assignments here are dead --
            # the unconditional `return None` below discards them.
            act_record['email'] = ADMIN
            act_record['action'] = ['reset_nodemanager']
            act_record['message'] = message[0]
            act_record['stage'] = 'nmreset'
            return None

        elif 'reboot_node' in diag_record['stage']:
            act_record['email'] = TECH
            act_record['action'] = ['noop']
            act_record['message'] = message[0]
            act_record['stage'] = 'stage_actinoneweek'

        elif 'improvement' in diag_record['stage']:
            # - backoff previous squeeze actions (slice suspend, nocreate)
            # TODO: add a backoff_squeeze section... Needs to runthrough
            act_record['action'] = ['close_rt']
            act_record['message'] = message[0]
            act_record['stage'] = 'monitor-end-record'

        elif 'actinoneweek' in diag_record['stage']:
            if delta >= 7 * SPERDAY:
                # a week has passed: escalate to the two-week stage
                act_record['email'] = TECH | PI
                act_record['stage'] = 'stage_actintwoweeks'
                act_record['message'] = message[1]
                act_record['action'] = ['nocreate' ]
                act_record['time'] = current_time               # reset clock for waitforever
            elif delta >= 3* SPERDAY and not 'second-mail-at-oneweek' in act_record:
                # half-way reminder, sent at most once
                act_record['email'] = TECH
                act_record['message'] = message[0]
                act_record['action'] = ['sendmailagain-waitforoneweekaction' ]
                act_record['second-mail-at-oneweek'] = True
            else:
                act_record['message'] = None
                act_record['action'] = ['waitforoneweekaction' ]
                print "ignoring this record for: %s" % act_record['nodename']
                return None                     # don't send if there's no action

        elif 'actintwoweeks' in diag_record['stage']:
            if delta >= 7 * SPERDAY:
                # another week has passed: suspend slices, wait forever
                act_record['email'] = TECH | PI | USER
                act_record['stage'] = 'stage_waitforever'
                act_record['message'] = message[2]
                act_record['action'] = ['suspendslices']
                act_record['time'] = current_time               # reset clock for waitforever
            elif delta >= 3* SPERDAY and not 'second-mail-at-twoweeks' in act_record:
                # half-way reminder, sent at most once
                act_record['email'] = TECH | PI
                act_record['message'] = message[1]
                act_record['action'] = ['sendmailagain-waitfortwoweeksaction' ]
                act_record['second-mail-at-twoweeks'] = True
            else:
                act_record['message'] = None
                act_record['action'] = ['waitfortwoweeksaction']
                return None                     # don't send if there's no action

        elif 'ticket_waitforever' in diag_record['stage']:
            act_record['email'] = TECH
            if 'first-found' not in act_record:
                # first time we notice the open RT ticket: tag the log
                act_record['first-found'] = True
                act_record['log'] += " firstfound"
                act_record['action'] = ['ticket_waitforever']
                act_record['message'] = None
                act_record['time'] = current_time
            else:
                if delta >= 7*SPERDAY:
                    act_record['action'] = ['ticket_waitforever']
                    act_record['message'] = None
                    act_record['time'] = current_time           # reset clock
                else:
                    act_record['action'] = ['ticket_waitforever']
                    act_record['message'] = None
                    return None

        elif 'waitforever' in diag_record['stage']:
            # more than 3 days since last action
            # TODO: send only on weekdays.
            # NOTE: expects that 'time' has been reset before entering waitforever stage
            if delta >= 3*SPERDAY:
                act_record['action'] = ['email-againwaitforever']
                act_record['message'] = message[2]
                act_record['time'] = current_time               # reset clock
            else:
                act_record['action'] = ['waitforever']
                act_record['message'] = None
                return None                     # don't send if there's no action

        else:
            # There is no action to be taken, possibly b/c the stage has
            # already been performed, but diagnose picked it up again.
            # two cases,
            #       1. stage is unknown, or
            #       2. delta is not big enough to bump it to the next stage.
            # TODO: figure out which. for now assume 2.
            print "UNKNOWN stage for %s; nothing done" % nodename
            act_record['action'] = ['unknown']
            act_record['message'] = message[0]
            #print "Exiting..."
            return None
            #sys.exit(1)

        print "%s" % act_record['log'],
        print "%15s" % act_record['action']
        return act_record

    def getMaxSlices(self, loginbase):
        """Return the site's max_slices figure, taken from the findbad
        plcsite stats of the first of this site's nodes found in findbad.
        NOTE(review): near-duplicate of getNumNodes."""
        # if sickdb has a loginbase, then it will have at least one node.
        site_stats = None

        for nodename in self.diagnose_in[loginbase].keys():
            if nodename in self.findbad['nodes']:
                site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
                break

        if site_stats == None:
            raise Exception, "loginbase with no nodes in findbad"
        else:
            return site_stats['max_slices']

    def getNumNodes(self, loginbase):
        """Return the site's num_nodes figure, taken from the findbad
        plcsite stats of the first of this site's nodes found in findbad.
        NOTE(review): near-duplicate of getMaxSlices."""
        # if sickdb has a loginbase, then it will have at least one node.
        site_stats = None

        for nodename in self.diagnose_in[loginbase].keys():
            if nodename in self.findbad['nodes']:
                site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
                break

        if site_stats == None:
            raise Exception, "loginbase with no nodes in findbad"
        else:
            return site_stats['num_nodes']

    """
    Returns number of up nodes as the total number *NOT* in act_all with a
    stage other than 'steady-state' .
    """
    def getUpAtSite(self, loginbase, d_diag_site):
        # TODO: THIS DOESN"T WORK!!! it misses all the 'debug' state nodes
        # that aren't recorded yet.

        numnodes = self.getNumNodes(loginbase)
        # NOTE: assume nodes we have no record of are ok. (too conservative)
        # TODO: make the 'up' value more representative
        up = numnodes
        for nodename in d_diag_site[loginbase]['nodes'].keys():

            rec = d_diag_site[loginbase]['nodes'][nodename]
            if rec['stage'] != 'monitor-end-record':
                # diagnosed with an open problem: count it as down
                up -= 1
            else:
                pass # the node is assumed to be up.

        #if up != numnodes:
        #       print "ERROR: %s total nodes up and down != %d" % (loginbase, numnodes)

        return up
+
+
class SiteAction:
    """Base class for an action applied to a host/site.

    Subclasses implement _run(); run() first validates that every name in
    parameter_names is present in *args*, raising Exception otherwise.
    """
    def __init__(self, parameter_names=None):
        # FIX: the original used a mutable default argument
        # (parameter_names=['hostname', 'ticket_id']), which is shared
        # across all instances; use a None sentinel instead.
        if parameter_names is None:
            parameter_names = ['hostname', 'ticket_id']
        self.parameter_names = parameter_names

    def checkParam(self, args):
        """Raise Exception if any required parameter is missing from args."""
        for param in self.parameter_names:
            if param not in args:
                raise Exception("Parameter %s not provided in args"%param)

    def run(self, args):
        """Validate args, then perform the action via _run()."""
        self.checkParam(args)
        return self._run(args)

    def _run(self, args):
        # Default is a no-op; subclasses override with the real action.
        pass
+
class SuspendAction(SiteAction):
    """Suspend every slice on the given host via the PLC API."""
    def _run(self, args):
        hostname = args['hostname']
        return plc.suspendSlices(hostname)
+
class RemoveSliceCreation(SiteAction):
    """Disable new slice creation on the given host via the PLC API."""
    def _run(self, args):
        hostname = args['hostname']
        return plc.removeSliceCreation(hostname)
+
class BackoffActions(SiteAction):
    """Undo penalties: re-enable slices and slice creation for a host."""
    def _run(self, args):
        hostname = args['hostname']
        plc.enableSlices(hostname)
        plc.enableSliceCreation(hostname)
        return True
+
+# TODO: create class for each action below,
+# allow for lists of actions to be performed...
+
def close_rt_backoff(args):
    """Close the node's RT ticket (when one is recorded) and back off the
    site penalties by re-enabling slices and slice creation on the host.

    args must contain 'hostname'; 'ticket_id' is optional and ignored when
    empty or None.
    """
    # Idiom fix: use .get() plus `is not None` instead of
    # `'ticket_id' in args and args['ticket_id'] != None`.
    ticket_id = args.get('ticket_id')
    if ticket_id is not None and ticket_id != "":
        mailer.closeTicketViaRT(ticket_id,
                                "Ticket CLOSED automatically by SiteAssist.")
    plc.enableSlices(args['hostname'])
    plc.enableSliceCreation(args['hostname'])
    return
+
def reboot_node(args):
    """Attempt to reboot args['hostname'] through the PCU machinery,
    forwarding the global debug flag."""
    target = args['hostname']
    return reboot.reboot_new(target, True, config.debug)
+
def reset_nodemanager(args):
    """Restart the NodeManager service on the target host over ssh.

    BUG FIX: the original referenced an undefined global `nodename`,
    raising NameError whenever called; use args['hostname'] like every
    other action helper in this file.

    NOTE(review): the hostname is interpolated into a shell command via
    os.system -- callers must pass only trusted PLC hostnames (a
    subprocess.run argument list would be safer).
    """
    os.system("ssh root@%s /sbin/service nm restart" % args['hostname'])
    return
+
+class Action(Thread):
+ def __init__(self, l_action):
+ self.l_action = l_action
+
+ # the hostname to loginbase mapping
+ self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
+
+ # Actions to take.
+ self.diagnose_db = soltesz.if_cached_else(1, "diagnose_out", lambda : {})
+ # Actions taken.
+ self.act_all = soltesz.if_cached_else(1, "act_all", lambda : {})
+
+ # A dict of actions to specific functions. PICKLE doesnt' like lambdas.
+ self.actions = {}
+ self.actions['suspendslices'] = lambda args: plc.suspendSlices(args['hostname'])
+ self.actions['nocreate'] = lambda args: plc.removeSliceCreation(args['hostname'])
+ self.actions['close_rt'] = lambda args: close_rt_backoff(args)
+ self.actions['rins'] = lambda args: plc.nodeBootState(args['hostname'], "rins")
+ self.actions['noop'] = lambda args: args
+ self.actions['reboot_node'] = lambda args: reboot_node(args)
+ self.actions['reset_nodemanager'] = lambda args: args # reset_nodemanager(args)
+
+ self.actions['ticket_waitforever'] = lambda args: args
+ self.actions['waitforever'] = lambda args: args
+ self.actions['unknown'] = lambda args: args
+ self.actions['waitforoneweekaction'] = lambda args: args
+ self.actions['waitfortwoweeksaction'] = lambda args: args
+ self.actions['sendmailagain-waitforoneweekaction'] = lambda args: args
+ self.actions['sendmailagain-waitfortwoweeksaction'] = lambda args: args
+ self.actions['email-againwaitforever'] = lambda args: args
+ self.actions['email-againticket_waitforever'] = lambda args: args
+
+
+ self.sickdb = {}