- args['action'] = 'nocreate'
- # args['action'] = 'rins'
- args['message'] = message[1]
- args['stage'] = 'stage_actintwoweeks'
- diag_node.update(args)
-
- else:
- # the node is bad, but there's no previous record of it.
- args['email'] = TECH
- args['action'] = 'noop'
- args['message'] = message[0]
- args['stage'] = 'stage_actinoneweek'
- diag_node.update(args)
-
- print "%s" % diag_node['log'],
- print "%15s" % args['action']
-
- if nodename not in self.act_all: self.act_all[nodename] = []
- self.act_all[nodename].insert(0,diag_node)
-
- return args
-
def lappend_once(list, element):
    """Append element to list only when it is not already present."""
    # NOTE(review): takes no 'self' and shadows the builtin 'list'; kept
    # as-is because callers may rely on the positional signature.
    if element in list:
        return
    list.append(element)
def sappend_once(string, element, separator=','):
    """
    Append element to the separator-delimited string unless it already
    occurs in it.  Returns (new_string, 1) when appended, (string, 0)
    when it was already present.
    """
    # NOTE(review): substring test, so 'a' also matches inside 'abc'.
    if element in string:
        return (string, 0)
    # %c requires separator to be a single character
    return ("%s%c%s" % (string, separator, element), 1)
-
def analyseSites(self):
    """
    Diagnose and act on every site recorded in self.sickdb.

    Returns a summary dict with counts of sites visited, sites/nodes
    diagnosed, sites emailed, nodes acted on, and the list of all
    loginbases processed.
    """
    counts = {'sites': 0,
              'sites_diagnosed': 0,
              'nodes_diagnosed': 0,
              'sites_emailed': 0,
              'nodes_actedon': 0}
    visited = []

    for loginbase in sorted(self.sickdb.keys()):
        #print "calling diagnoseSite(%s)" % loginbase
        diaglist = self.__diagnoseSite(loginbase, self.sickdb[loginbase])
        visited.append(loginbase)

        if diaglist:
            counts['nodes_diagnosed'] += len(diaglist)
            counts['sites_diagnosed'] += 1

        #print "calling actOnSite(%s)" % loginbase
        (acted, emailed) = self.__actOnSite(loginbase, diaglist)

        counts['sites'] += 1
        counts['nodes_actedon'] += acted
        counts['sites_emailed'] += emailed

    counts['allsites'] = visited
    return counts
-
-
def __diagnoseSite(self, loginbase, rec_nodedict):
    """
    rec_nodedict is a sickdb entry: run __diagnoseNode over each node of
    this site (in sorted hostname order) and return the list of non-None
    diagnosis records.
    """
    diag_list = []
    for nodename in sorted(rec_nodedict.keys()):
        diag_node = self.__diagnoseNode(loginbase, rec_nodedict[nodename])
        if diag_node is not None:
            diag_list.append(diag_node)
    return diag_list
-
def __getDaysDown(self, nodename):
    """
    Return the number of whole days comon reports this node's ssh as
    down, or -1 when there is no reading ("null").
    """
    sshstatus = self.comon.codata[nodename]['sshstatus']
    if sshstatus == "null":
        return -1
    # sshstatus appears to be a duration in seconds -- TODO confirm
    return int(sshstatus) // (60*60*24)
-
def __getStrDaysDown(self, nodename):
    """Return a human-readable '(N days down)' tag, or '' when not down."""
    days = self.__getDaysDown(nodename)
    return "(%d days down)" % days if days > 0 else ""
-
def __getCDVersion(self, nodename):
    """Return the recorded boot CD version for nodename, or '' if unknown."""
    return self.bootcds.get(nodename, "")
-
- def __diagnoseNode(self, loginbase, rec_node):
- # TODO: change the format of the hostname in this
- # record to something more natural.
- nodename = rec_node['nodename']
- buckets = rec_node['bucket']
- diag_record = None
-
- # xyz as determined by monitor
- # down as determined by comon
- if rec_node['stage'] == "stage_rt_working":
- # err, this can be used as a counter of some kind..
- # but otherwise, no diagnosis is necessary, return None, implies that
- # it gets skipped.
- print "DIAG: %20s : %-40s ticket %d" % \
- (loginbase, nodename, rec_node['ticket_id'])
-
- elif "down" in buckets:
- diag_record = {}
- diag_record.update(rec_node)
- diag_record['nodename'] = nodename
- diag_record['message'] = emailTxt.mailtxt.newdown
- diag_record['args'] = {'nodename': nodename}
- s_daysdown = self.__getStrDaysDown(nodename)
- diag_record['info'] = (nodename, s_daysdown, "")
- diag_record['bucket'] = ["down"]
- diag_record['log'] = "DOWN: %20s : %-40s == %20s" % \
- (loginbase, nodename, diag_record['info']),
-
- elif "dbg" in buckets:
- # V2 boot cds as determined by monitor
- s_daysdown = self.__getStrDaysDown(nodename)
- s_cdversion = self.__getCDVersion(nodename)
- diag_record = {}
- diag_record.update(rec_node)
- diag_record['nodename'] = nodename
- diag_record['info'] = (nodename, s_daysdown, s_cdversion)
-
- if nodename in self.bootcds and "v2" in self.bootcds[nodename]:
- diag_record['log'] = "BTCD: %20s : %-40s == %20s" % \
- (loginbase, nodename, self.bootcds[nodename]),
- diag_record['message'] = emailTxt.mailtxt.newbootcd
- diag_record['args'] = {'nodename': nodename}
- # TODO: figure a better 'bucket' scheme, for merge()
- #diag_record['bucket'] = ["monitor"]
- else:
- print "DEBG: %20s : %-40s" % \
- (loginbase, nodename)
- return None
-
- msg = ("dbg mode",
- "Comon reports the node in debug mode, %s" % \
- "but monitor does not know what to do yet.")
- # TODO: replace with a real action
- diag_record['message'] = [msg, msg, msg]
- diag_record['bucket'] = ["dbg"]
- diag_record['args'] = {'nodename': nodename}
- elif "ssh" in buckets:
- pass
- elif "clock_drift" in buckets:
- pass
- elif "dns" in buckets:
- pass
- elif "filerw" in buckets:
- pass
- else:
- print "Unknown buckets!!!! %s" % buckets
- sys.exit(1)
-
- return diag_record
-
-
def __actOnFilerw(self, node):
    """
    Report to PLC when node needs disk checked.
    """
    logger.info("POLICY: Emailing PLC for " + node)
    template = emailTxt.mailtxt.filerw
    subject = template[0] % {'hostname': node}
    body = template[1] % {'hostname': node}
    mailer.email(subject, body, [PLCEMAIL])
    # remember when and why this node was acted on
    self.actionlogdb[node] = ["filerw", None, time.time()]
-
-
def __actOnDNS(self, node):
    """
    Placeholder: no DNS remediation is implemented yet.  Called for
    nodes in the 'dns' bucket; currently does nothing and returns None.
    """
-
-
def __policy(self, node, loginbase, bucket):
    """
    Email the site's tech contact about a sick node.

    BUG FIX: 'daysdown' was referenced below but never defined anywhere,
    so this method always raised NameError; it is now computed with
    __getDaysDown, matching how the rest of the file derives it.

    NOTE(review): the 'bucket' parameter is currently unused -- confirm
    whether per-bucket message selection was intended here.
    """
    daysdown = self.__getDaysDown(node)
    # ...and spam 'em
    target = [TECHEMAIL % loginbase]
    tmp = emailTxt.mailtxt.down
    sbj = tmp[0] % {'hostname': node}
    msg = tmp[1] % {'hostname': node, 'days': daysdown}
    mailer.email(sbj, msg, target)
-
-
def status(self):
    """
    Prints, logs, and emails status of up nodes, down nodes, and buckets.

    Summarizes today's entries from self.emailed and self.squeezed and
    mails the result to SUMTO.  (This docstring used to sit as a bare
    string above the def, where it documented nothing.)
    """
    sub = "Monitor Summary"
    msg = "\nThe following nodes were acted upon: \n\n"
    # Hoist the day-of-month so both loops agree even across midnight.
    today = time.gmtime(time.time())[2]
    # 'action' renamed from 'type', which shadowed the builtin.
    for (node, (action, date)) in self.emailed.items():
        # Print only things acted on today.
        if today == time.gmtime(date)[2]:
            msg += "%s\t(%s)\t%s\n" % (node, action, time.ctime(date))
    msg += "\n\nThe following sites have been 'squeezed':\n\n"
    for (loginbase, (date, action)) in self.squeezed.items():
        # Print only things acted on today.
        if today == time.gmtime(date)[2]:
            msg += "%s\t(%s)\t%s\n" % (loginbase, action, time.ctime(date))
    mailer.email(sub, msg, [SUMTO])
    logger.info(msg)
    return
-
- """
- Store/Load state of emails. When, where, what.
- """
- def emailedStore(self, action):
- try:
- if action == "LOAD":
- f = open(DAT, "r+")
- logger.info("POLICY: Found and reading " + DAT)
- self.emailed.update(pickle.load(f))
- if action == "WRITE":
- f = open(DAT, "w")
- #logger.debug("Writing " + DAT)
- pickle.dump(self.emailed, f)
- f.close()
- except Exception, err:
- logger.info("POLICY: Problem with DAT, %s" %err)
-
def enoughUp(self, loginbase):
    """
    Returns True if more than MINUP nodes are up at a site.

    Returns False when fewer than MINUP are up, and None (falsy) when
    the site has no nodes in the database.  (This docstring used to sit
    as a bare string above the def.)
    """
    allsitenodes = plc.getSiteNodes([loginbase])
    if len(allsitenodes) == 0:
        logger.info("Node not in db")
        return

    numnodes = len(allsitenodes)
    # Get all sick nodes from comon.
    # Perf fix: collect into a set so the membership tests below are O(1)
    # instead of a linear scan per site node.
    sicknodes = set()
    for bucket in self.comon.comon_buckets.keys():
        for host in getattr(self.comon, bucket):
            sicknodes.add(host)
    # Diff.
    for node in allsitenodes:
        if node in sicknodes:
            numnodes -= 1

    if numnodes < MINUP:
        logger.info(
            "POLICY: site with %s has nodes %s up." %(loginbase, numnodes))
        return False
    else:
        return True
+ # TODO: there MUST be a better way to do this...
+ # get fb node record for pcuid
+ fbpcu = None
+ fbnode = FindbadNodeRecord.get_latest_by(hostname=host)
+ if fbnode:
+ fbpcu = FindbadPCURecord.get_latest_by(plc_pcuid=fbnode.plc_pcuid)
+ if fbpcu:
+ pcu_name = fbpcu.pcu_name()
+ else:
+ pcu_name = "error looking up pcu name"
+
+ # get fb pcu record for pcuid
+ # send pcu failure message
+ sitehist.sendMessage('pcufailed_notice', hostname=host, pcu_name=pcu_name)
+ print "send message for host %s PCU Failure" % host
+
+ if nodehist.status == 'failboot' and \
+ changed_greaterthan(nodehist.last_changed, 0.25) and \
+ not found_between(recent_actions, 'bootmanager_restore', 0.5, 0):
+ # send down node notice
+ # delay 0.5 days before retrying...
+
+ print "send message for host %s bootmanager_restore" % host
+ sitehist.runBootManager(host)
+ # sitehist.sendMessage('retry_bootman', hostname=host)
+
+ if nodehist.status == 'down' and \
+ changed_greaterthan(nodehist.last_changed, 2):
+ if not nodehist.firewall and not found_within(recent_actions, 'down_notice', 3.5):
+ # send down node notice
+ sitehist.sendMessage('down_notice', hostname=host)
+ print "send message for host %s down" % host
+
+ if nodehist.firewall and not found_within(recent_actions, 'firewall_notice', 3.5):
+ # send down node notice
+ #email_exception(host, "firewall_notice")
+ sitehist.sendMessage('firewall_notice', hostname=host)
+ print "send message for host %s down" % host
+
+ node_count = node_count + 1
+ print "time: ", time.strftime('%Y-%m-%d %H:%M:%S')
+ sys.stdout.flush()
+ session.flush()
+
+ for i,site in enumerate(sitenames):
+ sitehist = SiteInterface.get_or_make(loginbase=site)
+ siteblack = BlacklistRecord.get_by(loginbase=site)
+ skip_due_to_blacklist=False
+
+ if siteblack and not siteblack.expired():
+ print "skipping %s due to blacklist. will expire %s" % (site, siteblack.willExpire() )
+ skip_due_to_blacklist=True
+ sitehist.clearPenalty()
+ sitehist.applyPenalty()
+ continue
+
+ # TODO: make query only return records within a certin time range,
+ # i.e. greater than 0.5 days ago. or 5 days, etc.
+ recent_actions = sitehist.getRecentActions(loginbase=site)
+
+ print "%s %s %s" % (i, sitehist.db.loginbase, sitehist.db.status)
+
+ if sitehist.db.status == 'down':
+ if sitehist.db.penalty_pause and \
+ changed_greaterthan(sitehist.db.penalty_pause_time, 30):
+
+ email_exception("", "clear pause penalty for site: %s" % sitehist.db.loginbase)
+ sitehist.closeTicket()
+ # NOTE: but preserve the penalty status.
+ sitehist.clearPenaltyPause()
+
+ if sitehist.db.message_id != 0 and \
+ sitehist.db.message_status == 'open' and \
+ not sitehist.db.penalty_pause:
+
+ email_exception("", "pause penalty for site: %s" % sitehist.db.loginbase)
+ sitehist.setPenaltyPause()
+
+ if not sitehist.db.penalty_pause and \
+ not found_within(recent_actions, 'increase_penalty', 7) and \
+ changed_greaterthan(sitehist.db.last_changed, 7):
+
+ # TODO: catch errors
+ sitehist.increasePenalty()
+ sitehist.applyPenalty()
+ sitehist.sendMessage('increase_penalty')
+
+ print "send message for site %s penalty increase" % site
+
+ if sitehist.db.status == 'good':
+ # clear penalty
+ # NOTE: because 'all clear' should have an indefinite status, we
+ # have a boolean value rather than a 'recent action'
+ if sitehist.db.penalty_applied or sitehist.db.penalty_pause:
+ # send message that penalties are cleared.
+
+ sitehist.clearPenalty()
+ sitehist.applyPenalty()
+ sitehist.sendMessage('clear_penalty')
+ sitehist.closeTicket()
+
+ print "send message for site %s penalty cleared" % site
+
+
+ site_count = site_count + 1
+
+ print "time: ", time.strftime('%Y-%m-%d %H:%M:%S')
+ sys.stdout.flush()
+ session.flush()
+
+ session.flush()
+ return
+
+
+if __name__ == "__main__":
+ parser = parsermodule.getParser(['nodesets'])
+ parser.set_defaults( timewait=0,
+ skip=0,
+ rins=False,
+ reboot=False,
+ findbad=False,
+ force=False,
+ nosetup=False,
+ verbose=False,
+ quiet=False,)
+
+ parser.add_option("", "--stopselect", dest="stopselect", metavar="",
+ help="The select string that must evaluate to true for the node to be considered 'done'")
+ parser.add_option("", "--findbad", dest="findbad", action="store_true",
+ help="Re-run findbad on the nodes we're going to check before acting.")
+ parser.add_option("", "--force", dest="force", action="store_true",
+ help="Force action regardless of previous actions/logs.")
+ parser.add_option("", "--rins", dest="rins", action="store_true",
+ help="Set the boot_state to 'rins' for all nodes.")
+ parser.add_option("", "--reboot", dest="reboot", action="store_true",
+ help="Actively try to reboot the nodes, keeping a log of actions.")
+
+ parser.add_option("", "--verbose", dest="verbose", action="store_true",
+ help="Extra debug output messages.")
+ parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
+ help="Do not perform the orginary setup phase.")
+ parser.add_option("", "--skip", dest="skip",
+ help="Number of machines to skip on the input queue.")
+ parser.add_option("", "--timewait", dest="timewait",
+ help="Minutes to wait between iterations of 10 nodes.")
+
+ parser = parsermodule.getParser(['defaults'], parser)
+ config = parsermodule.parse_args(parser)
+
+ fbquery = HistoryNodeRecord.query.all()
+ hostnames = [ n.hostname for n in fbquery ]