X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=policy.py;h=f9605ae77dc6746647840e7f880f8a0b2255b468;hb=035a846d8617889c01cae12bc6d64eb7c48b64bd;hp=d6e51e6be79c6d88d5c3cedbc84477fd5a1efb55;hpb=56565f0af4cf94c47b3cbae369ecfcb8eb535ea6;p=monitor.git diff --git a/policy.py b/policy.py old mode 100644 new mode 100755 index d6e51e6..f9605ae --- a/policy.py +++ b/policy.py @@ -1,342 +1,260 @@ -# -# Copyright (c) 2004 The Trustees of Princeton University (Trustees). -# -# Faiyaz Ahmed -# -# $Id: policy.py,v 1.12 2007/04/06 17:38:14 faiyaza Exp $ -# -# Policy Engine. - -#from monitor import * -from threading import * +#!/usr/bin/python + +# This script is used to manipulate the operational state of nodes in +# different node groups. These are basically set operations on nodes via the +# PLC api. +# +# Take the ng name as an argument.... +# optionally, +# * get a list of nodes in the given nodegroup. +# * set some or all in the set to rins. +# * restart them all. +# * do something else to them all. +# + +import os import time -import logging -import mailer -import emailTxt -import pickle -import Queue -import plc -import reboot -import config - -DAT="./monitor.dat" - -logger = logging.getLogger("monitor") - -# Time to enforce policy -POLSLEEP = 7200 - -# Where to email the summary -SUMTO = "faiyaza@cs.princeton.edu" -TECHEMAIL="tech-%s@sites.planet-lab.org" -PIEMAIL="pi-%s@sites.planet-lab.org" -SLICEMAIL="%s@slices.planet-lab.org" -PLCEMAIL="support@planet-lab.org" - -#Thresholds (DAYS) -SPERDAY = 86400 -PITHRESH = 7 * SPERDAY -SLICETHRESH = 7 * SPERDAY -# Days before attempting rins again -RINSTHRESH = 5 * SPERDAY - -# Days before calling the node dead. -DEADTHRESH = 30 * SPERDAY -# Minimum number of nodes up before squeezing -MINUP = 2 - -# IF: -# no SSH, down. -# bad disk, down -# DNS, kinda down (sick) -# clock, kinda down (sick) -# Full disk, going to be down - -# Actions: -# Email -# suspend slice creation -# kill slices - - -class Policy(Thread): - def __init__(self, comonthread, sickNoTicket, emailed): - self.cmn = comonthread - # host - > (time of email, type of email) - self.emailed = emailed - # all sick nodes w/o tickets - # from thread - self.sickNoTicket = sickNoTicket - # Actions taken on nodes. - # actionlogdb{node: [action, date]} - self.actionlogdb = {} - # Actions taken on sites. - # sitelogdb{site: [action, daysdown, date]} - self.sitelogdb = {} - # sick nodes with no tickets - # sickdb{loginbase: [{hostname1: [buckets]}, {...}]} - self.sickdb = {} - Thread.__init__(self) - - - def accumSickSites(self): - """ - Take all sick nodes, find their sites, and put in - sickdb{loginbase: [{hostname1: [buckets]}, {...}]} - """ - while self.sickNoTicket.empty() == False: - node = self.sickNoTicket.get(block = True) - bkts= [] - for bkt in self.cmn.comonbkts.keys(): - if (node in getattr(self.cmn, bkt)): - bkts.append("%s" % bkt) - self.sickdb[plc.siteId(node)] = {node: bkts} - - - def __actOnDebug(self, node): - """ - If in debug, set the node to rins, reboot via PCU/POD - """ - daysdown = self.cmn.codata[node]['sshstatus'] // (60*60*24) - logger.info("POLICY: Node %s in dbg. down for %s" %(node,daysdown)) - plc.nodeBootState(node, "rins") - # If it has a PCU - reboot.reboot(node) - # Log it - self.actionlogdb[node] = ['rins', daysdown, time.time()] - - - def __actOnDown(self, node): - """ - If down (not debug), do the same as actOnDebug for now - """ - self.__actOnDebug(node) - - - def __actOnFilerw(self, node): - """ - Report to PLC when node needs disk checked. 
- """ - target = [PLCEMAIL] - logger.info("POLICY: Emailing PLC for " + node) - tmp = emailTxt.mailtxt.filerw - sbj = tmp[0] % {'hostname': node} - msg = tmp[1] % {'hostname': node} - mailer.email(sbj, msg, target) - self.actionlogdb[node] = ["filerw", None, time.time()] - - - def __actOnDNS(self, node): - """ - """ - - - def __policy(self, node, loginbase, bkt): - # ...and spam 'em - target = [TECHEMAIL % loginbase] - tmp = emailTxt.mailtxt.down - sbj = tmp[0] % {'hostname': node} - msg = tmp[1] % {'hostname': node, 'days': daysdown} - mailer.email(sbj, msg, target) - - - - - def actOnSick(self): - """ - Acts on sick nodes. - """ - global TECHEMAIL, PIEMAIL - - # Princeton Backdoor - if loginbase == "princeton": return - - # Send appropriate message for node if in appropriate bucket. - # If we know where to send a message - if not loginbase: - logger.info("POLICY: loginbase for %s not found" %node) - # And we didn't email already. - else: - # If first email, send to Tech - target = [TECHEMAIL % loginbase] - - # If disk is foobarred, PLC should check it. - if (node in self.cmn.filerw) and \ - (node not in self.emailed.keys()): - self.__actOnFilerw(node) - return - - # If in dbg, set to rins, then reboot. Inform PLC. - if (node in self.cmn.dbg): - self.__actOnDebug(node) - - if (node in self.emailed.keys()) and \ - (node not in self.cmn.filerw) and \ - (node not in self.cmn.clock_drift): - # If we emailed before, how long ago? - delta = time.time() - self.emailed[node][1] - if delta < SPERDAY: - logger.info("POLICY: already acted on %s today." % node) - return - - logger.info("POLICY: acted %s on %s days ago" % (node, - delta // SPERDAY)) - - # If no luck with tech, email PI - if (delta >= SPERDAY): - target.append(PIEMAIL % loginbase) - - if (delta >= 7 * SPERDAY): - #remove slice creation if enough nodes arent up - if not self.enoughUp(loginbase): - slices = plc.slices(loginbase) - if len(slices) >= 1: - for slice in slices: - target.append(SLICEMAIL % slice) - logger.info("POLICY: Removing slice creation from %s" % loginbase) - tmp = emailTxt.mailtxt.removedSliceCreation - sbj = tmp[0] - msg = tmp[1] % {'loginbase': loginbase} - plc.removeSliceCreation(node) - mailer.email(sbj, msg, target) - self.squeezed[loginbase] = (time.time(), "creation") - self.emailed[node] = ("creation", time.time()) - logger.info("POLICY: Emailing (%s) %s - %s"\ - %("creation", node, target)) - return - - if (delta >= 14 * SPERDAY): - target.append(PIEMAIL % loginbase) - # Email slices at site. - slices = plc.slices([loginbase]) - if len(slices) >= 1: - for slice in slices: - target.append(SLICEMAIL % slice) - # If not enough up, freeze slices and email everyone. - if not self.enoughUp(loginbase): - logger.info("POLICY: Suspending %s slices." % loginbase) - tmp = emailTxt.mailtxt.suspendSlices - sbj = tmp[0] - msg = tmp[1] % {'loginbase': loginbase} - plc.suspendSlices([node]) - self.squeezed[loginbase] = (time.time(), "freeze") - mailer.email(sbj, msg, target) - self.emailed[node] = ("freeze", time.time()) - logger.info("POLICY: Emailing (%s) %s - %s"\ - %("freeze", node, target)) - - return - - # Find the bucket the node is in and send appropriate email - # to approriate list of people. - for bkt in self.cmn.comonbkts.keys(): - if (node in getattr(self.cmn, bkt)): - # Send predefined message for that bucket. 
- logger.info("POLICY: Emailing (%s) %s - %s"\ - %(bkt, node, target)) - tmp = getattr(emailTxt.mailtxt, bkt) - sbj = tmp[0] % {'hostname': node} - msg = tmp[1] % {'hostname': node} - mailer.email(sbj, msg, target) - self.emailed[node] = (bkt , time.time()) - return - - - """ - Prints, logs, and emails status of up nodes, down nodes, and buckets. - """ - def status(self): - sub = "Monitor Summary" - msg = "\nThe following nodes were acted upon: \n\n" - for (node, (type, date)) in self.emailed.items(): - # Print only things acted on today. - if (time.gmtime(time.time())[2] == time.gmtime(date)[2]): - msg +="%s\t(%s)\t%s\n" %(node, type, time.ctime(date)) - msg +="\n\nThe following sites have been 'squeezed':\n\n" - for (loginbase, (date, type)) in self.squeezed.items(): - # Print only things acted on today. - if (time.gmtime(time.time())[2] == time.gmtime(date)[2]): - msg +="%s\t(%s)\t%s\n" %(loginbase, type, time.ctime(date)) - mailer.email(sub, msg, [SUMTO]) - logger.info(msg) - return - - """ - Store/Load state of emails. When, where, what. - """ - def emailedStore(self, action): +import traceback +import sys +from optparse import OptionParser + +from monitor import config +from monitor import parser as parsermodule +from monitor.common import * +from monitor.model import * +from monitor.wrapper import plc +from monitor.wrapper import plccache +from monitor.database.info.model import * +from monitor.database.info.interface import * + +from nodequery import verify,query_to_dict,node_select + +api = plc.getAuthAPI() + +def logic(): + + plc.nodeBootState(host, 'reinstall') + node_end_record(host) + +def main(hostnames, sitenames): + # commands: + i = 1 + node_count = 1 + site_count = 1 + #print "hosts: %s" % hostnames + for i,host in enumerate(hostnames): try: - if action == "LOAD": - f = open(DAT, "r+") - logger.info("POLICY: Found and reading " + DAT) - self.emailed.update(pickle.load(f)) - if action == "WRITE": - f = open(DAT, "w") - #logger.debug("Writing " + DAT) - pickle.dump(self.emailed, f) - f.close() - except Exception, err: - logger.info("POLICY: Problem with DAT, %s" %err) - - """ - Returns True if more than MINUP nodes are up at a site. - """ - def enoughUp(self, loginbase): - allsitenodes = plc.getSiteNodes([loginbase]) - if len(allsitenodes) == 0: - logger.info("Node not in db") - return - - numnodes = len(allsitenodes) - sicknodes = [] - # Get all sick nodes from comon - for bucket in self.cmn.comonbkts.keys(): - for host in getattr(self.cmn, bucket): - sicknodes.append(host) - # Diff. - for node in allsitenodes: - if node in sicknodes: - numnodes -= 1 - - if numnodes < MINUP: - logger.info(\ -"POLICY: site with %s has nodes %s up." %(loginbase, numnodes)) - return False - else: - return True - - - - - def run(self): - self.accumSickSites() - #self.actOnSick() - #self.emailedStore("WRITE") - print self.sickdb + lb = plccache.plcdb_hn2lb[host] + except: + print "unknown host in plcdb_hn2lb %s" % host + email_exception(host) + continue + + nodeblack = BlacklistRecord.get_by(hostname=host) + + if nodeblack and not nodeblack.expired(): + print "skipping %s due to blacklist. 
will expire %s" % (host, nodeblack.willExpire() ) + continue + + sitehist = SiteInterface.get_or_make(loginbase=lb) + + recent_actions = sitehist.getRecentActions(hostname=host) + + nodehist = HistoryNodeRecord.findby_or_create(hostname=host) + + print "%s %s %s" % (i, nodehist.hostname, nodehist.status) + if nodehist.status == 'good' and \ + changed_lessthan(nodehist.last_changed, 1.0) and \ + found_within(recent_actions, 'down_notice', 7.0) and \ + not found_within(recent_actions, 'online_notice', 0.5): + # NOTE: chronicly flapping nodes will not get 'online' notices + # since, they are never up long enough to be 'good'. + # NOTE: searching for down_notice proves that the node has + # gone through a 'down' state first, rather than just + # flapping through: good, offline, online, ... + # + # NOTE: there is a narrow window in which this command must be + # evaluated, otherwise the notice will not go out. + # this is not ideal. + sitehist.sendMessage('online_notice', hostname=host, viart=False, saveact=True) + print "send message for host %s online" % host + + + # if a node is offline and doesn't have a PCU, remind the user that they should have one. + #if not nodehist.haspcu and nodehist.status in ['offline', 'down'] and \ + # changed_greaterthan(nodehist.last_changed,1.0) and \ + # not found_within(recent_actions, 'pcumissing_notice', 7.0): + # + # sitehist.sendMessage('pcumissing_notice', hostname=host) + # print "send message for host %s pcumissing_notice" % host + + # if it is offline and HAS a PCU, then try to use it. + if nodehist.haspcu and nodehist.status in ['offline', 'down'] and \ + changed_greaterthan(nodehist.last_changed,1.0) and \ + not found_between(recent_actions, 'try_reboot', 3.5, 1): + + sitehist.attemptReboot(host) + print "send message for host %s try_reboot" % host + + # NOTE: non-intuitive is that found_between(try_reboot, 3.5, 1) + # will be false for a day after the above condition is satisfied + if nodehist.haspcu and nodehist.status in ['offline', 'down'] and \ + changed_greaterthan(nodehist.last_changed,1.5) and \ + found_between(recent_actions, 'try_reboot', 3.5, 1) and \ + not found_within(recent_actions, 'pcufailed_notice', 3.5): + + # send pcu failure message + #act = ActionRecord(**kwargs) + sitehist.sendMessage('pcufailed_notice', hostname=host) + print "send message for host %s PCU Failure" % host + + if nodehist.status == 'monitordebug' and \ + changed_greaterthan(nodehist.last_changed, 1) and \ + not found_between(recent_actions, 'bootmanager_restore', 0.5, 0): + # send down node notice + # delay 0.5 days before retrying... + + print "send message for host %s bootmanager_restore" % host + sitehist.runBootManager(host) + # sitehist.sendMessage('retry_bootman', hostname=host) + + if nodehist.status == 'down' and \ + changed_greaterthan(nodehist.last_changed, 2) and \ + not found_within(recent_actions, 'down_notice', 3.5): + # send down node notice + + sitehist.sendMessage('down_notice', hostname=host) + print "send message for host %s down" % host + + node_count = node_count + 1 + print "time: ", time.strftime('%Y-%m-%d %H:%M:%S') + sys.stdout.flush() + session.flush() + + for i,site in enumerate(sitenames): + sitehist = SiteInterface.get_or_make(loginbase=site) + siteblack = BlacklistRecord.get_by(loginbase=site) + skip_due_to_blacklist=False + + if siteblack and not siteblack.expired(): + print "skipping %s due to blacklist. 
will expire %s" % (site, siteblack.willExpire() ) + skip_due_to_blacklist=True + continue + + # TODO: make query only return records within a certin time range, + # i.e. greater than 0.5 days ago. or 5 days, etc. + recent_actions = sitehist.getRecentActions(loginbase=site) + + print "%s %s %s" % (i, sitehist.db.loginbase, sitehist.db.status) + + # determine if there are penalties within the last 30 days? + # if so, add a 'pause_penalty' action. + if sitehist.db.message_id != 0 and sitehist.db.message_status == 'open' and \ + sitehist.db.penalty_level > 0 and not found_within(recent_actions, 'pause_penalty', 30): + # pause escalation + print "Pausing penalties for %s" % site + sitehist.pausePenalty() + else: + + if sitehist.db.status == 'down': + if not found_within(recent_actions, 'pause_penalty', 30) and \ + not found_within(recent_actions, 'increase_penalty', 7) and \ + changed_greaterthan(sitehist.db.last_changed, 7): + + # TODO: catch errors + sitehist.increasePenalty() + sitehist.applyPenalty() + sitehist.sendMessage('increase_penalty') + + print "send message for site %s penalty increase" % site + + if sitehist.db.status == 'good': + # clear penalty + # NOTE: because 'all clear' should have an indefinite status, we + # have a boolean value rather than a 'recent action' + if sitehist.db.penalty_applied: + # send message that penalties are cleared. + + sitehist.clearPenalty() + sitehist.applyPenalty() + sitehist.sendMessage('clear_penalty') + sitehist.closeTicket() + + print "send message for site %s penalty cleared" % site + + + site_count = site_count + 1 + + print "time: ", time.strftime('%Y-%m-%d %H:%M:%S') + sys.stdout.flush() + session.flush() + + session.flush() + return + + +if __name__ == "__main__": + parser = parsermodule.getParser(['nodesets']) + parser.set_defaults( timewait=0, + skip=0, + rins=False, + reboot=False, + findbad=False, + force=False, + nosetup=False, + verbose=False, + quiet=False,) + + parser.add_option("", "--stopselect", dest="stopselect", metavar="", + help="The select string that must evaluate to true for the node to be considered 'done'") + parser.add_option("", "--findbad", dest="findbad", action="store_true", + help="Re-run findbad on the nodes we're going to check before acting.") + parser.add_option("", "--force", dest="force", action="store_true", + help="Force action regardless of previous actions/logs.") + parser.add_option("", "--rins", dest="rins", action="store_true", + help="Set the boot_state to 'rins' for all nodes.") + parser.add_option("", "--reboot", dest="reboot", action="store_true", + help="Actively try to reboot the nodes, keeping a log of actions.") + + parser.add_option("", "--verbose", dest="verbose", action="store_true", + help="Extra debug output messages.") + parser.add_option("", "--nosetup", dest="nosetup", action="store_true", + help="Do not perform the orginary setup phase.") + parser.add_option("", "--skip", dest="skip", + help="Number of machines to skip on the input queue.") + parser.add_option("", "--timewait", dest="timewait", + help="Minutes to wait between iterations of 10 nodes.") + + parser = parsermodule.getParser(['defaults'], parser) + config = parsermodule.parse_args(parser) + + fbquery = HistoryNodeRecord.query.all() + hostnames = [ n.hostname for n in fbquery ] + fbquery = HistorySiteRecord.query.all() + sitenames = [ s.loginbase for s in fbquery ] + + if config.site: + # TODO: replace with calls to local db. the api fails so often that + # these calls should be regarded as unreliable. 
+ l_nodes = plccache.GetNodesBySite(config.site) + filter_hostnames = [ n['hostname'] for n in l_nodes ] + + hostnames = filter(lambda x: x in filter_hostnames, hostnames) + sitenames = [config.site] + if config.node: + hostnames = [ config.node ] + sitenames = [ plccache.plcdb_hn2lb[config.node] ] -def main(): - logger.setLevel(logging.DEBUG) - ch = logging.StreamHandler() - ch.setLevel(logging.DEBUG) - formatter = logging.Formatter('%(message)s') - ch.setFormatter(formatter) - logger.addHandler(ch) - - #print NodesDebug() - #tmp = Queue.Queue() - #a = Policy(None, tmp) - #a.emailedStore("LOAD") - #print a.emailed - - #print plc.slices([plc.siteId(["alice.cs.princeton.edu"])]) - os._exit(0) -if __name__ == '__main__': - import os - import plc try: - main() + main(hostnames, sitenames) + session.flush() except KeyboardInterrupt: - print "Killed. Exitting." - logger.info('Monitor Killed') - os._exit(0) + print "Killed by interrupt" + session.flush() + sys.exit(0) + except: + email_exception() + print traceback.print_exc(); + print "fail all..."
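
The rewritten main() above drives every per-node action from a small set of time-window predicates imported from monitor.common: changed_lessthan, changed_greaterthan, found_within, and found_between. Their implementations are not part of this patch, so the following is a minimal sketch of the semantics implied by the call sites; the SECONDS_PER_DAY constant, the _age_days helper, and the action_type/date_created attribute names are assumptions for illustration, not the real monitor.model fields.

import time

SECONDS_PER_DAY = 24 * 60 * 60

def changed_lessthan(last_changed, days):
    # the node/site state changed less than `days` days ago
    return time.time() - last_changed < days * SECONDS_PER_DAY

def changed_greaterthan(last_changed, days):
    # the node/site state has been unchanged for more than `days` days
    return time.time() - last_changed > days * SECONDS_PER_DAY

def _age_days(act):
    # placeholder accessor: assumes each recent action carries a creation time
    return (time.time() - act.date_created) / float(SECONDS_PER_DAY)

def found_within(recent_actions, action_type, days):
    # an action of this type was taken within the last `days` days
    return any(a.action_type == action_type and _age_days(a) < days
               for a in recent_actions)

def found_between(recent_actions, action_type, max_days, min_days):
    # an action of this type is between `min_days` and `max_days` days old;
    # e.g. found_between(acts, 'try_reboot', 3.5, 1) ignores reboot attempts
    # newer than one day, which is why the pcufailed_notice branch fires only
    # a day or more after a reboot attempt, as the NOTE in the patch says.
    return any(a.action_type == action_type and
               min_days < _age_days(a) < max_days
               for a in recent_actions)

Read with these definitions, the node loop is a small state machine: a node that turned 'good' within the last day after a down_notice gets an online_notice; an 'offline' or 'down' node with a PCU gets a reboot attempt via its PCU, followed by a pcufailed_notice if it is still down a day or more later; a node stuck in 'monitordebug' has BootManager re-run, retrying at most every half day; and a node 'down' for more than two days gets a down_notice at most every 3.5 days.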
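
On the site side, the loop encodes a three-way escalation policy around the penalty level stored on the HistorySiteRecord. The same logic, condensed into one function for reference, using only the names that appear in the patch; this restates the behaviour rather than adding to it:

def site_policy(sitehist, recent_actions):
    # condensed restatement of the site loop in main() above
    db = sitehist.db
    if db.message_id != 0 and db.message_status == 'open' and \
            db.penalty_level > 0 and \
            not found_within(recent_actions, 'pause_penalty', 30):
        # an open ticket with penalties outstanding: pause escalation
        sitehist.pausePenalty()
    elif db.status == 'down' and \
            not found_within(recent_actions, 'pause_penalty', 30) and \
            not found_within(recent_actions, 'increase_penalty', 7) and \
            changed_greaterthan(db.last_changed, 7):
        # down for a week, no pause in 30 days and no increase in the last week
        sitehist.increasePenalty()
        sitehist.applyPenalty()
        sitehist.sendMessage('increase_penalty')
    elif db.status == 'good' and db.penalty_applied:
        # the site recovered: lift the penalty and close the ticket
        sitehist.clearPenalty()
        sitehist.applyPenalty()
        sitehist.sendMessage('clear_penalty')
        sitehist.closeTicket()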
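
Of the command-line options registered in the __main__ block, only config.site and config.node are consulted elsewhere in this file; --stopselect, --findbad, --force, --rins, --reboot, --nosetup, --skip and --timewait are parsed but never read here. The --site and --node flags themselves come from the 'defaults' parser, which is not shown in this patch, so the invocations below are assumptions about its interface rather than documented usage:

    ./policy.py                                  # act on every host and site in the history tables
    ./policy.py --site princeton                 # limit to one site's nodes, resolved via plccache
    ./policy.py --node planetlab-1.example.org   # a single (hypothetical) host plus its site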