add pcu_name to pcufailed_notice
[monitor.git] / policy.py
old mode 100644 (file)
new mode 100755 (executable)
index 1af7ab7..cb5d93f
--- a/policy.py
+++ b/policy.py
-#
-# Copyright (c) 2004  The Trustees of Princeton University (Trustees).
-#
-# Faiyaz Ahmed <faiyaza@cs.princeton.edu>
-#
-# $Id: policy.py,v 1.4 2006/11/14 19:20:13 faiyaza Exp $
-#
-# Policy Engine.
-
-#from monitor import *
-from threading import *
+#!/usr/bin/python
+
+# Policy engine: walk the node and site history records and, based on how long
+# each has been in its current state, take the appropriate escalation step.
+#
+# For each node, as needed:
+#  * send an 'online' notice when a recently-down node comes back.
+#  * attempt a reboot through the node's PCU, and report PCU errors/failures.
+#  * re-run the boot manager on nodes stuck in 'failboot'.
+#  * send 'down' or 'firewall' notices for nodes that stay unreachable.
+# For each site: increase, pause, or clear penalties as its status changes.
+#
+
+import os
 import time
-import logging
-import mailer
-import emailTxt
-import pickle
-import Queue
-import plc
-import reboot
-import config
-
-DAT="./monitor.dat"
-
-logger = logging.getLogger("monitor")
-
-# Time to enforce policy
-POLSLEEP = 7200
-
-# Where to email the summary
-SUMTO = "pupadm@lists.planet-lab.org"
-TECHEMAIL="tech-%s@sites.planet-lab.org"
-PIEMAIL="pi-%s@sites.planet-lab.org"
-SLICEMAIL="%s@slices.planet-lab.org"
-PLCEMAIL="support@planet-lab.org"
-
-#Thresholds (DAYS)
-SPERDAY = 86400
-PITHRESH = 1 * SPERDAY
-SLICETHRESH = 5 * SPERDAY
-# Days before attempting rins again
-RINSTHRESH = 5 * SPERDAY
-
-# Minimum number of nodes up before squeezing
-MINUP = 2
-
-# IF:
-#  no SSH, down.
-#  bad disk, down
-#  DNS, kinda down (sick)
-#  clock, kinda down (sick)
-#  Full disk, going to be down
-
-# Actions:
-#  Email
-#  suspend slice creation
-#  kill slices
-class Policy(Thread):
-       def __init__(self, comonthread, sickNoTicket, emailed):
-               self.cmn = comonthread
-               # host - > (time of email, type of email)
-               self.emailed = emailed 
-               # all sick nodes w/o tickets
-               self.sickNoTicket = sickNoTicket
-               # Sitess we've Squeezed.
-               self.squeezed = {}
-               Thread.__init__(self)
-       
+import traceback
+import sys
+from optparse import OptionParser
 
-       '''
-       What to do when node is in dbg (as reported by CoMon).
-       '''
-       def __actOnDebug(self, node):
-               # Check to see if we've done this before
-               if (node in self.emailed.keys()):
-                       if (self.emailed[node][0] == "dbg"):
-                               delta = time.time() - self.emailed[node][1]
-                               if (delta <= RINSTHRESH ):
-                                       # Don't mess with node if under Thresh. 
-                                       # Return, move on.
-                                       logger.info("POLICY:  %s in dbg, but acted on %s days ago" % (node, delta // SPERDAY))
-                                       return
-                       logger.info("POLICY:  Node in dbg - " + node)
-                       plc.nodeBootState(node, "rins") 
-                       # If it has a PCU
-                       return reboot.reboot(node)
-       
-       '''
-       What to do when node is in dbg (as reported by CoMon).
-       '''
-       def __actOnFilerw(self, node):
-               target = [PLCEMAIL]     
-               logger.info("POLICY:  Emailing PLC for " + node)
-               tmp = emailTxt.mailtxt.filerw
-               sbj = tmp[0] % {'hostname': node}
-               msg = tmp[1] % {'hostname': node}
-               mailer.email(sbj, msg, target)  
-               self.emailed[node] = ("filerw", time.time())
-
-
-       '''
-       Acts on sick nodes.
-       '''
-       def actOnSick(self):
-               # Get list of nodes in debug from PLC
-               #dbgNodes = NodesDebug()
-               global TECHEMAIL, PIEMAIL
-               # Grab a node from the queue (pushed by rt thread).
-               node = self.sickNoTicket.get(block = True)
-               # Get the login base    
-               loginbase = plc.siteId(node)
-
-               # Send appropriate message for node if in appropriate bucket.
-               # If we know where to send a message
-               if not loginbase: 
-                       logger.info("POLICY:  loginbase for %s not found" %node)
-               # And we didn't email already.
-               else:
-                       # If first email, send to Tech
-                       target = [TECHEMAIL % loginbase]
-                       
-                       # If disk is foobarred, PLC should check it.
-                       if (node in self.cmn.filerw) and \
-                       (node not in self.emailed.keys()):
-                               self.__actOnFilerw(node)
-                               return 
-
-                       # If in dbg, set to rins, then reboot.  Inform PLC.
-                       if (node in self.cmn.dbg):
-                       # If reboot failure via PCU, POD and send email
-                       # if contacted PCU, return
-                               if self.__actOnDebug(node):  return
-
-                       if (node in self.emailed.keys()) and \
-                       (node not in self.cmn.filerw)    and \
-                       (node not in self.cmn.clock_drift):
-                               # If we emailed before, how long ago?   
-                               delta = time.time() - self.emailed[node][1]
-                               if delta < SPERDAY:  
-                                       logger.info("POLICY:  already acted on %s today." % node)
-                                       return
-
-                               logger.info("POLICY:  acted %s on %s days ago" % (node, 
-                               delta // SPERDAY))
-
-                               # If more than PI thresh, but less than slicethresh
-                               if (delta >= PITHRESH) and (delta < SLICETHRESH): 
-                                       target.append(PIEMAIL % loginbase)
-                                       #remove slice creation if enough nodes arent up
-                                       if not self.enoughUp(loginbase):
-                                               slices = plc.slices(loginbase)
-                                               if len(slices) >= 1:
-                                                       for slice in slices:
-                                                               target.append(SLICEMAIL % slice)
-                                               logger.info("POLICY:  Removing slice creation from %s" % loginbase)
-                                               tmp = emailTxt.mailtxt.removedSliceCreation
-                                               sbj = tmp[0] 
-                                               msg = tmp[1] % {'loginbase': loginbase}
-                                               plc.removeSliceCreation(node)
-                                               mailer.email(sbj, msg, target)  
-                                               self.squeezed[loginbase] = (time.time(), "creation")
-                                               return
-
-                               # If more than PI thresh and slicethresh
-                               if (delta >= PITHRESH) and (delta > SLICETHRESH):
-                                       target.append(PIEMAIL % loginbase)
-                                       # Email slices at site.
-                                       slices = plc.slices(loginbase)
-                                       if len(slices) >= 1:
-                                               for slice in slices:
-                                                       target.append(SLICEMAIL % slice)
-                                       # If not enough up, freeze slices and email everyone.
-                                       if not self.enoughUp(loginbase):
-                                               logger.info("POLICY:  Suspending %s slices." % loginbase)
-                                               tmp = emailTxt.mailtxt.suspendSlices
-                                               sbj = tmp[0] 
-                                               msg = tmp[1] % {'loginbase': loginbase}
-                                               plc.suspendSlices(node)
-                                               self.squeezed[loginbase] = (time.time(), "freeze")
-                                               mailer.email(sbj, msg, target)  
-                                               return
-
-                       # Find the bucket the node is in and send appropriate email
-                       # to approriate list of people.
-                       for bkt in self.cmn.comonbkts.keys():
-                               if (node in getattr(self.cmn, bkt)):
-                                       # Send predefined message for that bucket.
-                                       logger.info("POLICY: Emailing (%s) %s - %s"\
-                                               %(bkt, node, target))
-                                       tmp = getattr(emailTxt.mailtxt, bkt)
-                                       sbj = tmp[0] % {'hostname': node}
-                                       msg = tmp[1] % {'hostname': node}
-                                       mailer.email(sbj, msg, target)  
-                                       self.emailed[node] = (bkt , time.time())
-                                       return
-
-
-       '''
-       Prints, logs, and emails status of up nodes, down nodes, and buckets.
-       '''
-       def status(self):
-               sub = "Monitor Summary"
-               msg = "\nThe following nodes were acted upon:  \n\n"
-               for (node, (type, date)) in self.emailed.items():
-                       # Print only things acted on today.
-                       if (time.gmtime(time.time())[2] == time.gmtime(date)[2]):
-                               msg +="%s\t(%s)\t%s\n" %(node, type, time.ctime(date))
-               msg +="\n\nThe following sites have been 'squeezed':\n\n"
-               for (loginbase, (date, type)) in self.squeezed.items():
-                       # Print only things acted on today.
-                       if (time.gmtime(time.time())[2] == time.gmtime(date)[2]):
-                               msg +="%s\t(%s)\t%s\n" %(loginbase, type, time.ctime(date))
-               mailer.email(sub, msg, [SUMTO])
-               logger.info(msg)
-               return 
-
-       '''
-       Store/Load state of emails.  When, where, what.
-       '''
-       def emailedStore(self, action):
+from monitor import config
+from monitor import parser as parsermodule
+from monitor.common import *
+from monitor.model import *
+from monitor.wrapper import plc
+from monitor.wrapper import plccache
+from monitor.database.info.model import *
+from monitor.database.info.interface import *
+
+from nodequery import verify,query_to_dict,node_select
+
+api = plc.getAuthAPI()
+
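+# NOTE: the numeric arguments to found_within()/found_between() and the
+#               changed_lessthan()/changed_greaterthan() helpers below are time
+#               thresholds in days (e.g. 0.5 = half a day, 30 = a month).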
+def logic(host):
+       # set the node to reinstall on next boot and close out its history record
+       plc.nodeBootState(host, 'reinstall')
+       node_end_record(host)
+
+def main(hostnames, sitenames):
+       node_count = 1
+       site_count = 1
+
+       for i,host in enumerate(hostnames):
                try:
-                       if action == "LOAD":
-                               f = open(DAT, "r+")
-                               logger.info("POLICY:  Found and reading " + DAT)
-                               self.emailed.update(pickle.load(f))
-                       if action == "WRITE":
-                               f = open(DAT, "w")
-                               #logger.debug("Writing " + DAT)
-                               pickle.dump(self.emailed, f)
-                       f.close()
-               except Exception, err:
-                       logger.info("POLICY:  Problem with DAT, %s" %err)
-
-       '''
-       Returns True if more than MINUP nodes are up at a site.
-       '''
-       def enoughUp(self, loginbase):
-               allsitenodes = plc.getSiteNodes(loginbase)
-               if len(allsitenodes) == 0:
-                       logger.info("Node not in db")
-                       return
-
-               numnodes = len(allsitenodes)
-               sicknodes = []
-               # Get all sick nodes from comon
-               for bucket in self.cmn.comonbkts.keys():
-                       for host in getattr(self.cmn, bucket):
-                               sicknodes.append(host)
-               # Diff.
-               for node in allsitenodes:
-                       if node in sicknodes:
-                               numnodes -= 1
-
-               if numnodes < MINUP:
-                       logger.info(\
-"POLICY:  site with %s has nodes %s up." %(loginbase, numnodes))
-                       return False 
-               else: 
-                       return True 
-                       
-               
-
-
-       def run(self):
-               while 1:
-                       self.actOnSick()
-                       self.emailedStore("WRITE")
-
-
-def main():
-       logger.setLevel(logging.DEBUG)
-       ch = logging.StreamHandler()
-       ch.setLevel(logging.DEBUG)
-       formatter = logging.Formatter('%(message)s')
-       ch.setFormatter(formatter)
-       logger.addHandler(ch)
-
-       #print NodesDebug()
-       #tmp = Queue.Queue()
-       #a = Policy(None, tmp) 
-       #a.emailedStore("LOAD")
-       #print a.emailed
-
-       print plc.slices(plc.siteId("alice.cs.princeton.edu"))
-       os._exit(0)
-if __name__ == '__main__':
-       import os
-       import plc
+                       lb = plccache.plcdb_hn2lb[host]
+               except:
+                       print "unknown host in plcdb_hn2lb %s" % host
+                       email_exception(host)
+                       continue
+
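+               # skip hosts that are still covered by an un-expired blacklist entry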
+               nodeblack = BlacklistRecord.get_by(hostname=host)
+
+               if nodeblack and not nodeblack.expired():
+                       print "skipping %s due to blacklist.  will expire %s" % (host, nodeblack.willExpire() )
+                       continue
+
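+               # load the site interface, recent actions, and node history for this host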
+               sitehist = SiteInterface.get_or_make(loginbase=lb)
+
+               recent_actions = sitehist.getRecentActions(hostname=host)
+
+               nodehist = HistoryNodeRecord.findby_or_create(hostname=host)
+
+               print "%s %s %s" % (i, nodehist.hostname, nodehist.status)
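+               # node has recently come back up after being reported down:
+               # send an 'online' notice (at most one per half day).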
+               if nodehist.status == 'good' and \
+                       changed_lessthan(nodehist.last_changed, 1.0) and \
+                       found_within(recent_actions, 'down_notice', 7.0) and \
+                       not found_within(recent_actions, 'online_notice', 0.5):
+                               # NOTE: chronically flapping nodes will not get 'online' notices
+                               #               since they are never up long enough to be 'good'.
+                               # NOTE: requiring a recent down_notice proves that the node has
+                               #               gone through a 'down' state first, rather than just
+                               #               flapping through: good, offline, online, ...
+                               # NOTE: there is a narrow window in which this check must be
+                               #               evaluated, otherwise the notice will not go out.
+                               #               This is not ideal.
+                               sitehist.sendMessage('online_notice', hostname=host, viart=False, saveact=True)
+                               print "send message for host %s online" % host
+
+
+               # if a node is offline and doesn't have a PCU, remind the user that they should have one.
+               #if not nodehist.haspcu and nodehist.status in ['offline', 'down'] and \
+               #       changed_greaterthan(nodehist.last_changed,1.0) and \
+               #       not found_within(recent_actions, 'pcumissing_notice', 7.0):
+               #
+               #               sitehist.sendMessage('pcumissing_notice', hostname=host)
+               #               print "send message for host %s pcumissing_notice" % host
+
+               # if it is offline and HAS a PCU, then try to use it.
+               if nodehist.haspcu and nodehist.status in ['offline', 'down'] and \
+                       changed_greaterthan(nodehist.last_changed,1.0) and \
+                       not nodehist.firewall and \
+                       not found_between(recent_actions, 'try_reboot', 3.5, 1):
+
+                               # TODO: there MUST be a better way to do this... 
+                               # get fb node record for pcuid
+                               fbpcu = None
+                               fbnode = FindbadNodeRecord.get_latest_by(hostname=host)
+                               if fbnode:
+                                       fbpcu = FindbadPCURecord.get_latest_by(plc_pcuid=fbnode.plc_pcuid)
+
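+                               # attempt a reboot through the PCU and log the action;
+                               # if the PCU itself reports errors, notify the site.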
+                               sitehist.attemptReboot(host)
+                               print "send message for host %s try_reboot" % host
+                               if (not fbpcu or not fbpcu.test_is_ok()) and \
+                                       not found_within(recent_actions, 'pcuerror_notice', 3.0):
+
+                                       args = {}
+                                       if fbpcu:
+                                               args['pcu_name'] = fbpcu.pcu_name()
+                                               args['pcu_errors'] = fbpcu.pcu_errors()
+                                       else:
+                                               args['pcu_name'] = "error looking up pcu name"
+                                               args['pcu_errors'] = ""
+
+                                       args['hostname'] = host
+                                       sitehist.sendMessage('pcuerror_notice', **args)
+                                       print "send message for host %s PCU Failure" % host
+                                       
+
+               # NOTE: non-intuitively, found_between(try_reboot, 3.5, 1) remains
+               #               false for a day after the condition above is satisfied
+               if nodehist.haspcu and nodehist.status in ['offline', 'down'] and \
+                       changed_greaterthan(nodehist.last_changed,1.5) and \
+                       not nodehist.firewall and \
+                       found_between(recent_actions, 'try_reboot', 3.5, 1) and \
+                       not found_within(recent_actions, 'pcufailed_notice', 3.5):
+                               
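+                               # a PCU reboot was attempted over a day ago and the node is
+                               # still down, so treat the PCU attempt as failed.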
+                               # TODO: there MUST be a better way to do this... 
+                               # get fb node record for pcuid
+                               fbpcu = None
+                               fbnode = FindbadNodeRecord.get_latest_by(hostname=host)
+                               if fbnode:
+                                       fbpcu = FindbadPCURecord.get_latest_by(plc_pcuid=fbnode.plc_pcuid)
+                               if fbpcu:
+                                       pcu_name = fbpcu.pcu_name()
+                               else:
+                                       pcu_name = "error looking up pcu name"
+
+                               # get fb pcu record for pcuid
+                               # send pcu failure message
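+                               # include pcu_name so the notice identifies which PCU failed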
+                               sitehist.sendMessage('pcufailed_notice', hostname=host, pcu_name=pcu_name)
+                               print "send message for host %s PCU Failure" % host
+
+               if nodehist.status == 'failboot' and \
+                       changed_greaterthan(nodehist.last_changed, 0.25) and \
+                       not found_between(recent_actions, 'bootmanager_restore', 0.5, 0):
+                               # node is stuck in failboot: re-run the boot manager.
+                               # delay 0.5 days before retrying...
+
+                               print "send message for host %s bootmanager_restore" % host
+                               sitehist.runBootManager(host)
+                       #       sitehist.sendMessage('retry_bootman', hostname=host)
+
+               if nodehist.status == 'down' and \
+                       changed_greaterthan(nodehist.last_changed, 2):
+                               if not nodehist.firewall and not found_within(recent_actions, 'down_notice', 3.5):
+                                       # send down node notice
+                                       sitehist.sendMessage('down_notice', hostname=host)
+                                       print "send message for host %s down" % host
+
+                               if nodehist.firewall and not found_within(recent_actions, 'firewall_notice', 3.5):
+                                       # send firewall notice
+                                       #email_exception(host, "firewall_notice")
+                                       sitehist.sendMessage('firewall_notice', hostname=host)
+                                       print "send message for host %s firewall" % host
+
+               node_count = node_count + 1
+               print "time: ", time.strftime('%Y-%m-%d %H:%M:%S')
+               sys.stdout.flush()
+               session.flush()
+
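+       # now walk the site records and escalate, pause, or clear penalties
+       # based on each site's status and recent actions.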
+       for i,site in enumerate(sitenames):
+               sitehist = SiteInterface.get_or_make(loginbase=site)
+               siteblack = BlacklistRecord.get_by(loginbase=site)
+               skip_due_to_blacklist=False
+
+               if siteblack and not siteblack.expired():
+                       print "skipping %s due to blacklist.  will expire %s" % (site, siteblack.willExpire() )
+                       skip_due_to_blacklist=True
+                       sitehist.clearPenalty()
+                       sitehist.applyPenalty()
+                       continue
+
+               # TODO: make query only return records within a certain time range,
+               #               i.e. greater than 0.5 days ago, or 5 days, etc.
+               recent_actions = sitehist.getRecentActions(loginbase=site)
+
+               print "%s %s %s" % (i, sitehist.db.loginbase, sitehist.db.status)
+
+               # determine whether penalties were applied within the last 30 days;
+               # if so, add a 'pause_penalty' action.
+               if sitehist.db.message_id != 0 and sitehist.db.message_status == 'open' and \
+                       sitehist.db.penalty_level > 0 and not found_within(recent_actions, 'pause_penalty', 30):
+                       #       pause escalation
+                       print "Pausing penalties for %s" % site
+                       sitehist.pausePenalty()
+               else:
+
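+                       # the site has been down for over a week with no recent penalty
+                       # action: escalate to the next penalty level and notify the site.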
+                       if sitehist.db.status == 'down':
+                               if  not found_within(recent_actions, 'pause_penalty', 30) and \
+                                       not found_within(recent_actions, 'increase_penalty', 7) and \
+                                       changed_greaterthan(sitehist.db.last_changed, 7):
+
+                                       # TODO: catch errors
+                                       sitehist.increasePenalty()
+                                       sitehist.applyPenalty()
+                                       sitehist.sendMessage('increase_penalty')
+
+                                       print "send message for site %s penalty increase" % site
+
+                       if sitehist.db.status == 'good':
+                               # clear penalty
+                               # NOTE: because 'all clear' should have an indefinite status, we
+                               #               have a boolean value rather than a 'recent action'
+                               if sitehist.db.penalty_applied:
+                                       # send message that penalties are cleared.
+
+                                       sitehist.clearPenalty()
+                                       sitehist.applyPenalty()
+                                       sitehist.sendMessage('clear_penalty')
+                                       sitehist.closeTicket()
+
+                                       print "send message for site %s penalty cleared" % site
+
+
+               site_count = site_count + 1
+
+               print "time: ", time.strftime('%Y-%m-%d %H:%M:%S')
+               sys.stdout.flush()
+               session.flush()
+
+       session.flush()
+       return
+
+
+if __name__ == "__main__":
+       parser = parsermodule.getParser(['nodesets'])
+       parser.set_defaults( timewait=0,
+                                               skip=0,
+                                               rins=False,
+                                               reboot=False,
+                                               findbad=False,
+                                               force=False, 
+                                               nosetup=False, 
+                                               verbose=False, 
+                                               quiet=False,)
+
+       parser.add_option("", "--stopselect", dest="stopselect", metavar="", 
+                                               help="The select string that must evaluate to true for the node to be considered 'done'")
+       parser.add_option("", "--findbad", dest="findbad", action="store_true", 
+                                               help="Re-run findbad on the nodes we're going to check before acting.")
+       parser.add_option("", "--force", dest="force", action="store_true", 
+                                               help="Force action regardless of previous actions/logs.")
+       parser.add_option("", "--rins", dest="rins", action="store_true", 
+                                               help="Set the boot_state to 'rins' for all nodes.")
+       parser.add_option("", "--reboot", dest="reboot", action="store_true", 
+                                               help="Actively try to reboot the nodes, keeping a log of actions.")
+
+       parser.add_option("", "--verbose", dest="verbose", action="store_true", 
+                                               help="Extra debug output messages.")
+       parser.add_option("", "--nosetup", dest="nosetup", action="store_true", 
+                                               help="Do not perform the ordinary setup phase.")
+       parser.add_option("", "--skip", dest="skip", 
+                                               help="Number of machines to skip on the input queue.")
+       parser.add_option("", "--timewait", dest="timewait", 
+                                               help="Minutes to wait between iterations of 10 nodes.")
+
+       parser = parsermodule.getParser(['defaults'], parser)
+       config = parsermodule.parse_args(parser)
+
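+       # by default, act on every node and site present in the history tables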
+       fbquery = HistoryNodeRecord.query.all()
+       hostnames = [ n.hostname for n in fbquery ]
+       
+       fbquery = HistorySiteRecord.query.all()
+       sitenames = [ s.loginbase for s in fbquery ]
+
+       if config.site:
+               # TODO: replace with calls to local db.  the api fails so often that
+               #               these calls should be regarded as unreliable.
+               l_nodes = plccache.GetNodesBySite(config.site)
+               filter_hostnames = [ n['hostname'] for n in l_nodes ]
+
+               hostnames = filter(lambda x: x in filter_hostnames, hostnames)
+               sitenames = [config.site]
+
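+       # a single node selection (config.node) narrows the run to that host
+       # and its site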
+       if config.node:
+               hostnames = [ config.node ] 
+               sitenames = [ plccache.plcdb_hn2lb[config.node] ]
+
        try:
-               main()
+               main(hostnames, sitenames)
+               session.flush()
        except KeyboardInterrupt:
-               print "Killed.  Exitting."
-               logger.info('Monitor Killed')
-               os._exit(0)
+               print "Killed by interrupt"
+               session.flush()
+               sys.exit(0)
+       except:
+               email_exception()
+               traceback.print_exc()
+               print "fail all..."