X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=grouprins.py;h=b85bbadd55c3dae8fb86c04c796ba2a588d70950;hb=refs%2Fheads%2F1.0;hp=99af75251426c3c24bcf6fb9a217ee36ccba309f;hpb=590ac12c941310b40a92d2fe938e62e3538f2893;p=monitor.git diff --git a/grouprins.py b/grouprins.py index 99af752..b85bbad 100755 --- a/grouprins.py +++ b/grouprins.py @@ -13,26 +13,29 @@ # import plc -import auth -api = plc.PLC(auth.auth, auth.plc) +api = plc.getAuthAPI() -import policy - -from config import config as cfg +import traceback +import config +import util.file from optparse import OptionParser +import const from nodecommon import * from nodequery import verify,query_to_dict,node_select -import soltesz +import database from unified_model import * +import os import time +import parser as parsermodule from model import * import bootman # debug nodes -import monitor # down nodes with pcu import reboot # down nodes without pcu -reboot.verbose = 0 +import mailmonitor # down nodes with pcu +from emailTxt import mailtxt +#reboot.verbose = 0 import sys class Reboot(object): @@ -50,26 +53,73 @@ class Reboot(object): m = PersistMessage(host, mailtxt.pcudown_one[0] % args, mailtxt.pcudown_one[1] % args, True, db='pcu_persistmessages') - loginbase = plc.siteId(hostname) - m.send([policy.TECHEMAIL % loginbase]) + loginbase = plc.siteId(host) + m.send([const.TECHEMAIL % loginbase]) def pcu(self, host): # TODO: It should be possible to diagnose the various conditions of # the PCU here, and send different messages as appropriate. - if self.fbnode['pcu'] == "PCU": + print "'%s'" % self.fbnode['pcu'] + if self.fbnode['pcu'] == "PCU" or "PCUOK" in self.fbnode['pcu']: self.action = "reboot.reboot('%s')" % host - pflags = PersistFlags(host, 1*60*60*24, db='pcu_persistflags') - if not pflags.getRecentFlag('pcutried'): # or not pflags.getFlag('pcufailed'): - pflags.setRecentFlag('pcutried') + pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags') + #pflags.resetRecentFlag('pcutried') + if not pflags.getRecentFlag('pcutried'): try: - ret = reboot.reboot(host) + node_pf = PersistFlags(host, 1, db='node_persistflags') + if node_pf.checkattr('last_change') and \ + node_pf.last_change < time.time() - 60*60*24 and \ + node_pf.checkattr('status') and \ + node_pf.status != "good": + + print "CALLING REBOOT!!!" + ret = reboot.reboot(host) + + pflags.setRecentFlag('pcutried') + pflags.save() + return ret + else: + return True + + except Exception,e: + email_exception() + print traceback.print_exc(); print e + + # NOTE: this failure could be an implementation issue on + # our end. So, extra notices are confusing... + # self._send_pcunotice(host) + pflags.setRecentFlag('pcufailed') pflags.save() - return ret + return False + + elif not pflags.getRecentFlag('pcu_rins_tried'): + try: + # NOTE: check that the node has been down for at least a + # day before rebooting it. this avoids false-reboots/rins + # from failed node detections. circa 03-12-09 + node_pf = PersistFlags(host, 1, db='node_persistflags') + if node_pf.checkattr('last_change') and \ + node_pf.last_change < time.time() - 60*60*24 and \ + node_pf.checkattr('status') and \ + node_pf.status != "good": + + # set node to 'rins' boot state. 
+                                                print "CALLING REBOOT +++ RINS"
+                                                plc.nodeBootState(host, 'reinstall')
+                                                ret = reboot.reboot(host)
+
+                                                pflags.setRecentFlag('pcu_rins_tried')
+                                                pflags.save()
+                                                return ret
+
+                                        else:
+                                                return True

                                 except Exception,e:
-                                        import traceback; print traceback.print_exc(); print e
+                                        email_exception()
+                                        print traceback.print_exc(); print e

                                         # NOTE: this failure could be an implementation issue on
                                         #               our end. So, extra notices are confusing...
@@ -88,10 +138,13 @@ class Reboot(object):
                                 pflags.setRecentFlag('pcumessagesent')
                                 pflags.save()

-                        else:
-                                pass # just skip it?
+                        # This will result in mail() being called next, to try to
+                        # engage the technical contact to take care of it also.
+                        print "RETURNING FALSE"
+                        return False

                 else:
+                        print "NO PCUOK"
                         self.action = "None"
                         return False

@@ -104,12 +157,13 @@ class Reboot(object):
                         pflags.setRecentFlag('endrecord')
                         pflags.save()

-                # Then in either case, run monitor.reboot()
-                self.action = "monitor.reboot('%s')" % host
+                # Then in either case, run mailmonitor.reboot()
+                self.action = "mailmonitor.reboot('%s')" % host
                 try:
-                        return monitor.reboot(host)
+                        return mailmonitor.reboot(host)
                 except Exception, e:
-                        import traceback; print traceback.print_exc(); print e
+                        email_exception(host)
+                        print traceback.print_exc(); print e
                         return False

 class RebootDebug(Reboot):
@@ -130,18 +184,34 @@ class RebootDown(Reboot):
                 self.action = "None"
                 return False # this always fails, since the node will be down.

+def set_node_to_rins(host, fb):
+
+        node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
+        record = {'observation' : node[0],
+                  'model' : 'USER_REQUEST',
+                  'action' : 'api.UpdateNode(%s, {"boot_state" : "reinstall"})' % host,
+                  'time' : time.time()}
+        l = Log(host, record)
+
+        ret = api.UpdateNode(host, {'boot_state' : 'reinstall'})
+        if ret:
+                # it's nice to see the current status rather than the previous status on the console
+                node = api.GetNodes(host)[0]
+                print l
+                print "%-2d" % (i-1), nodegroup_display(node, fb)
+                return l
+        else:
+                print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
+                return None
+
 try:
-        rebootlog = soltesz.dbLoad("rebootlog")
+        rebootlog = database.dbLoad("rebootlog")
 except:
        rebootlog = LogRoll()

-parser = OptionParser()
-parser.set_defaults(nodegroup=None,
-                                        node=None,
-                                        nodelist=None,
-                                        nodeselect=None,
-                                        timewait=30,
+parser = parsermodule.getParser(['nodesets'])
+parser.set_defaults( timewait=0,
                                         skip=0,
                                         rins=False,
                                         reboot=False,
@@ -149,37 +219,11 @@ parser.set_defaults(nodegroup=None,
                                         force=False,
                                         nosetup=False,
                                         verbose=False,
-                                        stopkey=None,
-                                        stopvalue=None,
                                         quiet=False,
                                         )
-parser.add_option("", "--node", dest="node", metavar="nodename.edu",
-                                        help="A single node name to add to the nodegroup")
-parser.add_option("", "--nodelist", dest="nodelist", metavar="list.txt",
-                                        help="Use all nodes in the given file for operation.")
-parser.add_option("", "--nodegroup", dest="nodegroup", metavar="NodegroupName",
-                                        help="Specify a nodegroup to perform actions on")
-parser.add_option("", "--nodeselect", dest="nodeselect", metavar="querystring",
-                                        help="Specify a query to perform on findbad db")
-
-parser.add_option("", "--verbose", dest="verbose", action="store_true",
-                                        help="Extra debug output messages.")
-parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
-                                        help="Do not perform the orginary setup phase.")
-
-parser.add_option("", "--skip", dest="skip",
-                                        help="Number of machines to skip on the input queue.")
-parser.add_option("", "--timewait", dest="timewait",
-                                        help="Minutes to wait between iterations of 10 nodes.")
 parser.add_option("", "--stopselect", dest="stopselect", metavar="",
                                         help="The select string that must evaluate to true for the node to be considered 'done'")
-
-parser.add_option("", "--stopkey", dest="stopkey", metavar="",
-                                        help="")
-parser.add_option("", "--stopvalue", dest="stopvalue", metavar="",
-                                        help="")
-
 parser.add_option("", "--findbad", dest="findbad", action="store_true",
                                         help="Re-run findbad on the nodes we're going to check before acting.")
 parser.add_option("", "--force", dest="force", action="store_true",
@@ -188,34 +232,51 @@ parser.add_option("", "--rins", dest="rins", action="store_true",
                                         help="Set the boot_state to 'rins' for all nodes.")
 parser.add_option("", "--reboot", dest="reboot", action="store_true",
                                         help="Actively try to reboot the nodes, keeping a log of actions.")
-#config = config(parser)
-config = cfg(parser)
-config.parse_args()
+
+parser.add_option("", "--verbose", dest="verbose", action="store_true",
+                                        help="Extra debug output messages.")
+parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
+                                        help="Do not perform the orginary setup phase.")
+parser.add_option("", "--skip", dest="skip",
+                                        help="Number of machines to skip on the input queue.")
+parser.add_option("", "--timewait", dest="timewait",
+                                        help="Minutes to wait between iterations of 10 nodes.")
+
+parser = parsermodule.getParser(['defaults'], parser)
+config = parsermodule.parse_args(parser)

 # COLLECT nodegroups, nodes and node lists
 if config.nodegroup:
-        ng = api.GetNodeGroups({'name' : config.nodegroup})
+        ng = api.GetNodeGroups({'groupname' : config.nodegroup})
         nodelist = api.GetNodes(ng[0]['node_ids'])
         hostnames = [ n['hostname'] for n in nodelist ]

+if config.site:
+        site = api.GetSites(config.site)
+        l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
+        hostnames = [ n['hostname'] for n in l_nodes ]
+
 if config.node or config.nodelist:
         if config.node: hostnames = [ config.node ]
-        else: hostnames = config.getListFromFile(config.nodelist)
+        else: hostnames = util.file.getListFromFile(config.nodelist)
+
+fb = database.dbLoad("findbad")

 if config.nodeselect:
-        hostnames = node_select(config.nodeselect)
+        hostnames = node_select(config.nodeselect, fb['nodes'].keys(), fb)

 if config.findbad:
         # rerun findbad with the nodes in the given nodes.
-        import os
         file = "findbad.txt"
-        config.setFileFromList(file, hostnames)
-        os.system("./findbad.py --cachenodes --debug=0 --dbname=findbad --increment --nodelist %s" % file)
+        util.file.setFileFromList(file, hostnames)
+        os.system("./findbad.py --cachenodes --increment --nodelist %s" % file)
+        # TODO: shouldn't we reload the node list now?

-fb = soltesz.dbLoad("findbad")
+l_blacklist = database.if_cached_else(1, "l_blacklist", lambda : [])

 # commands:
 i = 1
 count = 1
+#print "hosts: %s" % hostnames
 for host in hostnames:

         #if 'echo' in host or 'hptest-1' in host: continue
@@ -224,13 +285,17 @@ for host in hostnames:
         try:
                 node = api.GetNodes(host)[0]
         except:
-                import traceback; print traceback.print_exc();
+                email_exception()
+                print traceback.print_exc();
                 print "FAILED GETNODES for host: %s" % host
                 continue

         print "%-2d" % i, nodegroup_display(node, fb)
         i += 1
-        if i < int(config.skip): continue
+        if i-1 <= int(config.skip): continue
+        if host in l_blacklist:
+                print "%s is blacklisted.  Skipping." % host
+                continue

         if config.stopselect:
                 dict_query = query_to_dict(config.stopselect)
@@ -240,45 +305,23 @@ for host in hostnames:
                 if verify(dict_query, fbnode) and observed_state != "dbg ":
                         # evaluates to true, therefore skip.
                         print "%s evaluates true for %s ; skipping..." % ( config.stopselect, host )
-                        continue
-
-        if config.stopkey and config.stopvalue:
-                fbnode = fb['nodes'][host]['values']
-                observed_state = get_current_state(fbnode)
+                        try:
+                                # todo: clean up act_all record here.
+                                # todo: send thank you, etc.
+                                mailmonitor.reboot(host)
+                        except Exception, e:
+                                email_exception()
+                                print traceback.print_exc(); print e

-                if config.stopkey in fbnode:
-                        if config.stopvalue in fbnode[config.stopkey] and observed_state != "dbg ":
-                                print "%s has stopvalue; skipping..." % host
-                                continue
-                else:
-                        print "stopkey %s not in fbnode record for %s; skipping..." % (config.stopkey, host)
-                        print fbnode
                         continue
+                #else:
+                        #print "%s failed to match %s: -%s-" % ( host, dict_query, observed_state )
+                        #sys.exit(1)

         if not config.force and rebootlog.find(host, {'action' : ".*reboot"}, 60*60*2):
                 print "recently rebooted %s. skipping... " % host
                 continue

-        if config.rins:
-                # reset the boot_state to 'rins'
-                node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
-                record = {'observation' : node[0],
-                          'model' : 'USER_REQUEST',
-                          'action' : 'api.UpdateNode(%s, {"boot_state" : "rins"})' % host,
-                          'time' : time.time()}
-                l = Log(host, record)
-
-                ret = api.UpdateNode(host, {'boot_state' : 'rins'})
-                if ret:
-                        # it's nice to see the current status rather than the previous status on the console
-                        node = api.GetNodes(host)[0]
-                        print l
-                        print "%-2d" % (i-1), nodegroup_display(node, fb)
-                        rebootlog.add(l)
-                else:
-                        print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
-
-
         if config.reboot:
                 fbnode = fb['nodes'][host]['values']
@@ -288,9 +331,17 @@ for host in hostnames:
                         o = RebootDebug(fbnode)

                 elif observed_state == "boot" :
+                        if config.rins:
+                                l = set_node_to_rins(host, fb)
+                                if l: rebootlog.add(l)
+
                         o = RebootBoot(fbnode)

                 elif observed_state == "down":
+                        if config.rins:
+                                l = set_node_to_rins(host, fb)
+                                if l: rebootlog.add(l)
+
                         o = RebootDown(fbnode)


@@ -316,6 +367,12 @@ for host in hostnames:
                                   'time' : time.time()}

                         print "ALL METHODS OF RESTARTING %s FAILED" % host
+                        args = {}
+                        args['hostname'] = host
+                        #m = PersistMessage(host, "ALL METHODS FAILED for %(hostname)s" % args,
+                        #                        "CANNOT CONTACT", False, db='suspect_persistmessages')
+                        #m.reset()
+                        #m.send(['monitor-list@lists.planet-lab.org'])

                         l = Log(host, record)
                         print l
@@ -324,13 +381,14 @@ for host in hostnames:
                 print "Killed by interrupt"
                 sys.exit(0)
         except:
-                import traceback; print traceback.print_exc();
+                email_exception()
+                print traceback.print_exc();
                 print "Continuing..."

         time.sleep(1)
         if count % 10 == 0:
                 print "Saving rebootlog"
-                soltesz.dbDump("rebootlog", rebootlog)
+                database.dbDump("rebootlog", rebootlog)
                 wait_time = int(config.timewait)
                 print "Sleeping %d minutes" % wait_time
                 ti = 0
@@ -345,4 +403,4 @@ for host in hostnames:
         count = count + 1

 print "Saving rebootlog"
-soltesz.dbDump("rebootlog", rebootlog)
+database.dbDump("rebootlog", rebootlog)
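
The core behavioural change in Reboot.pcu() above is a retry gate: a PCU power-cycle (and, on a later pass, a reinstall) is only attempted when the node has been in a non-"good" state for more than a day and no recent attempt is on record. The standalone sketch below condenses that gating for reference; it is illustrative only and not part of the patch. PersistFlags, reboot.reboot() and the database names are taken from the diff, while the helper name try_pcu_power_cycle is hypothetical.

import time
import reboot                             # PCU power-cycle helper used by grouprins.py
from unified_model import PersistFlags    # persistent per-host flags (pulled in via the * import above)

def try_pcu_power_cycle(host):
        # Hypothetical helper mirroring the gating in Reboot.pcu() above.
        # Remember PCU attempts for two days so a flaky node is not cycled repeatedly.
        pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags')
        node_pf = PersistFlags(host, 1, db='node_persistflags')

        # Act only when the node has been in a non-"good" state for over a day;
        # this avoids reboots/reinstalls triggered by transient false detections.
        down_long_enough = node_pf.checkattr('last_change') and \
                           node_pf.last_change < time.time() - 60*60*24 and \
                           node_pf.checkattr('status') and \
                           node_pf.status != "good"

        if pflags.getRecentFlag('pcutried') or not down_long_enough:
                return True                    # nothing to do on this pass

        ret = reboot.reboot(host)              # power-cycle the node through its PCU
        pflags.setRecentFlag('pcutried')       # record the attempt so it is not repeated
        pflags.save()
        return ret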