X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=findbadpcu.py;h=ab4f5ff81077165eb618943d9a0e5625eaae0a79;hb=c9d06f3b274ecbc092a0b3eb1f5ceb6c0f734aad;hp=2900b65f52aa6c4894e45355339a5c8ca64696a5;hpb=8424072ea9faa9afaee496c039e3f626b5b36e41;p=monitor.git diff --git a/findbadpcu.py b/findbadpcu.py index 2900b65..ab4f5ff 100755 --- a/findbadpcu.py +++ b/findbadpcu.py @@ -5,306 +5,70 @@ import sys import string import time import socket - - +import sets import signal +import traceback +from datetime import datetime,timedelta +import threadpool +import threading -#old_handler = signal.getsignal(signal.SIGCHLD) - -#def sig_handler(signum, stack): -# """ Handle SIGCHLD signal """ -# global old_handler -# if signum == signal.SIGCHLD: -# try: -# os.wait() -# except: -# pass -# if old_handler != signal.SIG_DFL: -# old_handler(signum, stack) -# -#orig_sig_handler = signal.signal(signal.SIGCHLD, sig_handler) - -from config import config -from optparse import OptionParser -parser = OptionParser() -parser.set_defaults(filename=None, - increment=False, - pcuid=None, - dbname="findbadpcus", - cachenodes=False, - refresh=False, - ) -parser.add_option("-f", "--nodelist", dest="filename", metavar="FILE", - help="Provide the input file for the node list") -parser.add_option("", "--pcuid", dest="pcuid", metavar="id", - help="Provide the id for a single pcu") -parser.add_option("", "--cachenodes", action="store_true", - help="Cache node lookup from PLC") -parser.add_option("", "--dbname", dest="dbname", metavar="FILE", - help="Specify the name of the database to which the information is saved") -parser.add_option("", "--refresh", action="store_true", dest="refresh", - help="Refresh the cached values") -parser.add_option("-i", "--increment", action="store_true", dest="increment", - help="Increment round number to force refresh or retry") -config = config(parser) -config.parse_args() - -# QUERY all nodes. -COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" 
+ \ - "table=table_nodeview&" + \ - "dumpcols='name,resptime,sshstatus,uptime,lastcotop'&" + \ - "formatcsv" - #"formatcsv&" + \ - #"select='lastcotop!=0'" +import monitor +from monitor import config +from monitor.database.info.model import FindbadPCURecord, session +from monitor import database +from monitor import util +from monitor.wrapper import plc, plccache +from nodequery import pcu_select +from monitor.common import nmap_port_status +from monitor.scanapi import * -import threading plc_lock = threading.Lock() -round = 1 -externalState = {'round': round, 'nodes': {'a': None}} +global_round = 1 errorState = {} count = 0 -import reboot -from reboot import pcu_name - -import soltesz -import plc -import comon -import threadpool -import syncplcdb - -def nmap_portstatus(status): - ps = {} - l_nmap = status.split() - ports = l_nmap[4:] - - continue_probe = False - for port in ports: - results = port.split('/') - ps[results[0]] = results[1] - if results[1] == "open": - continue_probe = True - return (ps, continue_probe) - -def collectPingAndSSH(pcuname, cohash): - - continue_probe = True - errors = None - values = {} - ### GET PCU ###################### - try: - b_except = False - plc_lock.acquire() - - try: - l_pcu = plc.GetPCUs({'pcu_id' : pcuname}) - - if len(l_pcu) > 0: - site_id = l_pcu[0]['site_id'] - - node_ids = l_pcu[0]['node_ids'] - l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', - 'node_id', 'ports']) - if len(l_node) > 0: - for node in l_node: - values[node['hostname']] = node['ports'][0] - - values['nodenames'] = [node['hostname'] for node in l_node] - # NOTE: this is for a dry run later. It doesn't matter which node. 
- values['node_id'] = l_node[0]['node_id'] - - if len(l_pcu) > 0: - values.update(l_pcu[0]) - else: - continue_probe = False - - except: - b_except = True - import traceback - traceback.print_exc() - - continue_probe = False - - plc_lock.release() - if b_except: return (None, None) - - if values['hostname'] is not None: - values['hostname'] = values['hostname'].strip() - - if values['ip'] is not None: - values['ip'] = values['ip'].strip() - - #### COMPLETE ENTRY ####################### - - values['complete_entry'] = [] - #if values['protocol'] is None or values['protocol'] is "": - # values['complete_entry'] += ["protocol"] - if values['model'] is None or values['model'] is "": - values['complete_entry'] += ["model"] - # Cannot continue due to this condition - continue_probe = False - - if values['password'] is None or values['password'] is "": - values['complete_entry'] += ["password"] - # Cannot continue due to this condition - continue_probe = False - - if len(values['complete_entry']) > 0: - continue_probe = False - - if values['hostname'] is None or values['hostname'] is "": - values['complete_entry'] += ["hostname"] - if values['ip'] is None or values['ip'] is "": - values['complete_entry'] += ["ip"] - - # If there are no nodes associated with this PCU, then we cannot continue. 
- if len(values['node_ids']) == 0: - continue_probe = False - values['complete_entry'] += ['NoNodeIds'] - - #### DNS and IP MATCH ####################### - if values['hostname'] is not None and values['hostname'] is not "" and \ - values['ip'] is not None and values['ip'] is not "": - #print "Calling socket.gethostbyname(%s)" % values['hostname'] - try: - ipaddr = socket.gethostbyname(values['hostname']) - if ipaddr == values['ip']: - values['dnsmatch'] = "DNS-OK" - else: - values['dnsmatch'] = "DNS-MISMATCH" - continue_probe = False - - except Exception, err: - values['dnsmatch'] = "DNS-NOENTRY" - values['hostname'] = values['ip'] - #print err - else: - if values['ip'] is not None and values['ip'] is not "": - values['dnsmatch'] = "NOHOSTNAME" - values['hostname'] = values['ip'] - else: - values['dnsmatch'] = "NO-DNS-OR-IP" - values['hostname'] = "No_entry_in_DB" - continue_probe = False - - #### RUN NMAP ############################### - if continue_probe: - nmap = soltesz.CMD() - (oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,16992 %s | grep Host:" % pcu_name(values)) - # NOTE: an empty / error value for oval, will still work. 
- (values['portstatus'], continue_probe) = nmap_portstatus(oval) - else: - values['portstatus'] = None - - - ###### DRY RUN ############################ - if 'node_ids' in values and len(values['node_ids']) > 0: - rb_ret = reboot.reboot_test(values['nodenames'][0], values, continue_probe, 1, True) - else: - rb_ret = "Not_Run" # No nodes to test" - - values['reboot'] = rb_ret - - ### GET PLC SITE ###################### - b_except = False - plc_lock.acquire() - - try: - d_site = plc.getSites({'site_id': site_id}, - ['max_slices', 'slice_ids', 'node_ids', 'login_base']) - except: - b_except = True - import traceback - traceback.print_exc() - - plc_lock.release() - if b_except: return (None, None) - - if d_site and len(d_site) > 0: - max_slices = d_site[0]['max_slices'] - num_slices = len(d_site[0]['slice_ids']) - num_nodes = len(d_site[0]['node_ids']) - loginbase = d_site[0]['login_base'] - values['plcsite'] = {'num_nodes' : num_nodes, - 'max_slices' : max_slices, - 'num_slices' : num_slices, - 'login_base' : loginbase, - 'status' : 'SUCCESS'} - else: - values['plcsite'] = {'status' : "GS_FAILED"} - except: - print "____________________________________" - print values - errors = values - print "____________________________________" - import traceback - errors['traceback'] = traceback.format_exc() - print errors['traceback'] - - values['checked'] = time.time() - return (pcuname, values, errors) - -def recordPingAndSSH(request, result): - global errorState - global externalState - global count - (nodename, values, errors) = result - - if values is not None: - global_round = externalState['round'] - pcu_id = "id_%s" % nodename - externalState['nodes'][pcu_id]['values'] = values - externalState['nodes'][pcu_id]['round'] = global_round - - count += 1 - print "%d %s %s" % (count, nodename, externalState['nodes'][pcu_id]['values']) - soltesz.dbDump(config.dbname, externalState) - - if errors is not None: - pcu_id = "id_%s" % nodename - errorState[pcu_id] = errors - 
soltesz.dbDump("findbadpcu_errors", errorState)
 
-
 # this will be called when an exception occurs within a thread
 def handle_exception(request, result):
 	print "Exception occured in request %s" % request.requestID
 	for i in result:
 		print "Result: %s" % i
 
-
-def checkAndRecordState(l_pcus, cohash):
-	global externalState
+def checkPCUs(l_pcus, cohash):
+	global global_round
 	global count
-	global_round = externalState['round']
-	tp = threadpool.ThreadPool(20)
+	tp = threadpool.ThreadPool(10)
+	scanpcu = ScanPCU(global_round)
 
 	# CREATE all the work requests
 	for pcuname in l_pcus:
-		pcu_id = "id_%s" % pcuname
-		if pcuname not in externalState['nodes']:
-			#print type(externalState['nodes'])
+		pcu_id = int(pcuname)
+		#fbnodesync = FindbadPCURecordSync.findby_or_create(plc_pcuid=pcu_id, if_new_set={'round' : 0})
+		#fbnodesync.flush()
 
-			externalState['nodes'][pcu_id] = {'round': 0, 'values': []}
-
-		node_round = externalState['nodes'][pcu_id]['round']
-		if node_round < global_round:
+		#node_round = fbnodesync.round
+		node_round = global_round - 1
+		if node_round < global_round or config.force:
 			# recreate node stats when refreshed
 			#print "%s" % nodename
-			req = threadpool.WorkRequest(collectPingAndSSH, [pcuname, cohash], {},
-										 None, recordPingAndSSH, handle_exception)
+			req = threadpool.WorkRequest(scanpcu.collectInternal, [int(pcuname), cohash], {},
+										 None, scanpcu.record, handle_exception)
 			tp.putRequest(req)
 		else:
 			# We just skip it, since it's "up to date"
 			count += 1
-			print "%d %s %s" % (count, pcu_id, externalState['nodes'][pcu_id]['values'])
-			pass
+			print "%d %s %s" % (count, pcu_id, node_round)
 
 	# WAIT while all the work requests are processed.
+	begin = time.time()
 	while 1:
 		try:
 			time.sleep(1)
 			tp.poll()
+			# if more than one hour
+			if time.time() - begin > (60*60*1):
+				print "findbadpcus.py has run out of time!!!!!!"
+				os._exit(1)
 		except KeyboardInterrupt:
 			print "Interrupted!"
 			
break
@@ -312,46 +76,139 @@ def checkAndRecordState(l_pcus, cohash):
 			print "All results collected."
 			break
 
+	#print FindbadPCURecordSync.query.count()
+	print FindbadPCURecord.query.count()
+	session.flush()
 
 def main():
-	global externalState
+	global global_round
 
-	externalState = soltesz.if_cached_else(1, config.dbname, lambda : externalState)
+	l_pcus = plccache.l_pcus
 	cohash = {}
 
-	if config.increment:
-		# update global round number to force refreshes across all nodes
-		externalState['round'] += 1
-
-	if config.filename == None and config.pcuid == None:
-		print "Calling API GetPCUs() : refresh(%s)" % config.refresh
-		l_pcus = soltesz.if_cached_else_refresh(1, 
-			config.refresh, "pculist", lambda : plc.GetPCUs())
+	#fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0,
+	#if_new_set={'round' : global_round})
+
+	#global_round = fbsync.round
+	api = plc.getAuthAPI()
+
+	if config.site is not None:
+		site = plccache.GetSitesByName([config.site])
+		l_nodes = plccache.GetNodesByIds(site[0]['node_ids'])
+		pcus = []
+		for node in l_nodes:
+			pcus += node['pcu_ids']
+		# clear out dups.
+		l_pcus = [pcu for pcu in sets.Set(pcus)]
+
+	elif config.node is not None:
+		l_nodes = plccache.GetNodeByName(config.node)
+		pcus = []
+		for node in l_nodes:
+			pcus += node['pcu_ids']
+		# clear out dups.
+		l_pcus = [pcu for pcu in sets.Set(pcus)]
+
+	elif config.sitelist:
+		site_list = config.sitelist.split(',')
+
+		sites = plccache.GetSitesByName(site_list)
+		node_ids = []
+		for s in sites:
+			node_ids += s['node_ids']
+
+		l_nodes = plccache.GetNodesByIds(node_ids)
+		pcus = []
+		for node in l_nodes:
+			pcus += node['pcu_ids']
+		# clear out dups.
+		l_pcus = [pcu for pcu in sets.Set(pcus)]
+
+	elif config.pcuselect is not None:
+		n, pcus = pcu_select(config.pcuselect)
+		print pcus
+		# clear out dups. 
+ l_pcus = [pcu for pcu in sets.Set(pcus)] + + elif config.nodelist == None and config.pcuid == None: + print "Calling API GetPCUs() : cachecalls(%s)" % config.cachecalls l_pcus = [pcu['pcu_id'] for pcu in l_pcus] - elif config.filename is not None: - l_pcus = config.getListFromFile(config.filename) + elif config.nodelist is not None: + l_pcus = util.file.getListFromFile(config.nodelist) l_pcus = [int(pcu) for pcu in l_pcus] elif config.pcuid is not None: l_pcus = [ config.pcuid ] l_pcus = [int(pcu) for pcu in l_pcus] - - checkAndRecordState(l_pcus, cohash) + if config.increment: + # update global round number to force refreshes across all nodes + global_round += 1 - return 0 + checkPCUs(l_pcus, cohash) -import logging -logger = logging.getLogger("monitor") -logger.setLevel(logging.DEBUG) -fh = logging.FileHandler("monitor.log", mode = 'a') -fh.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') -fh.setFormatter(formatter) -logger.addHandler(fh) + if config.increment: + # update global round number to force refreshes across all nodes + #fbsync.round = global_round + #fbsync.flush() + session.flush() + + return 0 +print "main" if __name__ == '__main__': + import logging + logger = logging.getLogger("monitor") + logger.setLevel(logging.DEBUG) + fh = logging.FileHandler("monitor.log", mode = 'a') + fh.setLevel(logging.DEBUG) + formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + fh.setFormatter(formatter) + logger.addHandler(fh) + from monitor import parser as parsermodule + parser = parsermodule.getParser() + parser.set_defaults(nodelist=None, + increment=False, + pcuid=None, + pcuselect=None, + site=None, + node=None, + sitelist=None, + dbname="findbadpcus", + cachenodes=False, + cachecalls=True, + force=False, + ) + parser.add_option("-f", "--nodelist", dest="nodelist", metavar="FILE", + help="Provide the input file for the node list") + parser.add_option("", "--node", dest="node", metavar="FILE", + 
help="Get all pcus associated with the given node") + parser.add_option("", "--site", dest="site", metavar="FILE", + help="Get all pcus associated with the given site's nodes") + parser.add_option("", "--sitelist", dest="sitelist", metavar="FILE", + help="Get all pcus associated with the given site's nodes") + parser.add_option("", "--pcuselect", dest="pcuselect", metavar="FILE", + help="Query string to apply to the findbad pcus") + parser.add_option("", "--pcuid", dest="pcuid", metavar="id", + help="Provide the id for a single pcu") + + parser.add_option("", "--cachenodes", action="store_true", + help="Cache node lookup from PLC") + parser.add_option("", "--dbname", dest="dbname", metavar="FILE", + help="Specify the name of the database to which the information is saved") + parser.add_option("", "--nocachecalls", action="store_false", dest="cachecalls", + help="Refresh the cached values") + parser.add_option("-i", "--increment", action="store_true", dest="increment", + help="Increment round number to force refresh or retry") + parser.add_option("", "--force", action="store_true", dest="force", + help="Force probe without incrementing global 'round'.") + parser = parsermodule.getParser(['defaults'], parser) + config = parsermodule.parse_args(parser) + if hasattr(config, 'cachecalls') and not config.cachecalls: + # NOTE: if explicilty asked, refresh cached values. + print "Reloading PLCCache" + plccache.init() try: # NOTE: evidently, there is a bizarre interaction between iLO and ssh # when LANG is set... Do not know why. Unsetting LANG, fixes the problem. @@ -360,7 +217,9 @@ if __name__ == '__main__': main() time.sleep(1) except Exception, err: + traceback.print_exc() + from monitor.common import email_exception + email_exception() print "Exception: %s" % err print "Saving data... exitting." - soltesz.dbDump(config.dbname, externalState) sys.exit(0)