X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=findbadpcu.py;h=8ebd8914806cd965cde1228b04f845a4d92a579f;hb=8e65cdcaaf08982f5f744297c009359ec74d31b5;hp=3ab97a3f1193196117736ee52c51ecfd50d803b1;hpb=8c989c864f4113c3f0969f5ec5fe86f047f84256;p=monitor.git

diff --git a/findbadpcu.py b/findbadpcu.py
index 3ab97a3..8ebd891 100755
--- a/findbadpcu.py
+++ b/findbadpcu.py
@@ -13,11 +13,12 @@ import threadpool
 import threading
 import monitor
-from monitor.pcu import reboot
+from pcucontrol import reboot
 from monitor import config
-from monitor.database import FindbadPCURecordSync, FindbadPCURecord
+from monitor.database.info.model import FindbadPCURecordSync, FindbadPCURecord, session
+from monitor import database
 from monitor import util
-from monitor.wrapper import plc
+from monitor.wrapper import plc, plccache
 from nodequery import pcu_select
 
 plc_lock = threading.Lock()
@@ -49,7 +50,7 @@ def get_pcu(pcuname):
 	except:
 		try:
 			#print "GetPCU from file %s" % pcuname
-			l_pcus = database.dbLoad("pculist")
+			l_pcus = plccache.l_pcus
 			for i in l_pcus:
 				if i['pcu_id'] == pcuname:
 					l_pcu = i
@@ -67,7 +68,7 @@ def get_nodes(node_ids):
 		l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports'])
 	except:
 		try:
-			plc_nodes = database.dbLoad("l_plcnodes")
+			plc_nodes = plccache.l_plcnodes
 			for n in plc_nodes:
 				if n['node_id'] in node_ids:
 					l_node.append(n)
@@ -123,7 +124,7 @@ def get_plc_site_values(site_id):
 		d_site = d_site[0]
 	except:
 		try:
-			plc_sites = database.dbLoad("l_plcsites")
+			plc_sites = plccache.l_plcsites
 			for site in plc_sites:
 				if site['site_id'] == site_id:
 					d_site = site
@@ -274,6 +275,7 @@ def recordPingAndSSH(request, result):
 		fbrec = FindbadPCURecord(
 					date_checked=datetime.fromtimestamp(values['date_checked']),
+					round=fbsync.round,
 					plc_pcuid=pcu_id,
 					plc_pcu_stats=values['plc_pcu_stats'],
 					dns_status=values['dnsmatch'],
@@ -282,6 +284,11 @@ def recordPingAndSSH(request, result):
 					reboot_trial_status="%s" % values['reboot'],
 				)
 		fbnodesync.round = global_round
+
+		fbnodesync.flush()
+		fbsync.flush()
+		fbrec.flush()
+
 		count += 1
 		print "%d %s %s" % (count, nodename, values)
@@ -307,9 +314,10 @@ def checkAndRecordState(l_pcus, cohash):
 	for pcuname in l_pcus:
 		pcu_id = int(pcuname)
 		fbnodesync = FindbadPCURecordSync.findby_or_create(plc_pcuid=pcu_id, if_new_set={'round' : 0})
+		fbnodesync.flush()
 		node_round = fbnodesync.round
 
-		if node_round < global_round:
+		if node_round < global_round or config.force:
 			# recreate node stats when refreshed
 			#print "%s" % nodename
 			req = threadpool.WorkRequest(collectPingAndSSH, [pcuname, cohash], {},
@@ -339,12 +347,14 @@ def checkAndRecordState(l_pcus, cohash):
 	print FindbadPCURecordSync.query.count()
 	print FindbadPCURecord.query.count()
+	session.flush()
 
 def main():
 	global global_round
 
-	l_pcus = monitor.database.if_cached_else_refresh(1, config.refresh, "pculist", lambda : plc.GetPCUs())
+	# monitor.database.if_cached_else_refresh(1, config.refresh, "pculist", lambda : plc.GetPCUs())
+	l_pcus = plccache.l_pcus
 	cohash = {}
 	fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0, if_new_set={'round' : global_round})
@@ -356,6 +366,8 @@ def main():
 		global_round += 1
 		fbsync.round = global_round
+
+	fbsync.flush()
+
 	if config.site is not None:
 		api = plc.getAuthAPI()
 		site = api.GetSites(config.site)
@@ -404,6 +416,7 @@ if __name__ == '__main__':
 				dbname="findbadpcus",
 				cachenodes=False,
 				refresh=False,
+				force=False,
 				)
 	parser.add_option("-f", "--nodelist", dest="nodelist", metavar="FILE",
 			help="Provide the input file for the node list")
@@ -422,6 +435,8 @@ if __name__ == '__main__':
 			help="Refresh the cached values")
 	parser.add_option("-i", "--increment", action="store_true", dest="increment",
 			help="Increment round number to force refresh or retry")
+	parser.add_option("", "--force", action="store_true", dest="force",
+			help="Force probe without incrementing global 'round'.")
 	parser = parsermodule.getParser(['defaults'], parser)
 	config = parsermodule.parse_args(parser)
 	try:
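
The functional heart of this patch is the probe-gating change in checkAndRecordState(): a PCU is re-probed when its stored round lags the global round, and now also whenever the new --force option is set, while --increment keeps its old role of advancing the global round. The sketch below is a hypothetical, simplified illustration of that interplay, not code from the repository; the Config class and helper functions are invented stand-ins for the real parsermodule/FindbadPCURecordSync plumbing, and only the field names mirror the diff.

# Minimal, self-contained sketch (assumed, not from monitor.git) of the
# round-gating behaviour introduced by this change.

class Config(object):
    increment = False  # --increment: bump the global round so every PCU record looks stale
    force = False      # --force: probe even when a record is already at the current round

def should_probe(node_round, global_round, config):
    # Mirrors the new condition: if node_round < global_round or config.force:
    return node_round < global_round or config.force

def next_global_round(global_round, config):
    # Mirrors: if config.increment: global_round += 1
    return global_round + 1 if config.increment else global_round

if __name__ == '__main__':
    cfg = Config()
    cfg.force = True
    print(should_probe(node_round=3, global_round=3, config=cfg))  # True: forced re-probe
    cfg.force = False
    print(should_probe(node_round=3, global_round=3, config=cfg))  # False: record already current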