import os
import sys
import time
import sets
import logging
import threading
import threadpool
import traceback
from datetime import datetime, timedelta

from monitor import config
from monitor.database.info.model import FindbadPCURecord, session
from monitor import database
from monitor import util
from monitor.wrapper import plc, plccache
from nodequery import pcu_select
from monitor.common import nmap_port_status
from monitor.scanapi import *
plc_lock = threading.Lock()

# Shared scan state: the current scan "round" and a counter of skipped PCUs.
global_round = 1
count = 0
# this will be called when an exception occurs within a thread
def handle_exception(request, result):
    print "Exception occurred in request %s" % request.requestID
    for i in result:
        print "Result: %s" % i
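# A minimal sketch of the contract assumed above (per the threadpool module
# used here): when a WorkRequest's callable raises, the pool hands the
# exc_callback the failed request plus the sys.exc_info() triple, so the
# loop above prints the exception type, value, and traceback in turn.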
def checkPCUs(l_pcus, cohash):
    global global_round
    global count

    # ten worker threads probing PCUs in parallel
    tp = threadpool.ThreadPool(10)
    scanpcu = ScanPCU(global_round)
    # CREATE all the work requests
    for pcuname in l_pcus:
        pcu_id = int(pcuname)
        #fbnodesync = FindbadPCURecordSync.findby_or_create(plc_pcuid=pcu_id, if_new_set={'round' : 0})

        #node_round = fbnodesync.round
        node_round = global_round - 1
        if node_round < global_round or config.force:
            # recreate PCU stats when refreshed
            req = threadpool.WorkRequest(scanpcu.collectInternal, [pcu_id, cohash], {},
                                         None, scanpcu.record, handle_exception)
            tp.putRequest(req)
        else:
            # We just skip it, since it's "up to date"
            count += 1
            print "%d %s %s" % (count, pcu_id, node_round)
    # WAIT while all the work requests are processed.
    begin = time.time()
    while True:
        try:
            time.sleep(1)
            tp.poll()
            # if more than an hour has passed, give up
            if time.time() - begin > (60*60*1):
                print "findbadpcus.py has run out of time!!!!!!"
                os._exit(1)
        except KeyboardInterrupt:
            print "Interrupted!"
            break
        except threadpool.NoResultsPending:
            print "All results collected."
            break
    #print FindbadPCURecordSync.query.count()
    print FindbadPCURecord.query.count()
    session.flush()
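# Usage sketch (the PCU ids here are hypothetical): scan two PCUs in the
# current round, with no precomputed co-located port-scan results:
#
#   checkPCUs([1234, 5678], cohash={})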
def main():
    global global_round

    l_pcus = plccache.l_pcus
    cohash = {}

    #fbsync = FindbadPCURecordSync.findby_or_create(plc_pcuid=0,
    #                                               if_new_set={'round' : global_round})
    #global_round = fbsync.round

    api = plc.getAuthAPI()
    if config.site is not None:
        site = api.GetSites(config.site)
        l_nodes = api.GetNodes(site[0]['node_ids'], ['pcu_ids'])
        pcus = []
        for node in l_nodes:
            pcus += node['pcu_ids']
        # clear out duplicates
        l_pcus = [pcu for pcu in sets.Set(pcus)]
    elif config.sitelist:
        site_list = config.sitelist.split(',')
        sites = api.GetSites(site_list)
        node_ids = []
        for s in sites:
            node_ids += s['node_ids']

        l_nodes = api.GetNodes(node_ids, ['pcu_ids'])
        pcus = []
        for node in l_nodes:
            pcus += node['pcu_ids']
        # clear out duplicates
        l_pcus = [pcu for pcu in sets.Set(pcus)]
    elif config.pcuselect is not None:
        n, pcus = pcu_select(config.pcuselect)
        # clear out duplicates
        l_pcus = [pcu for pcu in sets.Set(pcus)]
    elif config.nodelist is None and config.pcuid is None:
        # no filter given: scan every PCU in the cached GetPCUs() result
        print "Calling API GetPCUs() : cachecalls(%s)" % config.cachecalls
        l_pcus = [pcu['pcu_id'] for pcu in l_pcus]
    elif config.nodelist is not None:
        l_pcus = util.file.getListFromFile(config.nodelist)
        l_pcus = [int(pcu) for pcu in l_pcus]
    elif config.pcuid is not None:
        l_pcus = [config.pcuid]
        l_pcus = [int(pcu) for pcu in l_pcus]
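    # NOTE: the branches above are mutually exclusive and checked in order:
    # --site, --sitelist, --pcuselect, no filter (all cached PCUs),
    # --nodelist, --pcuid; the first match determines l_pcus.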
    if config.increment:
        # update global round number to force refreshes across all nodes
        global_round += 1

    checkPCUs(l_pcus, cohash)

    if config.increment:
        # update global round number to force refreshes across all nodes
        #fbsync.round = global_round
        session.flush()

    return 0
if __name__ == '__main__':
    # log everything to monitor.log, appending across runs
    logger = logging.getLogger("monitor")
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler("monitor.log", mode='a')
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    from monitor import parser as parsermodule
    parser = parsermodule.getParser()
    parser.set_defaults(nodelist=None,
                        increment=False,
                        pcuid=None,
                        pcuselect=None,
                        site=None,
                        sitelist=None,
                        dbname="findbadpcus",
                        cachenodes=False,
                        cachecalls=True,
                        force=False,
                        )
    parser.add_option("-f", "--nodelist", dest="nodelist", metavar="FILE",
                      help="Provide the input file for the node list")
    parser.add_option("", "--site", dest="site", metavar="SITE",
                      help="Get all PCUs associated with the given site's nodes")
    parser.add_option("", "--sitelist", dest="sitelist", metavar="LIST",
                      help="Get all PCUs associated with the nodes of the given comma-separated list of sites")
    parser.add_option("", "--pcuselect", dest="pcuselect", metavar="QUERY",
                      help="Query string to apply to the findbad PCUs")
    parser.add_option("", "--pcuid", dest="pcuid", metavar="ID",
                      help="Provide the id for a single PCU")
    parser.add_option("", "--cachenodes", action="store_true", dest="cachenodes",
                      help="Cache node lookup from PLC")
    parser.add_option("", "--dbname", dest="dbname", metavar="NAME",
                      help="Specify the name of the database to which the information is saved")
    parser.add_option("", "--nocachecalls", action="store_false", dest="cachecalls",
                      help="Refresh the cached values")
    parser.add_option("-i", "--increment", action="store_true", dest="increment",
                      help="Increment round number to force refresh or retry")
    parser.add_option("", "--force", action="store_true", dest="force",
                      help="Force probe without incrementing the global 'round'.")
    parser = parsermodule.getParser(['defaults'], parser)
    config = parsermodule.parse_args(parser)
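    # Example invocations (the site name and file name are hypothetical):
    #   ./findbadpcus.py                       # probe every PCU known to PLC
    #   ./findbadpcus.py --site princeton      # only PCUs behind one site's nodes
    #   ./findbadpcus.py -f pcu_ids.txt -i     # ids from a file; bump the round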
    if hasattr(config, 'cachecalls') and not config.cachecalls:
        # NOTE: if explicitly asked, refresh the cached values.
        print "Reloading PLCCache"
        plccache.init()
    try:
        # NOTE: evidently, there is a bizarre interaction between iLO and ssh
        # when LANG is set... We do not know why; unsetting LANG fixes the problem.
        if 'LANG' in os.environ:
            del os.environ['LANG']
        main()
    except Exception, err:
        traceback.print_exc()
        from monitor.common import email_exception
        email_exception()
        print "Exception: %s" % err
        print "Saving data... exiting."
        sys.exit(0)