# NOTE(review): this listing is a sampled extraction -- original file line
# numbers are fused into each line and many interior lines are missing.
# Comments below are hedged accordingly.
15 from nodequery import pcu_select
# Disabled SIGCHLD-handler experiment, retained commented-out by the original
# author (presumably to chain to a previously installed handler -- verify
# against revision history before resurrecting).
17 #old_handler = signal.getsignal(signal.SIGCHLD)
19 #def sig_handler(signum, stack):
20 #    """ Handle SIGCHLD signal """
22 #    if signum == signal.SIGCHLD:
27 #        if old_handler != signal.SIG_DFL:
28 #            old_handler(signum, stack)
30 #orig_sig_handler = signal.signal(signal.SIGCHLD, sig_handler)
# CoMon tabulator query URL used to fetch node status (name, response time,
# ssh status, uptime, last cotop) -- the trailing select clause was disabled.
34 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
35 "table=table_nodeview&" + \
36 "dumpcols='name,resptime,sshstatus,uptime,lastcotop'&" + \
39 #"select='lastcotop!=0'"
# Serializes access to the shared PLC API connection across worker threads.
42 plc_lock = threading.Lock()
# Persistent scan state: current round number plus per-PCU results keyed by
# "id_<pcu_id>" (the 'a' entry is a placeholder; see checkAndRecordState).
44 externalState = {'round': round, 'nodes': {'a': None}}
49 from reboot import pcu_name
# Parse the grep-able ("-oG") output of nmap into a dict mapping port number
# (string) -> state (string), e.g. {'22': 'open'}.
# Returns (ps, continue_probe); continue_probe appears to become True only
# when at least one port reports "open" -- TODO confirm: the loop header that
# binds `port`, the initialization of `ps`, and the body of the "open" branch
# are among the original lines not visible in this listing.
58 def nmap_portstatus(status):
60 	l_nmap = status.split()
63 	continue_probe = False
# Each nmap port token has the form "22/open/tcp//ssh///"; split on '/'
# to recover port number and state.
65 	results = port.split('/')
66 	ps[results[0]] = results[1]
67 	if results[1] == "open":
69 	return (ps, continue_probe)
# NOTE(review): fragment of get_pcu(pcuname) -- the `def` line and the
# try/except scaffolding are not visible in this listing. Visible logic:
# first query PLC directly for the PCU record; on the fallback path, load
# the cached "pculist" from the local database and search it by pcu_id.
74 print "GetPCU from PLC %s" % pcuname
75 l_pcu = plc.GetPCUs({'pcu_id' : pcuname})
# Fallback: use the last cached copy of the PCU list when PLC is unreachable.
81 print "GetPCU from file %s" % pcuname
82 l_pcus = database.dbLoad("pculist")
84 if i['pcu_id'] == pcuname:
# Deferred import, presumably to email a report when both paths fail --
# TODO confirm; the call site is among the missing lines.
88 from nodecommon import email_exception
# Fetch node records (hostname, last_contact, node_id, ports) for the given
# node_ids from PLC; on failure, fall back to the cached "l_plcnodes" list
# and filter it by node_id. NOTE(review): the try/except lines and the
# return statement are not visible in this listing.
95 def get_nodes(node_ids):
99 	l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports'])
# Fallback path: search the cached node list for matching node_ids.
102 	plc_nodes = database.dbLoad("l_plcnodes")
104 	if n['node_id'] in node_ids:
107 	traceback.print_exc()
# Deferred import for failure reporting by email -- call site not visible.
108 	from nodecommon import email_exception
118 def get_plc_pcu_values(pcuname):
120 	Try to contact PLC to get the PCU info.
121 	If that fails, try a backup copy from the last run.
122 	If that fails, return None
# NOTE(review): the initialization of `values`, the loop header binding
# `node`, and the merge of l_pcu into values are among the lines not visible
# in this listing.
126 	l_pcu  = get_pcu(pcuname)
128 	if l_pcu is not None:
129 		site_id = l_pcu['site_id']
130 		node_ids = l_pcu['node_ids']
131 		l_node = get_nodes(node_ids)
133 		if l_node is not None:
# Map each attached node's hostname to its first PCU port entry.
135 			values[node['hostname']] = node['ports'][0]
137 			values['nodenames'] = [node['hostname'] for node in l_node]
139 			# NOTE: this is for a dry run later. It doesn't matter which node.
140 			values['node_id'] = l_node[0]['node_id']
# Fetch summary values for a site (max_slices, slice count, node count,
# login_base) from PLC, falling back to the cached "l_plcsites" list on
# failure. Returns a dict containing a 'plcsite' sub-dict -- TODO confirm:
# the try/except lines and the return statement are not visible here.
148 def get_plc_site_values(site_id):
149 	### GET PLC SITE ######################
155 	d_site = plc.getSites({'site_id': site_id}, ['max_slices', 'slice_ids', 'node_ids', 'login_base'])
# Fallback path: linear search of the cached site list by site_id.
160 	plc_sites = database.dbLoad("l_plcsites")
161 	for site in plc_sites:
162 		if site['site_id'] == site_id:
166 	traceback.print_exc()
# Deferred import for failure reporting by email -- call site not visible.
167 	from nodecommon import email_exception
173 	if d_site is not None:
174 		max_slices = d_site['max_slices']
175 		num_slices = len(d_site['slice_ids'])
176 		num_nodes = len(d_site['node_ids'])
177 		loginbase = d_site['login_base']
# Package the site summary; 'status' marks the lookup as successful.
178 		values['plcsite'] = {'num_nodes' : num_nodes,
179 			'max_slices' : max_slices,
180 			'num_slices' : num_slices,
181 			'login_base' : loginbase,
182 			'status' : 'SUCCESS'}
# Probe a single PCU end-to-end and return (pcuname, values, errors).
# Pipeline visible in this listing: fetch PCU/node data from PLC, validate
# that the DB entry is complete, cross-check DNS vs. the recorded IP, port-
# scan the PCU with nmap, optionally dry-run a reboot test, and attach the
# owning site's summary. NOTE(review): many try/except/else lines are not
# visible here; `b_except`, `values`, and `errors` are bound on missing lines.
190 def collectPingAndSSH(pcuname, cohash):
192 	continue_probe = True
195 	### GET PCU ######################
199 	v = get_plc_pcu_values(pcuname)
# PLC lookup failed -- cannot probe this PCU further.
203 	continue_probe = False
206 	traceback.print_exc()
207 	from nodecommon import email_exception
209 	continue_probe = False
211 	if b_except or not continue_probe: return (None, None, None)
# Normalize whitespace on the DB-sourced hostname/ip before comparisons.
213 	if values['hostname'] is not None:
214 		values['hostname'] = values['hostname'].strip()
216 	if values['ip'] is not None:
217 		values['ip'] = values['ip'].strip()
219 	#### COMPLETE ENTRY #######################
# Record which required DB fields are missing; model and password are hard
# requirements (probing cannot proceed without them).
# NOTE(review): `is ""` tests identity, not equality -- works only by CPython
# string-interning accident; should be `== ""` (left unchanged in this
# truncated view).
221 	values['complete_entry'] = []
222 	#if values['protocol'] is None or values['protocol'] is "":
223 	#	values['complete_entry'] += ["protocol"]
224 	if values['model'] is None or values['model'] is "":
225 		values['complete_entry'] += ["model"]
226 		# Cannot continue due to this condition
227 		continue_probe = False
229 	if values['password'] is None or values['password'] is "":
230 		values['complete_entry'] += ["password"]
231 		# Cannot continue due to this condition
232 		continue_probe = False
234 	if len(values['complete_entry']) > 0:
235 		continue_probe = False
237 	if values['hostname'] is None or values['hostname'] is "":
238 		values['complete_entry'] += ["hostname"]
239 	if values['ip'] is None or values['ip'] is "":
240 		values['complete_entry'] += ["ip"]
242 	# If there are no nodes associated with this PCU, then we cannot continue.
243 	if len(values['node_ids']) == 0:
244 		continue_probe = False
245 		values['complete_entry'] += ['NoNodeIds']
247 	#### DNS and IP MATCH #######################
# Classify DNS health: DNS-OK, DNS-MISMATCH, DNS-NOENTRY (lookup raised),
# NOHOSTNAME (ip only), or NO-DNS-OR-IP (neither). On NOENTRY/NOHOSTNAME the
# recorded IP is substituted as the hostname so later steps can proceed.
248 	if values['hostname'] is not None and values['hostname'] is not "" and \
249 	values['ip'] is not None and values['ip'] is not "":
250 		#print "Calling socket.gethostbyname(%s)" % values['hostname']
252 		ipaddr = socket.gethostbyname(values['hostname'])
253 		if ipaddr == values['ip']:
254 			values['dnsmatch'] = "DNS-OK"
256 			values['dnsmatch'] = "DNS-MISMATCH"
257 			continue_probe = False
259 		except Exception, err:
260 			values['dnsmatch'] = "DNS-NOENTRY"
261 			values['hostname'] = values['ip']
264 	if values['ip'] is not None and values['ip'] is not "":
265 		values['dnsmatch'] = "NOHOSTNAME"
266 		values['hostname'] = values['ip']
268 	values['dnsmatch'] = "NO-DNS-OR-IP"
269 	values['hostname'] = "No_entry_in_DB"
270 	continue_probe = False
272 	#### RUN NMAP ###############################
# Scan the management ports typically used by PCUs (ssh, telnet, http(s),
# DRAC 5869, JetDirect 9100, Intel AMT 16992); -P0 skips the ping check.
274 	nmap = moncommands.CMD()
275 	(oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % pcu_name(values))
276 	# NOTE: an empty / error value for oval, will still work.
277 	(values['portstatus'], continue_probe) = nmap_portstatus(oval)
279 	values['portstatus'] = None
282 	###### DRY RUN ############################
# Exercise the reboot logic in dry-run mode (final argument True) against
# the first attached node -- which node is used does not matter.
283 	if 'node_ids' in values and len(values['node_ids']) > 0:
284 		rb_ret = reboot.reboot_test(values['nodenames'][0], values, continue_probe, 1, True)
286 		rb_ret = "Not_Run" # No nodes to test"
288 	values['reboot'] = rb_ret
290 	### GET PLC SITE ######################
291 	v = get_plc_site_values(values['site_id'])
295 	values['plcsite'] = {'status' : "GS_FAILED"}
# Catch-all failure path: capture the traceback into the errors result.
298 	print "____________________________________"
301 	print "____________________________________"
302 	errors['traceback'] = traceback.format_exc()
303 	print errors['traceback']
# Timestamp the probe so staleness can be judged on the next round.
305 	values['checked'] = time.time()
306 	return (pcuname, values, errors)
# Threadpool result callback: persist the (pcuname, values, errors) tuple
# produced by collectPingAndSSH into externalState (keyed "id_<name>") and
# dump it to the database; errors go to a separate "findbadpcu_errors" dump.
# NOTE(review): the lock acquisition and the `count` binding are among the
# lines not visible in this listing.
308 def recordPingAndSSH(request, result):
312 	(nodename, values, errors) = result
314 	if values is not None:
315 		global_round = externalState['round']
316 		pcu_id = "id_%s" % nodename
317 		externalState['nodes'][pcu_id]['values'] = values
# Stamp the entry with the current round so it is skipped until refreshed.
318 		externalState['nodes'][pcu_id]['round'] = global_round
321 		print "%d %s %s" % (count, nodename, externalState['nodes'][pcu_id]['values'])
# Persist after every result -- presumably so a crash loses at most one PCU;
# TODO confirm this is intentional (it re-dumps the whole state each time).
322 		database.dbDump(config.dbname, externalState)
324 	if errors is not None:
325 		pcu_id = "id_%s" % nodename
326 		errorState[pcu_id] = errors
327 		database.dbDump("findbadpcu_errors", errorState)
329 # this will be called when an exception occurs within a thread
330 def handle_exception(request, result):
331 	print "Exception occured in request %s" % request.requestID
# NOTE(review): the loop header binding `i` (presumably iterating the
# exception info in `result`) is not visible in this listing.
333 	print "Result: %s" % i
# Drive the scan: for every PCU whose recorded round is older than the
# current global round, queue a collectPingAndSSH work request on a 10-thread
# pool (results land in recordPingAndSSH, exceptions in handle_exception);
# up-to-date PCUs are skipped. Then wait for completion with a hard time
# budget. NOTE(review): the request-submission call, `begin` binding, and the
# wait/poll loop scaffolding are among the lines not visible in this listing.
336 def checkAndRecordState(l_pcus, cohash):
339 	global_round = externalState['round']
341 	tp = threadpool.ThreadPool(10)
343 	# CREATE all the work requests
344 	for pcuname in l_pcus:
345 		pcu_id = "id_%s" % pcuname
# First time we've seen this PCU: seed an empty round-0 record so the
# staleness check below queues it.
346 		if pcuname not in externalState['nodes']:
347 			#print type(externalState['nodes'])
349 			externalState['nodes'][pcu_id] = {'round': 0, 'values': []}
351 		node_round   = externalState['nodes'][pcu_id]['round']
352 		if node_round < global_round:
353 			# recreate node stats when refreshed
354 			#print "%s" % nodename
355 			req = threadpool.WorkRequest(collectPingAndSSH, [pcuname, cohash], {},
356 				None, recordPingAndSSH, handle_exception)
359 			# We just skip it, since it's "up to date"
361 			print "%d %s %s" % (count, pcu_id, externalState['nodes'][pcu_id]['values'])
364 	# WAIT while all the work requests are processed.
# Hard time budget: the comment says "two hours" but the expression is
# 60*60*1 == one hour -- comment and code disagree; TODO confirm intent.
370 	# if more than two hours
371 	if time.time() - begin > (60*60*1):
372 		print "findbadpcus.py has run out of time!!!!!!"
# Save whatever was collected before bailing out.
373 		database.dbDump(config.dbname, externalState)
375 	except KeyboardInterrupt:
378 	except threadpool.NoResultsPending:
379 		print "All results collected."
# NOTE(review): fragment of the script's main() body -- the enclosing `def`
# is not visible in this listing. Visible logic: load (or refresh) the PCU
# list and prior scan state, bump the round counter, then narrow l_pcus
# according to exactly one of --site / --pcuselect / --nodelist / --pcuid
# before running checkAndRecordState.
387 	l_pcus  = database.if_cached_else_refresh(1, config.refresh, "pculist", lambda : plc.GetPCUs())
# Resume from the previous run's state if a cached dump exists.
388 	externalState = database.if_cached_else(1, config.dbname, lambda : externalState)
392 	# update global round number to force refreshes across all nodes
393 	externalState['round'] += 1
# --site: collect the pcu_ids of every node at the given site.
395 	if config.site is not None:
396 		api = plc.getAuthAPI()
397 		site = api.GetSites(config.site)
398 		l_nodes = api.GetNodes(site[0]['node_ids'], ['pcu_ids'])
401 		pcus += node['pcu_ids']
# Deduplicate via sets.Set (deprecated module; kept as-is in this view).
403 		l_pcus = [pcu for pcu in sets.Set(pcus)]
404 	elif config.pcuselect is not None:
# --pcuselect: query-string filter evaluated by nodequery.pcu_select.
405 		n, pcus = pcu_select(config.pcuselect)
407 		l_pcus = [pcu for pcu in sets.Set(pcus)]
409 	elif config.nodelist == None and config.pcuid == None:
# Default: scan every PCU known to PLC.
410 		print "Calling API GetPCUs() : refresh(%s)" % config.refresh
411 		l_pcus  = [pcu['pcu_id'] for pcu in l_pcus]
412 	elif config.nodelist is not None:
# --nodelist: read PCU ids from a file, one per line.
413 		l_pcus = util.file.getListFromFile(config.nodelist)
414 		l_pcus = [int(pcu) for pcu in l_pcus]
415 	elif config.pcuid is not None:
416 		l_pcus = [ config.pcuid ]
417 		l_pcus = [int(pcu) for pcu in l_pcus]
419 	checkAndRecordState(l_pcus, cohash)
# Script entry point: configure file logging, parse command-line options,
# work around an iLO/ssh LANG interaction, then run main() (call not visible
# in this listing) inside a try/except that saves collected state on failure.
424 if __name__ == '__main__':
426 	logger = logging.getLogger("monitor")
427 	logger.setLevel(logging.DEBUG)
# Append-mode file handler so successive runs accumulate in monitor.log.
428 	fh = logging.FileHandler("monitor.log", mode = 'a')
429 	fh.setLevel(logging.DEBUG)
430 	formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
431 	fh.setFormatter(formatter)
432 	logger.addHandler(fh)
# Local `parser` module (option-parser wrapper), not the stdlib `parser`.
433 	import parser as parsermodule
434 	parser = parsermodule.getParser()
435 	parser.set_defaults(nodelist=None,
440 		dbname="findbadpcus",
444 	parser.add_option("-f", "--nodelist", dest="nodelist", metavar="FILE",
445 		help="Provide the input file for the node list")
446 	parser.add_option("", "--site", dest="site", metavar="FILE",
447 		help="Get all pcus associated with the given site's nodes")
448 	parser.add_option("", "--pcuselect", dest="pcuselect", metavar="FILE",
449 		help="Query string to apply to the findbad pcus")
450 	parser.add_option("", "--pcuid", dest="pcuid", metavar="id",
451 		help="Provide the id for a single pcu")
453 	parser.add_option("", "--cachenodes", action="store_true",
454 		help="Cache node lookup from PLC")
455 	parser.add_option("", "--dbname", dest="dbname", metavar="FILE",
456 		help="Specify the name of the database to which the information is saved")
457 	parser.add_option("", "--refresh", action="store_true", dest="refresh",
458 		help="Refresh the cached values")
459 	parser.add_option("-i", "--increment", action="store_true", dest="increment",
460 		help="Increment round number to force refresh or retry")
# Layer the shared 'defaults' options on top of this parser.
461 	parser = parsermodule.getParser(['defaults'], parser)
462 	config = parsermodule.parse_args(parser)
464 	# NOTE: evidently, there is a bizarre interaction between iLO and ssh
465 	# when LANG is set... Do not know why. Unsetting LANG, fixes the problem.
466 	if 'LANG' in os.environ:
467 		del os.environ['LANG']
# NOTE(review): the try:/main() lines are not visible in this listing; on
# any failure, dump the partial scan state before exiting so work is not lost.
470 	except Exception, err:
471 		traceback.print_exc()
472 		from nodecommon import email_exception
474 		print "Exception: %s" % err
475 		print "Saving data... exitting."
476 		database.dbDump(config.dbname, externalState)