# NOTE(review): this chunk is a sampled, line-numbered dump -- the leading
# integers are the ORIGINAL file's line numbers and many lines are elided,
# so some statements below appear without their surrounding context.
# CoMon "tabulator" query URL: requests the node-view table and a fixed set
# of per-node status columns (name, resptime, sshstatus, uptime, lastcotop,
# cpuspeed, memsize, disksize).
12 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
13 "table=table_nodeview&" + \
14 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&" + \
17 #"select='lastcotop!=0'"
# Lock presumably serializing PLC API access from worker threads --
# TODO confirm: no acquire/release is visible in this chunk.
20 plc_lock = threading.Lock()
# Global scan state: current refresh round plus per-node results
# ({'round': ..., 'values': ...} per hostname); checkpointed to disk via
# database.dbDump elsewhere in this file.
# NOTE(review): 'round' here presumably refers to a name defined on an
# elided earlier line (otherwise it would capture the builtin) -- verify.
22 externalState = {'round': round, 'nodes': {}}
31 from nodequery import verify,query_to_dict,node_select
# Authenticated PLC API handle used for GetNodeGroups/GetNodes/GetSites below.
34 api = plc.getAuthAPI()
# Probe one node and return (nodename, values) -- or (None, None) when a PLC
# query fails (b_except path).  The values dict aggregates: ping result, ssh
# reachability and port, remote kernel/bootcd/bm.log/node-manager info,
# princeton_comon slice health, a category/state classification, CoMon stats,
# PLC node and site records, and a 'checked' timestamp.
# NOTE(review): many original lines are elided from this listing (embedded
# numbering jumps), so several branches below appear without their
# if/else/try context -- confirm against the full file before editing.
36 def collectPingAndSSH(nodename, cohash):
37 ### RUN PING ######################
# Single ping probe; values['ping'] ends up "NOPING" or "PING" (the branch
# condition between lines 45 and 47 is on elided lines -- presumably keyed
# on whether the rtt line appeared in oval).
38 ping = moncommands.CMD()
39 (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)
45 values['ping'] = "NOPING"
47 values['ping'] = "PING"
# Try root ssh on port 22 and then 806 (PlanetLab's alternate sshd).  The
# remote heredoc prints a python-dict-shaped report that is eval()'d below.
50 for port in [22, 806]:
51 ssh = moncommands.SSH('root', nodename, port)
53 (oval, errval) = ssh.run_noexcept2(""" <<\EOF
55 echo ' "kernel":"'`uname -a`'",'
56 echo ' "bmlog":"'`ls /tmp/bm.log`'",'
57 echo ' "bootcd":"'`cat /mnt/cdrom/bootme/ID`'",'
58 echo ' "nm":"'`ps ax | grep nm.py | grep -v grep`'",'
59 echo ' "princeton_comon":"'`ls -d /vservers/princeton_comon`'",'
# uid of the princeton_comon slice account (>500), used to locate its
# vserver context under /proc/virtual and to count its processes.
61 ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
63 echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
64 echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
# SECURITY NOTE(review): eval() of text echoed back by the remote node --
# a compromised node could inject arbitrary python into this monitor.
69 values.update(eval(oval))
70 values['sshport'] = port
# Blank out all report fields when the ssh probe yielded nothing usable
# (presumably the else/except arm of an elided branch).
73 values.update({'kernel': "", 'bmlog' : "", 'bootcd' : '', 'nm' :
74 '', 'princeton_comon' : '', 'princeton_comon_running' : '',
75 'princeton_comon_procs' : '', 'sshport' : None})
77 print traceback.print_exc()
80 ### RUN SSH ######################
82 #ssh = moncommands.SSH('root', nodename)
85 #(oval, errval) = ssh.run_noexcept('echo `uname -a ; ls /tmp/bm.log`')
# Classify the node from its kernel string: 2.6.17/2.6.2* -> ALPHA,
# 2.6.12/2.6.10 -> PROD, 2.4/2.6.8 -> OLDBOOTCD, otherwise UNKNOWN.
# Within each, presence of "bm.log" means state DEBUG, else BOOT.
87 oval = values['kernel']
88 if "2.6.17" in oval or "2.6.2" in oval:
90 values['category'] = 'ALPHA'
91 if "bm.log" in values['bmlog']:
92 values['state'] = 'DEBUG'
94 values['state'] = 'BOOT'
95 elif "2.6.12" in oval or "2.6.10" in oval:
97 values['category'] = 'PROD'
98 if "bm.log" in values['bmlog']:
99 values['state'] = 'DEBUG'
101 values['state'] = 'BOOT'
103 # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot command fails. I have no idea why.
104 elif "2.4" in oval or "2.6.8" in oval:
105 b_getbootcd_id = False
106 values['ssh'] = 'SSH'
107 values['category'] = 'OLDBOOTCD'
108 values['state'] = 'DEBUG'
110 values['ssh'] = 'SSH'
111 values['category'] = 'UNKNOWN'
112 if "bm.log" in values['bmlog']:
113 values['state'] = 'DEBUG'
115 values['state'] = 'BOOT'
# Elided arm (presumably: no ssh at all) -- mark the node unreachable.
118 b_getbootcd_id = False
119 values['ssh'] = 'NOSSH'
120 values['category'] = 'ERROR'
121 values['state'] = 'DOWN'
123 values['kernel'] = val
125 #values['kernel'] = val
128 # try to get BootCD for all nodes that are not 2.4 nor inaccessible
129 #(oval, errval) = ssh.run_noexcept('cat /mnt/cdrom/bootme/ID')
130 oval = values['bootcd']
132 values['bootcd'] = oval
# BUG(review): 'is not' with a string literal tests object identity, not
# equality -- these exclusions should use != ; as written the comparisons
# are implementation-dependent and almost always True.
133 if "v2" in oval and \
134 ( nodename is not "planetlab1.cs.unc.edu" and \
135 nodename is not "planetlab2.cs.unc.edu" ):
136 values['category'] = 'OLDBOOTCD'
138 values['bootcd'] = ""
140 values['bootcd'] = ""
142 # TODO: get bm.log for debug nodes.
145 #(oval, errval) = ssh.run_noexcept('ps ax | grep nm.py | grep -v grep')
# princeton_comon slice checks, three levels deep (vserver dir present,
# vserver context running, process count); continue_slice_check
# short-circuits the deeper checks once a level fails.
152 continue_slice_check = True
153 #(oval, errval) = ssh.run_noexcept('ls -d /vservers/princeton_comon')
154 oval = values['princeton_comon']
155 if "princeton_comon" in oval:
156 values['princeton_comon'] = "Y"
158 values['princeton_comon'] = "N"
159 continue_slice_check = False
161 if continue_slice_check:
162 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; ls -d /proc/virtual/$ID')
163 oval = values['princeton_comon_running']
# A found /proc/virtual/<ID> path is strictly longer than the bare prefix.
164 if len(oval) > len('/proc/virtual/'):
165 values['princeton_comon_running'] = "Y"
167 values['princeton_comon_running'] = "N"
168 continue_slice_check = False
170 values['princeton_comon_running'] = "-"
172 if continue_slice_check:
173 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; vps ax | grep $ID | grep -v grep | wc -l')
174 oval = values['princeton_comon_procs']
175 values['princeton_comon_procs'] = oval
177 values['princeton_comon_procs'] = "-"
# Attach the CoMon stats fetched earlier, or a placeholder record.
180 if nodename in cohash:
181 values['comonstats'] = cohash[nodename]
183 values['comonstats'] = {'resptime': '-1',
190 # include output value
191 ### GET PLC NODE ######################
# PLC lookup of this node; the try/except that sets b_except is on elided
# lines.  A failed query abandons the whole probe.
196 d_node = plc.getNodes({'hostname': nodename}, ['pcu_ids', 'site_id', 'date_created', 'last_updated', 'last_contact', 'boot_state', 'nodegroup_ids'])
199 traceback.print_exc()
202 if b_except: return (None, None)
205 if d_node and len(d_node) > 0:
206 pcu = d_node[0]['pcu_ids']
208 values['pcu'] = "PCU"
210 values['pcu'] = "NOPCU"
211 site_id = d_node[0]['site_id']
212 last_contact = d_node[0]['last_contact']
213 nodegroups = [ i['name'] for i in api.GetNodeGroups(d_node[0]['nodegroup_ids']) ]
214 values['plcnode'] = {'status' : 'SUCCESS',
216 'boot_state' : d_node[0]['boot_state'],
218 'nodegroups' : nodegroups,
219 'last_contact': last_contact,
220 'date_created': d_node[0]['date_created'],
221 'last_updated': d_node[0]['last_updated']}
223 values['pcu'] = "UNKNOWN"
224 values['plcnode'] = {'status' : "GN_FAILED"}
227 ### GET PLC SITE ######################
# Same pattern for the node's site record; failure -> GS_FAILED marker.
232 d_site = plc.getSites({'site_id': site_id},
233 ['max_slices', 'slice_ids', 'node_ids', 'login_base'])
236 traceback.print_exc()
239 if b_except: return (None, None)
241 if d_site and len(d_site) > 0:
242 max_slices = d_site[0]['max_slices']
243 num_slices = len(d_site[0]['slice_ids'])
244 num_nodes = len(d_site[0]['node_ids'])
245 loginbase = d_site[0]['login_base']
246 values['plcsite'] = {'num_nodes' : num_nodes,
247 'max_slices' : max_slices,
248 'num_slices' : num_slices,
249 'login_base' : loginbase,
250 'status' : 'SUCCESS'}
252 values['plcsite'] = {'status' : "GS_FAILED"}
# Timestamp of when this probe finished.
254 values['checked'] = time.time()
256 return (nodename, values)
# Threadpool result callback: store a probe's values under its node, mark the
# node refreshed in the current round, then checkpoint the whole state.
# NOTE(review): 'count' (line 269) and the dbDump guard (line 271) presumably
# sit under elided counter/locking lines -- confirm against the full file.
258 def recordPingAndSSH(request, result):
261 (nodename, values) = result
# (None, None) results (PLC failure in the probe) are skipped entirely.
263 if values is not None:
264 global_round = externalState['round']
265 externalState['nodes'][nodename]['values'] = values
266 externalState['nodes'][nodename]['round'] = global_round
269 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
271 database.dbDump(config.dbname, externalState)
273 # this will be called when an exception occurs within a thread
# Threadpool exception callback: result is the exception info for the
# failed request.
# NOTE(review): "occured" is a typo in the printed message ("occurred");
# left untouched here because it is runtime output, not a comment.
274 def handle_exception(request, result):
275 print "Exception occured in request %s" % request.requestID
# 'i' is bound by an elided loop over 'result' -- verify in the full file.
277 print "Result: %s" % i
# Drive one scan round: enqueue a collectPingAndSSH probe for every node in
# l_nodes whose stored round is behind the global round, run them on a
# 20-thread pool, and dump state on timeout / interrupt / completion.
# NOTE(review): several lines (request submission, the wait loop, 'begin'
# and 'count' initialization) are elided from this listing.
280 def checkAndRecordState(l_nodes, cohash):
283 global_round = externalState['round']
285 tp = threadpool.ThreadPool(20)
287 # CREATE all the work requests
288 for nodename in l_nodes:
# First time we see a node: seed an empty record at round 0 so it is
# always considered stale relative to the current round.
289 if nodename not in externalState['nodes']:
290 externalState['nodes'][nodename] = {'round': 0, 'values': []}
292 node_round = externalState['nodes'][nodename]['round']
293 if node_round < global_round:
294 # recreate node stats when refreshed
295 #print "%s" % nodename
# recordPingAndSSH consumes each result; handle_exception is called
# for worker exceptions.
296 req = threadpool.WorkRequest(collectPingAndSSH, [nodename, cohash], {},
297 None, recordPingAndSSH, handle_exception)
300 # We just skip it, since it's "up to date"
302 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
305 # WAIT while all the work requests are processed.
# NOTE(review): the comment below says "two hours" but the threshold
# coded on line 312 is 1.5 hours (60*60*1.5).
311 # if more than two hours
312 if time.time() - begin > (60*60*1.5):
313 print "findbad.py has run out of time!!!!!!"
# Save whatever was collected before giving up.
314 database.dbDump(config.dbname, externalState)
316 except KeyboardInterrupt:
319 except threadpool.NoResultsPending:
320 print "All results collected."
# Final checkpoint after the wait loop exits.
323 database.dbDump(config.dbname, externalState)
# Scan setup (presumably the body of an elided main()/entry function):
# load cached state, bump the round to force refreshes, fetch CoMon data,
# build the node list from the PLC db -- optionally narrowed by --nodelist,
# --node, --nodegroup, --site, --nodeselect -- then run the scan.
330 externalState = database.if_cached_else(1, config.dbname, lambda : externalState)
333 # update global round number to force refreshes across all nodes
334 externalState['round'] += 1
336 cotop = comon.Comon()
337 # lastcotop measures whether cotop is actually running. this is a better
338 # metric than sshstatus, or other values from CoMon
339 cotop_url = COMON_COTOPURL
341 # history information for all nodes
343 cohash = cotop.coget(cotop_url)
344 l_nodes = syncplcdb.create_plcdb()
# --nodelist: keep only hostnames listed in the given file (the guarding
# 'if config.nodelist:' appears to be on an elided line).
346 f_nodes = util.file.getListFromFile(config.nodelist)
347 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
# --node: restrict the scan to a single hostname.
349 f_nodes = [config.node]
350 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
351 elif config.nodegroup:
352 ng = api.GetNodeGroups({'name' : config.nodegroup})
353 l_nodes = api.GetNodes(ng[0]['node_ids'])
355 site = api.GetSites(config.site)
356 l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
# From here on only hostnames are needed.
358 l_nodes = [node['hostname'] for node in l_nodes]
360 # perform this query after the above options, so that the filter above
# --nodeselect: further narrow using a query over the last findbad run.
362 if config.nodeselect:
363 fb = database.dbLoad("findbad")
364 l_nodes = node_select(config.nodeselect, fb['nodes'].keys(), fb)
366 print "fetching %s hosts" % len(l_nodes)
368 checkAndRecordState(l_nodes, cohash)
# Script entry point: assemble the option parser (nodesets group plus the
# findbad-specific flags, then the defaults group), parse args, and run the
# scan inside a try whose body is on elided lines; any exception saves the
# collected state to disk before exiting.
373 if __name__ == '__main__':
374 import parser as parsermodule
376 parser = parsermodule.getParser(['nodesets'])
378 parser.set_defaults( increment=False, dbname="findbad", cachenodes=False)
379 parser.add_option("", "--cachenodes", action="store_true",
380 help="Cache node lookup from PLC")
381 parser.add_option("", "--dbname", dest="dbname", metavar="FILE",
382 help="Specify the name of the database to which the information is saved")
383 parser.add_option("-i", "--increment", action="store_true", dest="increment",
384 help="Increment round number to force refresh or retry")
386 parser = parsermodule.getParser(['defaults'], parser)
388 cfg = parsermodule.parse_args(parser)
# Python 2 except syntax; last-ditch handler persists progress.
# NOTE(review): "exitting" (line 395) is a typo in runtime output.
392 except Exception, err:
393 print traceback.print_exc()
394 print "Exception: %s" % err
395 print "Saving data... exitting."
396 database.dbDump(config.dbname, externalState)