# CoMon "cotop" tabulator query: fetches per-node status columns used to
# decide whether cotop is actually running on each node.
# NOTE(review): this listing is fragmented (embedded line numbers jump 14->17),
# so the tail of this string-continuation is not visible here.
12 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
13 "table=table_nodeview&" + \
14 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&" + \
17 #"select='lastcotop!=0'"
# Lock serializing access to the PLC API from worker threads.
20 plc_lock = threading.Lock()
# In-memory scan state, persisted via database.dbDump(): current round number
# plus a per-node dict of {'round', 'values'}.
# NOTE(review): 'round' here reads a name defined on a line not visible in this
# listing; as written it would pick up the 'round' builtin — confirm the
# missing line assigns an integer round counter.
22 externalState = {'round': round, 'nodes': {}}
31 from nodequery import verify,query_to_dict,node_select
# Authenticated PLC API handle shared by all threads.
34 api = plc.getAuthAPI()
# Probe a single node end-to-end and build its status record.
# Steps visible in this listing: (1) single ping, (2) ssh fact-gathering on
# ports 22 then 806, (3) kernel/bootcd classification into
# category/state/ssh fields, (4) princeton_comon slice checks, (5) CoMon
# stats from 'cohash', (6) PLC node + site lookups.
# Returns (nodename, values) on success, or (None, None) when a PLC API call
# raised (b_except path).
# NOTE(review): the listing is fragmented — embedded line numbers show gaps,
# so the try/except/if/else scaffolding between these fragments is missing
# from view. Comments below describe only what the visible lines establish.
36 def collectPingAndSSH(nodename, cohash):
37 ### RUN PING ######################
# One quiet ping; the grep keeps only the rtt summary line.
38 ping = moncommands.CMD()
39 (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)
# Branch lines selecting between these two assignments are not visible
# (embedded lines 40-44 / 46 missing).
45 values['ping'] = "NOPING"
47 values['ping'] = "PING"
# Try the standard sshd port first, then 806 (presumably PlanetLab's
# alternate boot-manager sshd port — TODO confirm).
50 for port in [22, 806]:
51 ssh = moncommands.SSH('root', nodename, port)
# Remote heredoc emits a Python-dict-shaped text blob of node facts
# (kernel, boot state, comon slice presence, etc.).
53 (oval, errval) = ssh.run_noexcept2(""" <<\EOF
55 echo ' "kernel":"'`uname -a`'",'
56 echo ' "bmlog":"'`ls /tmp/bm.log`'",'
57 echo ' "bootcd":"'`cat /mnt/cdrom/bootme/ID`'",'
58 echo ' "nm":"'`ps ax | grep nm.py | grep -v grep`'",'
# A failed touch means the root fs is mounted read-only (stderr captured).
59 echo ' "readonlyfs":"'`touch /var/log/monitor 2>&1`'",'
60 echo ' "dns":"'`host boot.planet-lab.org 2>&1`'",'
61 echo ' "princeton_comon":"'`ls -d /vservers/princeton_comon`'",'
63 ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
65 echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
66 echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
# SECURITY(review): eval() of text produced by the remote node — a
# compromised node can execute arbitrary code in this monitor. Prefer a
# safe parser (e.g. ast.literal_eval / json) here.
71 values.update(eval(oval))
72 values['sshport'] = port
# Fallback: blank out all remote facts when the ssh probe yields nothing
# usable (the guarding condition is on lines not visible here).
75 values.update({'kernel': "", 'bmlog' : "", 'bootcd' : '',
79 'princeton_comon' : '',
80 'princeton_comon_running' : '',
81 'princeton_comon_procs' : '', 'sshport' : None})
# NOTE(review): traceback.print_exc() prints the traceback itself and
# returns None — this line also prints a stray "None".
83 print traceback.print_exc()
86 ### RUN SSH ######################
88 #ssh = moncommands.SSH('root', nodename)
91 #(oval, errval) = ssh.run_noexcept('echo `uname -a ; ls /tmp/bm.log`')
# Classify by kernel version string. Substring tests: "2.6.2" also matches
# 2.6.20-2.6.29 — presumably intentional for current-PROD kernels; confirm.
93 oval = values['kernel']
94 if "2.6.17" in oval or "2.6.2" in oval:
96 values['category'] = 'PROD'
# bm.log present => node sitting in the boot manager => DEBUG, else BOOT.
97 if "bm.log" in values['bmlog']:
98 values['state'] = 'DEBUG'
100 values['state'] = 'BOOT'
101 elif "2.6.12" in oval or "2.6.10" in oval:
102 values['ssh'] = 'SSH'
103 values['category'] = 'OLDPROD'
104 if "bm.log" in values['bmlog']:
105 values['state'] = 'DEBUG'
107 values['state'] = 'BOOT'
109 # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot command fails. I have no idea why.
110 elif "2.4" in oval or "2.6.8" in oval:
111 b_getbootcd_id = False
112 values['ssh'] = 'SSH'
113 values['category'] = 'OLDBOOTCD'
114 values['state'] = 'DEBUG'
# Unrecognized-kernel fallback (the else: line itself is not visible).
116 values['ssh'] = 'SSH'
117 values['category'] = 'UNKNOWN'
118 if "bm.log" in values['bmlog']:
119 values['state'] = 'DEBUG'
121 values['state'] = 'BOOT'
# No-ssh fallback: node is unreachable over ssh entirely.
124 b_getbootcd_id = False
125 values['ssh'] = 'NOSSH'
126 values['category'] = 'ERROR'
127 values['state'] = 'DOWN'
129 values['kernel'] = val
131 #values['kernel'] = val
134 # try to get BootCD for all nodes that are not 2.4 nor inaccessible
135 #(oval, errval) = ssh.run_noexcept('cat /mnt/cdrom/bootme/ID')
136 oval = values['bootcd']
138 values['bootcd'] = oval
# BUG(review): 'is not' is an identity comparison, not '!='. Distinct
# string objects are almost never identical, so both tests are effectively
# always True and the intended unc.edu exclusions never take effect.
# Should be: nodename != "planetlab1.cs.unc.edu", etc.
139 if "v2" in oval and \
140 ( nodename is not "planetlab1.cs.unc.edu" and \
141 nodename is not "planetlab2.cs.unc.edu" ):
142 values['category'] = 'OLDBOOTCD'
144 values['bootcd'] = ""
146 values['bootcd'] = ""
148 # TODO: get bm.log for debug nodes.
151 #(oval, errval) = ssh.run_noexcept('ps ax | grep nm.py | grep -v grep')
# Cascading princeton_comon slice checks; each stage can short-circuit the
# later ones via continue_slice_check.
158 continue_slice_check = True
159 #(oval, errval) = ssh.run_noexcept('ls -d /vservers/princeton_comon')
160 oval = values['princeton_comon']
161 if "princeton_comon" in oval:
162 values['princeton_comon'] = "Y"
164 values['princeton_comon'] = "N"
165 continue_slice_check = False
167 if continue_slice_check:
168 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; ls -d /proc/virtual/$ID')
169 oval = values['princeton_comon_running']
# Anything longer than the bare '/proc/virtual/' prefix means a real
# vserver context directory was listed.
170 if len(oval) > len('/proc/virtual/'):
171 values['princeton_comon_running'] = "Y"
173 values['princeton_comon_running'] = "N"
174 continue_slice_check = False
176 values['princeton_comon_running'] = "-"
178 if continue_slice_check:
179 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; vps ax | grep $ID | grep -v grep | wc -l')
180 oval = values['princeton_comon_procs']
181 values['princeton_comon_procs'] = oval
183 values['princeton_comon_procs'] = "-"
# Attach CoMon stats when this node appeared in the fetched cotop table;
# otherwise a placeholder record (remainder of the literal not visible).
186 if nodename in cohash:
187 values['comonstats'] = cohash[nodename]
189 values['comonstats'] = {'resptime': '-1',
196 # include output value
197 ### GET PLC NODE ######################
# PLC lookup for this hostname; b_except is set by an except handler on
# lines not visible here, and aborts the whole probe.
202 d_node = plc.getNodes({'hostname': nodename}, ['pcu_ids', 'site_id', 'date_created', 'last_updated', 'last_contact', 'boot_state', 'nodegroup_ids'])
205 traceback.print_exc()
208 if b_except: return (None, None)
211 if d_node and len(d_node) > 0:
212 pcu = d_node[0]['pcu_ids']
214 values['pcu'] = "PCU"
216 values['pcu'] = "NOPCU"
217 site_id = d_node[0]['site_id']
218 last_contact = d_node[0]['last_contact']
219 nodegroups = [ i['name'] for i in api.GetNodeGroups(d_node[0]['nodegroup_ids']) ]
220 values['plcnode'] = {'status' : 'SUCCESS',
222 'boot_state' : d_node[0]['boot_state'],
224 'nodegroups' : nodegroups,
225 'last_contact': last_contact,
226 'date_created': d_node[0]['date_created'],
227 'last_updated': d_node[0]['last_updated']}
# GetNodes failed or returned nothing: record the failure marker.
229 values['pcu'] = "UNKNOWN"
230 values['plcnode'] = {'status' : "GN_FAILED"}
233 ### GET PLC SITE ######################
# Site-level capacity/identity info for the node's site_id.
238 d_site = plc.getSites({'site_id': site_id},
239 ['max_slices', 'slice_ids', 'node_ids', 'login_base'])
242 traceback.print_exc()
245 if b_except: return (None, None)
247 if d_site and len(d_site) > 0:
248 max_slices = d_site[0]['max_slices']
249 num_slices = len(d_site[0]['slice_ids'])
250 num_nodes = len(d_site[0]['node_ids'])
251 loginbase = d_site[0]['login_base']
252 values['plcsite'] = {'num_nodes' : num_nodes,
253 'max_slices' : max_slices,
254 'num_slices' : num_slices,
255 'login_base' : loginbase,
256 'status' : 'SUCCESS'}
258 values['plcsite'] = {'status' : "GS_FAILED"}
# Timestamp the probe so staleness can be judged later.
260 values['checked'] = time.time()
262 return (nodename, values)
# Threadpool result callback: store a finished probe into externalState and
# checkpoint the database. 'request' is the threadpool.WorkRequest; 'result'
# is collectPingAndSSH's (nodename, values) return.
# NOTE(review): 'count' is not defined in the visible lines — presumably a
# global progress counter maintained on the missing lines; confirm.
264 def recordPingAndSSH(request, result):
267 (nodename, values) = result
# (None, None) results (PLC failures) are skipped entirely.
269 if values is not None:
# Stamp the node with the current round so checkAndRecordState won't
# re-queue it this round.
270 global_round = externalState['round']
271 externalState['nodes'][nodename]['values'] = values
272 externalState['nodes'][nodename]['round'] = global_round
275 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
# Persist after every recorded node so a crash loses at most one result.
277 database.dbDump(config.dbname, externalState)
279 # this will be called when an exception occurs within a thread
# Threadpool exception callback: logs the failing request and its result
# payload. NOTE(review): 'i' is bound by a loop header on a line missing from
# this listing (presumably iterating over the exc-info tuple in 'result').
280 def handle_exception(request, result):
281 print "Exception occured in request %s" % request.requestID
283 print "Result: %s" % i
# Fan the per-node probes out over a 20-worker thread pool, skipping nodes
# already probed this round, then wait for completion with a wall-clock
# cutoff; persists externalState on exit paths.
286 def checkAndRecordState(l_nodes, cohash):
289 global_round = externalState['round']
291 tp = threadpool.ThreadPool(20)
293 # CREATE all the work requests
294 for nodename in l_nodes:
# First sighting of this node: seed with round 0 so it is stale.
295 if nodename not in externalState['nodes']:
296 externalState['nodes'][nodename] = {'round': 0, 'values': []}
298 node_round = externalState['nodes'][nodename]['round']
# Only probe nodes that have not yet been recorded for this round.
299 if node_round < global_round:
300 # recreate node stats when refreshed
301 #print "%s" % nodename
# recordPingAndSSH is the success callback, handle_exception the
# error callback.
302 req = threadpool.WorkRequest(collectPingAndSSH, [nodename, cohash], {},
303 None, recordPingAndSSH, handle_exception)
306 # We just skip it, since it's "up to date"
# NOTE(review): 'count' here is presumably the same missing global
# progress counter used in recordPingAndSSH — confirm.
308 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
311 # WAIT while all the work requests are processed.
317 # if more than two hours
# NOTE(review): comment above disagrees with the code — the cutoff is
# 60*60*1.5 seconds, i.e. 1.5 hours, not two.
318 if time.time() - begin > (60*60*1.5):
319 print "findbad.py has run out of time!!!!!!"
# Save whatever was collected before bailing out of the wait loop.
320 database.dbDump(config.dbname, externalState)
322 except KeyboardInterrupt:
325 except threadpool.NoResultsPending:
# Normal termination: every queued request has been processed.
326 print "All results collected."
329 database.dbDump(config.dbname, externalState)
# Main scan driver. NOTE(review): the enclosing 'def main' / config handling
# (embedded lines 330-335 and others) is missing from this listing; the
# statements below read as that function's body.
# Load cached state if present, else fall back to the in-memory default.
336 externalState = database.if_cached_else(1, config.dbname, lambda : externalState)
339 # update global round number to force refreshes across all nodes
340 externalState['round'] += 1
342 cotop = comon.Comon()
343 # lastcotop measures whether cotop is actually running. this is a better
344 # metric than sshstatus, or other values from CoMon
345 cotop_url = COMON_COTOPURL
347 # history information for all nodes
349 cohash = cotop.coget(cotop_url)
# Start from the full PLC node list, then narrow by whichever config
# option is set (the guarding if/elif lines are partly missing here).
350 l_nodes = syncplcdb.create_plcdb()
# --nodelist FILE: keep only hostnames listed in the file.
352 f_nodes = util.file.getListFromFile(config.nodelist)
353 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
# --node HOST: keep only the single named host.
355 f_nodes = [config.node]
356 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
357 elif config.nodegroup:
358 ng = api.GetNodeGroups({'name' : config.nodegroup})
359 l_nodes = api.GetNodes(ng[0]['node_ids'])
# --site: all nodes belonging to the given site.
361 site = api.GetSites(config.site)
362 l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
# Collapse node records down to bare hostnames for the scan.
364 l_nodes = [node['hostname'] for node in l_nodes]
366 # perform this query after the above options, so that the filter above
368 if config.nodeselect:
# --nodeselect expr: filter against the previously saved findbad DB.
369 fb = database.dbLoad("findbad")
370 l_nodes = node_select(config.nodeselect, fb['nodes'].keys(), fb)
372 print "fetching %s hosts" % len(l_nodes)
374 checkAndRecordState(l_nodes, cohash)
# Script entry point: build the option parser, parse args, run the scan, and
# on any failure dump whatever state was collected before exiting.
379 if __name__ == '__main__':
380 import parser as parsermodule
381 # NOTE(review): the local module shadows the stdlib 'parser' module name.
382 parser = parsermodule.getParser(['nodesets'])
384 parser.set_defaults( increment=False, dbname="findbad", cachenodes=False)
385 parser.add_option("", "--cachenodes", action="store_true",
386 help="Cache node lookup from PLC")
387 parser.add_option("", "--dbname", dest="dbname", metavar="FILE",
388 help="Specify the name of the database to which the information is saved")
389 parser.add_option("-i", "--increment", action="store_true", dest="increment",
390 help="Increment round number to force refresh or retry")
392 parser = parsermodule.getParser(['defaults'], parser)
394 cfg = parsermodule.parse_args(parser)
# NOTE(review): the matching 'try:' and the call into the main driver
# (embedded lines 395-397) are missing from this listing.
398 except Exception, err:
# NOTE(review): traceback.print_exc() returns None, so this also prints
# a stray "None" after the traceback.
399 print traceback.print_exc()
400 print "Exception: %s" % err
# Best-effort save of partial results before exiting on failure.
401 print "Saving data... exitting."
402 database.dbDump(config.dbname, externalState)