# NOTE(review): this chunk is a line-sampled excerpt of a larger file; the
# leading integer on each line is the original file's line number (extraction
# artifact), and many intervening lines are missing from this view.
# CoMon "tabulator" query URL used to fetch per-node status columns.
10 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
11 "table=table_nodeview&" + \
12 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&" + \
15 #"select='lastcotop!=0'"
# Lock guarding PLC API access from worker threads -- presumably; the
# acquire/release sites are not visible in this excerpt. TODO confirm.
18 plc_lock = threading.Lock()
# Persistent scan state: current round number plus a per-hostname dict of
# {'round': N, 'values': {...}} entries (see checkAndRecordState below).
# NOTE(review): 'round' here refers to a name bound on a hidden line (it
# shadows/uses something not visible in this excerpt) -- verify.
20 externalState = {'round': round, 'nodes': {}}
28 from nodequery import verify,query_to_dict,node_select
# Authenticated handle to the PLC API (auth module supplied elsewhere).
32 api = plc.PLC(auth.auth, auth.plc)
34 def collectPingAndSSH(nodename, cohash):
# Probe one node and assemble a 'values' dict describing its health: ping and
# ssh reachability, kernel/bootcd classification, princeton_comon slice
# status, CoMon stats (looked up in cohash), and the node's PLC node + site
# records.  Returns (nodename, values); returns (None, None) when a PLC
# query raises.
# NOTE(review): this function is shown with many original lines missing
# (the if/else/try/except scaffolding between numbered lines) -- the
# comments below describe only what the visible lines establish.
35 ### RUN PING ######################
# Single quiet ICMP probe; grep keeps only the rtt summary line.
# 'values' is presumably initialized on a hidden line above -- confirm.
37 (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)
# (hidden condition) empty/absent rtt output => node did not answer ping.
43 values['ping'] = "NOPING"
45 values['ping'] = "PING"
# Try ssh on the standard port first, then PlanetLab's alternate port 806.
48 for port in [22, 806]:
49 ssh = soltesz.SSH('root', nodename, port)
# The remote heredoc emits a JSON-ish dict fragment which is eval()'d below.
# SECURITY(review): eval() on output from a remote host is dangerous if the
# node is compromised -- flagged here, not changed in this doc-only pass.
51 (oval, errval) = ssh.run_noexcept2(""" <<\EOF
53 echo ' "kernel":"'`uname -a`'",'
54 echo ' "bmlog":"'`ls /tmp/bm.log`'",'
55 echo ' "bootcd":"'`cat /mnt/cdrom/bootme/ID`'",'
56 echo ' "nm":"'`ps ax | grep nm.py | grep -v grep`'",'
57 echo ' "princeton_comon":"'`ls -d /vservers/princeton_comon`'",'
59 ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
61 echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
62 echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
# Parse the remote output into values and record which ssh port succeeded.
67 values.update(eval(oval))
68 values['sshport'] = port
# (hidden except/else) ssh failed: fall back to empty placeholders for every
# probed field so the classification code below can run unconditionally.
71 values.update({'kernel': "", 'bmlog' : "", 'bootcd' : '', 'nm' :
72 '', 'princeton_comon' : '', 'princeton_comon_running' : '',
73 'princeton_comon_procs' : '', 'sshport' : None})
# NOTE(review): printing the return of print_exc() emits an extra 'None'.
75 print traceback.print_exc()
78 ### RUN SSH ######################
80 #ssh = soltesz.SSH('root', nodename)
83 #(oval, errval) = ssh.run_noexcept('echo `uname -a ; ls /tmp/bm.log`')
# Classify the node by substrings of its `uname -a` output.  A leftover
# /tmp/bm.log indicates the node is stuck in boot-manager (DEBUG) mode.
85 oval = values['kernel']
86 if "2.6.17" in oval or "2.6.2" in oval:
88 values['category'] = 'ALPHA'
89 if "bm.log" in values['bmlog']:
90 values['state'] = 'DEBUG'
92 values['state'] = 'BOOT'
93 elif "2.6.12" in oval or "2.6.10" in oval:
95 values['category'] = 'PROD'
96 if "bm.log" in values['bmlog']:
97 values['state'] = 'DEBUG'
99 values['state'] = 'BOOT'
101 # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot command fails. I have no idea why.
102 elif "2.4" in oval or "2.6.8" in oval:
103 b_getbootcd_id = False
104 values['ssh'] = 'SSH'
105 values['category'] = 'OLDBOOTCD'
106 values['state'] = 'DEBUG'
# (hidden else) reachable but unrecognized kernel string.
108 values['ssh'] = 'SSH'
109 values['category'] = 'UNKNOWN'
110 if "bm.log" in values['bmlog']:
111 values['state'] = 'DEBUG'
113 values['state'] = 'BOOT'
# (hidden else) ssh never succeeded on either port: node is down.
116 b_getbootcd_id = False
117 values['ssh'] = 'NOSSH'
118 values['category'] = 'ERROR'
119 values['state'] = 'DOWN'
# NOTE(review): 'val' is bound on a hidden line -- cannot verify from here.
121 values['kernel'] = val
123 #values['kernel'] = val
126 # try to get BootCD for all nodes that are not 2.4 nor inaccessible
127 #(oval, errval) = ssh.run_noexcept('cat /mnt/cdrom/bootme/ID')
128 oval = values['bootcd']
130 values['bootcd'] = oval
# BUG(review): 'is not' tests object identity, not string equality; for
# distinct string objects these comparisons are effectively always true in
# CPython. This should be '!=' -- left unchanged in this doc-only pass.
131 if "v2" in oval and \
132 ( nodename is not "planetlab1.cs.unc.edu" and \
133 nodename is not "planetlab2.cs.unc.edu" ):
134 values['category'] = 'OLDBOOTCD'
136 values['bootcd'] = ""
138 values['bootcd'] = ""
140 # TODO: get bm.log for debug nodes.
143 #(oval, errval) = ssh.run_noexcept('ps ax | grep nm.py | grep -v grep')
# Three-stage check of the princeton_comon slice; each stage short-circuits
# the next via continue_slice_check when the prerequisite is absent.
150 continue_slice_check = True
151 #(oval, errval) = ssh.run_noexcept('ls -d /vservers/princeton_comon')
152 oval = values['princeton_comon']
153 if "princeton_comon" in oval:
154 values['princeton_comon'] = "Y"
156 values['princeton_comon'] = "N"
157 continue_slice_check = False
159 if continue_slice_check:
160 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; ls -d /proc/virtual/$ID')
161 oval = values['princeton_comon_running']
# Output longer than the bare '/proc/virtual/' prefix means $ID expanded,
# i.e. the vserver context directory exists.
162 if len(oval) > len('/proc/virtual/'):
163 values['princeton_comon_running'] = "Y"
165 values['princeton_comon_running'] = "N"
166 continue_slice_check = False
168 values['princeton_comon_running'] = "-"
170 if continue_slice_check:
171 #(oval, errval) = ssh.run_noexcept('ID=`grep princeton_comon /etc/passwd | awk -F : "{if ( \\\$3 > 500 ) { print \\\$3}}"`; vps ax | grep $ID | grep -v grep | wc -l')
172 oval = values['princeton_comon_procs']
173 values['princeton_comon_procs'] = oval
175 values['princeton_comon_procs'] = "-"
# Merge CoMon statistics gathered earlier, keyed by hostname; default stub
# (resptime -1, continuation hidden) marks nodes CoMon knows nothing about.
178 if nodename in cohash:
179 values['comonstats'] = cohash[nodename]
181 values['comonstats'] = {'resptime': '-1',
188 # include output value
189 ### GET PLC NODE ######################
194 d_node = plc.getNodes({'hostname': nodename}, ['pcu_ids', 'site_id', 'date_created', 'last_updated', 'last_contact', 'boot_state', 'nodegroup_ids'])
197 traceback.print_exc()
# b_except is presumably set in the hidden try/except around getNodes.
200 if b_except: return (None, None)
203 if d_node and len(d_node) > 0:
204 pcu = d_node[0]['pcu_ids']
# (hidden condition) non-empty pcu_ids list => node has a power-control unit.
206 values['pcu'] = "PCU"
208 values['pcu'] = "NOPCU"
209 site_id = d_node[0]['site_id']
210 last_contact = d_node[0]['last_contact']
# Resolve nodegroup ids to their human-readable names via the PLC API.
211 nodegroups = [ i['name'] for i in api.GetNodeGroups(d_node[0]['nodegroup_ids']) ]
212 values['plcnode'] = {'status' : 'SUCCESS',
214 'boot_state' : d_node[0]['boot_state'],
216 'nodegroups' : nodegroups,
217 'last_contact': last_contact,
218 'date_created': d_node[0]['date_created'],
219 'last_updated': d_node[0]['last_updated']}
# (hidden else) PLC returned nothing for this hostname.
221 values['pcu'] = "UNKNOWN"
222 values['plcnode'] = {'status' : "GN_FAILED"}
225 ### GET PLC SITE ######################
230 d_site = plc.getSites({'site_id': site_id},
231 ['max_slices', 'slice_ids', 'node_ids', 'login_base'])
234 traceback.print_exc()
237 if b_except: return (None, None)
239 if d_site and len(d_site) > 0:
240 max_slices = d_site[0]['max_slices']
241 num_slices = len(d_site[0]['slice_ids'])
242 num_nodes = len(d_site[0]['node_ids'])
243 loginbase = d_site[0]['login_base']
244 values['plcsite'] = {'num_nodes' : num_nodes,
245 'max_slices' : max_slices,
246 'num_slices' : num_slices,
247 'login_base' : loginbase,
248 'status' : 'SUCCESS'}
250 values['plcsite'] = {'status' : "GS_FAILED"}
# Timestamp the check so stale entries can be detected by later rounds.
252 values['checked'] = time.time()
254 return (nodename, values)
256 def recordPingAndSSH(request, result):
# Threadpool result callback: store the values returned by collectPingAndSSH
# under the node's entry in externalState and stamp it with the current
# round, then dump the state database.
# NOTE(review): 'count' is referenced but bound on a hidden line (presumably
# a progress counter incremented elsewhere) -- confirm against full file.
259 (nodename, values) = result
# A (None, None) result from collectPingAndSSH is silently skipped here.
261 if values is not None:
262 global_round = externalState['round']
263 externalState['nodes'][nodename]['values'] = values
264 externalState['nodes'][nodename]['round'] = global_round
267 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
# Persist after recording (the guard/frequency condition is hidden).
269 soltesz.dbDump(config.dbname, externalState)
271 # this will be called when an exception occurs within a thread
272 def handle_exception(request, result):
# Threadpool exception callback: log which work request failed and its
# exception info.  NOTE(review): 'i' is bound by a hidden loop line --
# presumably iterating the exc_info tuple in 'result'; confirm.
273 print "Exception occured in request %s" % request.requestID
275 print "Result: %s" % i
278 def checkAndRecordState(l_nodes, cohash):
# Queue a collectPingAndSSH work request for every node whose recorded round
# is behind the current global round, run them on a 20-thread pool, then
# persist externalState.  Nodes already at the current round are skipped.
281 global_round = externalState['round']
283 tp = threadpool.ThreadPool(20)
285 # CREATE all the work requests
286 for nodename in l_nodes:
# First sighting of this node: seed an empty, round-0 entry so the
# round comparison below forces a probe.
287 if nodename not in externalState['nodes']:
288 externalState['nodes'][nodename] = {'round': 0, 'values': []}
290 node_round = externalState['nodes'][nodename]['round']
291 if node_round < global_round:
292 # recreate node stats when refreshed
293 #print "%s" % nodename
# recordPingAndSSH handles results; handle_exception handles failures.
294 req = threadpool.WorkRequest(collectPingAndSSH, [nodename, cohash], {},
295 None, recordPingAndSSH, handle_exception)
# (hidden else) node already probed this round.
298 # We just skip it, since it's "up to date"
# NOTE(review): 'count' is bound on a hidden line -- confirm.
300 print "%d %s %s" % (count, nodename, externalState['nodes'][nodename]['values'])
303 # WAIT while all the work requests are processed.
# Hidden try/poll loop around these handlers; NoResultsPending is the
# threadpool's normal "all done" signal.
308 except KeyboardInterrupt:
311 except threadpool.NoResultsPending:
312 print "All results collected."
315 soltesz.dbDump(config.dbname, externalState)
# Main driver flow (the enclosing 'def main():' line, if any, is not visible
# in this excerpt): load cached state, fetch CoMon data, build the node list
# from the configured source, then run the scan.
# Reload the previous run's state from the on-disk cache when present;
# otherwise keep the fresh in-memory externalState.
322 externalState = soltesz.if_cached_else(1, config.dbname, lambda : externalState)
325 # update global round number to force refreshes across all nodes
# (presumably guarded by config.increment on a hidden line -- confirm)
326 externalState['round'] += 1
328 cotop = comon.Comon()
329 # lastcotop measures whether cotop is actually running. this is a better
330 # metric than sshstatus, or other values from CoMon
331 cotop_url = COMON_COTOPURL
333 # history information for all nodes
335 cohash = cotop.coget(cotop_url)
# Default node list is every node in the synchronized PLC database; the
# options below narrow it (hidden if/elif lines select which branch runs).
336 l_nodes = syncplcdb.create_plcdb()
338 f_nodes = config.getListFromFile(config.filename)
339 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
# Single-node mode: restrict to the one hostname given on the command line.
341 f_nodes = [config.node]
342 l_nodes = filter(lambda x: x['hostname'] in f_nodes, l_nodes)
343 elif config.nodegroup:
344 ng = api.GetNodeGroups({'name' : config.nodegroup})
345 l_nodes = api.GetNodes(ng[0]['node_ids'])
# (hidden elif config.site)
347 site = api.GetSites(config.site)
348 l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
# Collapse the node records down to bare hostnames for the scan.
350 l_nodes = [node['hostname'] for node in l_nodes]
352 # perform this query after the above options, so that the filter above
354 if config.nodeselect:
355 l_nodes = node_select(config.nodeselect)
357 print "fetching %s hosts" % len(l_nodes)
359 checkAndRecordState(l_nodes, cohash)
# Script entry point (Python 2 syntax throughout): parse command-line
# options into the project's config object, run the scan (the 'try: main()'
# call is on hidden lines), and on any failure dump partial state to disk
# before exiting so work is not lost.
364 if __name__ == '__main__':
365 from config import config
366 from optparse import OptionParser
367 parser = OptionParser()
368 parser.set_defaults(filename=None, node=None, site=None, nodeselect=False, nodegroup=None,
369 increment=False, dbname="findbadnodes", cachenodes=False)
370 parser.add_option("", "--node", dest="node", metavar="hostname",
371 help="Provide a single node to operate on")
372 parser.add_option("-f", "--nodelist", dest="filename", metavar="FILE",
373 help="Provide the input file for the node list")
374 parser.add_option("", "--nodeselect", dest="nodeselect", metavar="query string",
375 help="Provide a selection string to return a node list.")
376 parser.add_option("", "--nodegroup", dest="nodegroup", metavar="FILE",
377 help="Provide the nodegroup for the list of nodes.")
378 parser.add_option("", "--site", dest="site", metavar="site name",
379 help="Specify a site to view node status")
# NOTE(review): no dest= given; optparse derives dest 'cachenodes' from the
# long option name, which matches the set_defaults key above.
381 parser.add_option("", "--cachenodes", action="store_true",
382 help="Cache node lookup from PLC")
383 parser.add_option("", "--dbname", dest="dbname", metavar="FILE",
384 help="Specify the name of the database to which the information is saved")
385 parser.add_option("-i", "--increment", action="store_true", dest="increment",
386 help="Increment round number to force refresh or retry")
387 config = config(parser)
# (hidden try block) catch-all so an unexpected crash still saves state.
392 except Exception, err:
# NOTE(review): print_exc() already writes the traceback; printing its
# return value emits a spurious 'None'.
393 print traceback.print_exc()
394 print "Exception: %s" % err
395 print "Saving data... exitting."
396 soltesz.dbDump(config.dbname, externalState)