from datetime import datetime, timedelta

import threading
import traceback
import socket

from pcucontrol import reboot
from pcucontrol.util import command

from monitor import config
from monitor.database.info.model import *
from monitor.sources import comon
from monitor.wrapper import plc, plccache
from monitor.common import nmap_port_status, email_exception
COMON_COTOPURL = "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
                 "table=table_nodeview&" + \
                 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&"
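# NOTE: the cohash passed to the collectors below is presumably built from this
# CoMon "cotop" query by monitor.sources.comon (imported above): one entry per
# node, keyed by hostname, holding the columns listed in dumpcols.  Each entry
# ends up as values['comon_stats'] for that node.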
api = plc.getAuthAPI()
plc_lock = threading.Lock()
def get_pcu(pcuname):
    try:
        #print "GetPCU from PLC %s" % pcuname
        l_pcu = plc.GetPCUs({'pcu_id' : pcuname})[0]
    except:
        #print "GetPCU from file %s" % pcuname
        l_pcu = None
        for i in plccache.l_pcus:
            if i['pcu_id'] == pcuname:
                l_pcu = i
    return l_pcu
def get_nodes(node_ids):
    try:
        l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports'])
    except:
        # fall back to the cached node list from the last run
        plc_nodes = plccache.l_nodes
        l_node = [n for n in plc_nodes if n['node_id'] in node_ids]
    return l_node
def get_plc_pcu_values(pcuname):
    """
    Try to contact PLC to get the PCU info.
    If that fails, try a backup copy from the last run.
    If that fails, return None.
    """
    values = {}
    l_pcu = get_pcu(pcuname)
    if l_pcu is None:
        return None
    values.update(l_pcu)

    site_id = l_pcu['site_id']
    node_ids = l_pcu['node_ids']
    l_node = get_nodes(node_ids)

    if l_node is not None:
        # map each hostname to the port the node should be probed on
        for node in l_node:
            values[node['hostname']] = node['ports'][0]

        values['nodenames'] = [node['hostname'] for node in l_node]

        # NOTE: this is for a dry run later.  It doesn't matter which node.
        values['node_id'] = l_node[0]['node_id']

    return values
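# Illustrative shape of a successful get_plc_pcu_values() result (hypothetical
# hostnames and ids; real results also carry whatever other fields the PLC PCU
# record has):
#   { 'hostname': 'pcu1.example.edu', 'ip': '128.112.0.1', 'node_ids': [121],
#     'nodenames': ['node1.example.edu'], 'node1.example.edu': 22, 'node_id': 121, ... }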
class ScanInterface(object):
    primarykey = 'hostname'

    def __init__(self, round=1):
        self.round = round
        self.count = 0

    def __getattr__(self, name):
        # only collect*/record* lookups are dispatched; anything else is an error
        if 'collect' in name or 'record' in name:
            method = getattr(self, name, None)
            if method is None:
                raise Exception("No such method %s" % name)
            return method
        raise Exception("No such method %s" % name)
    def collect(self, nodename, data):
        # subclasses provide the real collectors (collectInternal, collectNMAP)
        pass

    def record(self, request, (nodename, values) ):
        try:
            fbnodesync = self.syncclass.findby_or_create(
                    #if_new_set={'round' : self.round},
                    **{ self.primarykey : nodename})

            # NOTE: This code will either add a new record for the new self.round,
            # OR it will find the previous value and update it with new information.
            # The data that is 'lost' is not that important, because older
            # history still exists.
            fbrec = self.recordclass.findby_or_create(
                    **{ self.primarykey : nodename})

            fbrec.set( **values )
            fbnodesync.round = self.round

            print "%d %s %s" % (self.count, nodename, values)
            self.count += 1
        except:
            email_exception(str(nodename))
            traceback.print_exc()
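# Subclasses of ScanInterface are expected to set recordclass, primarykey and
# (where used) syncclass.  record() then upserts one recordclass row per primary
# key value (findby_or_create + set) and stamps the matching syncclass row with
# the current round, so repeated scans update rows in place instead of growing
# the table.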
class ScanNodeInternal(ScanInterface):
    recordclass = FindbadNodeRecord
    #syncclass = FindbadNodeRecordSync
    primarykey = 'hostname'
    def collectNMAP(self, nodename, cohash):
        #### RUN NMAP ###############################
        values = {}
        # assumes command.CMD() is the local shell-command wrapper from pcucontrol.util.command
        nmap = command.CMD()
        print "nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename
        (oval, errval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
        # NOTE: an empty / error value for oval will still work.
        (values['port_status'], continue_probe) = nmap_port_status(oval)
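        # For reference (an assumption about nmap's grepable output, not taken
        # from this codebase): the -oG line that nmap_port_status() has to parse
        # looks roughly like
        #   Host: 128.112.139.1 (node1.example.edu)  Ports: 22/open/tcp//ssh///, 80/closed/tcp//http///, 806/filtered/tcp/////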
        values['date_checked'] = datetime.now()

        return (nodename, values)
    def collectInternal(self, nodename, cohash):
        ### RUN PING ######################
        ping = command.CMD()
        (oval, errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)

        values = {}
        if len(oval) == 0:
            values['ping_status'] = False
        else:
            values['ping_status'] = True
        try:
            for port in [22, 806]:
                ssh = command.SSH('root', nodename, port)

                (oval, errval) = ssh.run_noexcept2(""" <<\EOF
                    echo "{"
                    echo ' "kernel_version":"'`uname -a`'",'
                    echo ' "bmlog":"'`ls /tmp/bm.log`'",'
                    echo ' "bootcd_version":"'`cat /mnt/cdrom/bootme/ID`'",'
                    echo ' "nm_status":"'`ps ax | grep nm.py | grep -v grep`'",'
                    echo ' "dns_status":"'`host boot.planet-lab.org 2>&1`'",'
                    echo ' "princeton_comon_dir":"'`ls -d /vservers/princeton_comon`'",'
                    echo ' "uptime":"'`uptime`'",'

                    ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
                    echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
                    echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
                    echo ' "fs_status":"'`grep proc /proc/mounts | grep ro, ; if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 20 touch /var/log/monitor 2>&1 ; if [ -d /vservers/ ] ; then timeout.pl 20 touch /vservers/monitor.log 2>&1 ; fi ; fi`'",'
                    echo ' "rpm_version":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 30 rpm -q NodeManager ; fi`'",'
                    echo ' "rpm_versions":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 45 rpm -q -a ; fi`'",'
                    echo "}"
EOF
""")

                values['ssh_error'] = errval
                if len(oval) > 0:
                    #print "OVAL: %s" % oval
                    values.update(eval(oval))
                    values['ssh_portused'] = port
                    # use the first port that answered
                    break
                else:
                    values.update({'kernel_version': "", 'bmlog' : "", 'bootcd_version' : '',
                                   'nm_status' : "", 'fs_status' : "",
                                   'rpm_version' : "", 'rpm_versions' : "",
                                   'uptime' : "", 'dns_status' : "",
                                   'princeton_comon_dir' : "",
                                   'princeton_comon_running' : "",
                                   'princeton_comon_procs' : "", 'ssh_portused' : None})
        except:
            traceback.print_exc()
        values['fs_status'] = ""
        print "ALLVERSIONS: %s %s" % (nodename, values['rpm_versions'])
        print "RPMVERSION: %s %s" % (nodename, values['rpm_version'])
        print "UPTIME: %s %s" % (nodename, values['uptime'])
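        # Note on the SSH probe above: the remote script prints '"key":"value",'
        # lines framed by "{" and "}", i.e. a Python dict literal, which is what
        # values.update(eval(oval)) parses.  An empty oval (ssh failure) falls
        # through to the default-values branch instead.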
        ### RUN SSH ######################
        b_getbootcd_id = True

        oval = values['kernel_version']
        if "2.6.17" in oval or "2.6.2" in oval:
            values['ssh_status'] = True
            values['observed_category'] = 'PROD'
            if "bm.log" in values['bmlog']:
                values['observed_status'] = 'DEBUG'
            else:
                values['observed_status'] = 'BOOT'

        elif "2.6.12" in oval or "2.6.10" in oval:
            values['ssh_status'] = True
            values['observed_category'] = 'OLDPROD'
            if "bm.log" in values['bmlog']:
                values['observed_status'] = 'DEBUG'
            else:
                values['observed_status'] = 'BOOT'

        # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot
        # command fails.  I have no idea why.
        elif "2.4" in oval or "2.6.8" in oval:
            b_getbootcd_id = False
            values['ssh_status'] = True
            values['observed_category'] = 'OLDBOOTCD'
            values['observed_status'] = 'DEBUG'

        elif oval != "":
            values['ssh_status'] = True
            values['observed_category'] = 'UNKNOWN'
            if "bm.log" in values['bmlog']:
                values['observed_status'] = 'DEBUG'
            else:
                values['observed_status'] = 'BOOT'

        else:
            # the ssh probe returned no kernel version at all
            b_getbootcd_id = False
            values['ssh_status'] = False
            values['observed_category'] = 'ERROR'
            values['observed_status'] = 'DOWN'
            val = errval.strip()
            values['ssh_error'] = val
            values['kernel_version'] = ""
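        # The kernel checks above are substring matches, so "2.6.2" also covers
        # 2.6.20-2.6.29 kernels; the presence of /tmp/bm.log (the BootManager
        # log) is what the code uses to distinguish DEBUG from BOOT.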
        if b_getbootcd_id:
            # try to get BootCD for all nodes that are not 2.4 nor inaccessible
            oval = values['bootcd_version']
            if oval != "":
                values['bootcd_version'] = oval
                if "v2" in oval and \
                    ( nodename != "planetlab1.cs.unc.edu" and \
                      nodename != "planetlab2.cs.unc.edu" ):
                    values['observed_category'] = 'OLDBOOTCD'
            else:
                values['bootcd_version'] = ""
        else:
            values['bootcd_version'] = ""
        oval = values['nm_status']
        if "nm.py" in oval:
            values['nm_status'] = "Y"
        else:
            values['nm_status'] = "N"
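        # The princeton_comon checks below run in three stages: does the slice
        # directory exist, is its vserver present under /proc/virtual, and how
        # many processes it has; each later stage is skipped once an earlier one
        # fails (continue_slice_check).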
        continue_slice_check = True
        oval = values['princeton_comon_dir']
        # oval is the output of `ls -d /vservers/princeton_comon` from the probe
        if "princeton_comon" in oval:
            values['princeton_comon_dir'] = True
        else:
            values['princeton_comon_dir'] = False
            continue_slice_check = False

        if continue_slice_check:
            oval = values['princeton_comon_running']
            if len(oval) > len('/proc/virtual/'):
                values['princeton_comon_running'] = True
            else:
                values['princeton_comon_running'] = False
                continue_slice_check = False
        else:
            values['princeton_comon_running'] = False

        if continue_slice_check:
            oval = values['princeton_comon_procs']
            values['princeton_comon_procs'] = int(oval)
        else:
            values['princeton_comon_procs'] = None
        if nodename in cohash:
            values['comon_stats'] = cohash[nodename]
        else:
            # no CoMon data for this node; placeholder values for the COMON_COTOPURL columns
            values['comon_stats'] = {'resptime': '-1', 'sshstatus': '-1', 'uptime': '-1',
                                     'lastcotop': '-1', 'cpuspeed': '-1', 'memsize': '-1', 'disksize': '-1'}
        # include output value
        try:
            ### GET PLC NODE ######################
            d_node = plccache.GetNodeByName(nodename)
            values['plc_node_stats'] = d_node

            ##### NMAP ###################
            (n, v) = self.collectNMAP(nodename, None)
            values.update(v)
            ### GET PLC PCU ######################
            d_pcu = None
            pcu = d_node['pcu_ids']
            if len(pcu) > 0:
                d_pcu = pcu[0]
            site_id = d_node['site_id']
            values['plc_pcuid'] = d_pcu
            ### GET PLC SITE ######################
            values['loginbase'] = ""
            d_site = None
            try:
                d_site = plccache.GetSitesById([ site_id ])[0]
                #d_site = plc.getSites({'site_id': site_id},
                #            ['max_slices', 'slice_ids', 'node_ids', 'login_base'])[0]
                values['loginbase'] = d_site['login_base']
            except:
                traceback.print_exc()

            values['plc_site_stats'] = d_site
            values['date_checked'] = datetime.now()
        except:
            traceback.print_exc()

        return (nodename, values)
def internalprobe(hostname):
    #fbsync = FindbadNodeRecordSync.findby_or_create(hostname="global",
    #            if_new_set={'round' : 1})
    scannode = ScanNodeInternal() # fbsync.round)
    try:
        (nodename, values) = scannode.collectInternal(hostname, {})
        scannode.record(None, (nodename, values))
        return True
    except:
        traceback.print_exc()
        return False
def externalprobe(hostname):
    #fbsync = FindbadNodeRecordSync.findby_or_create(hostname="global",
    #            if_new_set={'round' : 1})
    scannode = ScanNodeInternal() # fbsync.round)
    try:
        (nodename, values) = scannode.collectNMAP(hostname, {})
        scannode.record(None, (nodename, values))
        return True
    except:
        traceback.print_exc()
        return False
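# Minimal usage sketch for the two probe helpers above (hypothetical hostname;
# assumes the monitor database and config have already been initialized by the
# caller, and that a failed probe returns False as reconstructed here):
#
#   if internalprobe("planetlab1.example.edu"):
#       print "probe recorded"
#   else:
#       print "probe failed; see traceback above"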
class ScanPCU(ScanInterface):
    recordclass = FindbadPCURecord
    primarykey = 'plc_pcuid'
    def collectInternal(self, pcuname, cohash):
        continue_probe = True
        b_except = False
        values = {'reboot_trial_status' : 'novalue'}

        ### GET PCU ######################
        try:
            v = get_plc_pcu_values(pcuname)
            if v is not None:
                if v['hostname'] is not None: v['hostname'] = v['hostname'].strip()
                if v['ip'] is not None: v['ip'] = v['ip'].strip()
                values['plc_pcu_stats'] = v
            else:
                continue_probe = False
        except:
            b_except = True
            traceback.print_exc()
            continue_probe = False

        if b_except or not continue_probe: return (None, None)
        #### RUN NMAP ###############################
        if continue_probe:
            nmap = command.CMD()
            print "nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats'])
            (oval, errval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats']))
            # NOTE: an empty / error value for oval will still work.
            (values['port_status'], continue_probe) = nmap_port_status(oval)
        else:
            values['port_status'] = None
        #### COMPLETE ENTRY #######################
        values['entry_complete'] = []
        #if values['protocol'] is None or values['protocol'] == "":
        #    values['entry_complete'] += ["protocol"]
        if values['plc_pcu_stats']['model'] is None or values['plc_pcu_stats']['model'] == "":
            values['entry_complete'] += ["model"]
            # Cannot continue due to this condition
            continue_probe = False

        if values['plc_pcu_stats']['password'] is None or values['plc_pcu_stats']['password'] == "":
            values['entry_complete'] += ["password"]
            # Cannot continue due to this condition
            continue_probe = False

        if len(values['entry_complete']) > 0:
            continue_probe = False

        if values['plc_pcu_stats']['hostname'] is None or values['plc_pcu_stats']['hostname'] == "":
            values['entry_complete'] += ["hostname"]
        if values['plc_pcu_stats']['ip'] is None or values['plc_pcu_stats']['ip'] == "":
            values['entry_complete'] += ["ip"]

        # If there are no nodes associated with this PCU, then we cannot continue.
        if len(values['plc_pcu_stats']['node_ids']) == 0:
            continue_probe = False
            values['entry_complete'] += ['nodeids']
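        # entry_complete accumulates the names of the PCU fields missing from the
        # PLC record; any missing field blocks further probing, and the list is
        # flattened to a space-separated string before the values are returned
        # (see the join() near the end of this method).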
        #### DNS and IP MATCH #######################
        if values['plc_pcu_stats']['hostname'] is not None and values['plc_pcu_stats']['hostname'] != "" and \
           values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] != "":
            try:
                ipaddr = socket.gethostbyname(values['plc_pcu_stats']['hostname'])
                if ipaddr == values['plc_pcu_stats']['ip']:
                    values['dns_status'] = "DNS-OK"
                else:
                    values['dns_status'] = "DNS-MISMATCH"
                    values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
            except Exception, err:
                values['dns_status'] = "DNS-NOENTRY"
                values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
        else:
            if values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] != "":
                values['dns_status'] = "NOHOSTNAME"
                values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
            else:
                values['dns_status'] = "NO-DNS-OR-IP"
                values['plc_pcu_stats']['hostname'] = "No_entry_in_DB"
                continue_probe = False
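        # dns_status is one of DNS-OK, DNS-MISMATCH, DNS-NOENTRY, NOHOSTNAME or
        # NO-DNS-OR-IP; whenever the hostname cannot be trusted, it is replaced
        # with the registered IP so that later reboot attempts still have a
        # usable address (that, at least, appears to be the intent).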
        ###### DRY RUN ############################
        try:
            if continue_probe and 'node_ids' in values['plc_pcu_stats'] and \
               len(values['plc_pcu_stats']['node_ids']) > 0:
                rb_ret = reboot.reboot_test_new(values['plc_pcu_stats']['nodenames'][0],
            else:
                rb_ret = "Not_Run"  # No nodes to test
            values['reboot_trial_status'] = rb_ret
        except:
            print "____________________________________"
            print "____________________________________"
            errors = {}
            errors['traceback'] = traceback.format_exc()
            print errors['traceback']
            values['reboot_trial_status'] = str(errors['traceback'])
        values['entry_complete'] = " ".join(values['entry_complete'])
        values['date_checked'] = datetime.now()

        return (pcuname, values)
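# Usage sketch for ScanPCU (hypothetical PCU id; cohash does not appear to be
# used by ScanPCU.collectInternal, so an empty dict is fine):
#
#   scanpcu = ScanPCU()
#   (pcuname, values) = scanpcu.collectInternal(1234, {})
#   if pcuname is not None:
#       scanpcu.record(None, (pcuname, values))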