7 from datetime import datetime,timedelta
12 from pcucontrol import reboot
14 from pcucontrol.util import command
15 from monitor import config
17 from monitor.database.info.model import *
19 from monitor.sources import comon
20 from monitor.wrapper import plc, plccache
23 from monitor.common import nmap_port_status, email_exception
25 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
26 "table=table_nodeview&" + \
27 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&" + \
# Module-wide authenticated handle to the PLC XML-RPC API, shared by the
# helpers below.
30 api = plc.getAuthAPI()
# Lock presumably used to serialize PLC calls across scanner threads --
# TODO(review): its acquire/release sites are not visible in this chunk.
31 plc_lock = threading.Lock()
40 #print "GetPCU from PLC %s" % pcuname
41 l_pcu = plc.GetPCUs({'pcu_id' : pcuname})
47 #print "GetPCU from file %s" % pcuname
48 l_pcus = plccache.l_pcus
50 if i['pcu_id'] == pcuname:
# Fetch node records (hostname, last_contact, node_id, ports) for the given
# node_ids from live PLC, falling back to the plccache snapshot when the live
# query is unavailable.
# NOTE(review): interior lines (60-62, 64-65, 67, 69+) are elided in this
# chunk -- the try/except frame and the return statements are not visible, so
# the exact fallback/return shape is unconfirmed.
59 def get_nodes(node_ids):
63 l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports'])
# Fallback path: filter the cached node list down to the requested node_ids.
66 plc_nodes = plccache.l_nodes
68 if n['node_id'] in node_ids:
80 def get_plc_pcu_values(pcuname):
# (Docstring body -- the opening/closing triple quotes sit on elided lines
# 81/85.)
82 Try to contact PLC to get the PCU info.
83 If that fails, try a backup copy from the last run.
84 If that fails, return None
# Flatten the PCU record plus per-node data into one dict: one entry per
# hostname mapping to the node's first configured port, plus 'nodenames' and
# a sample 'node_id' for the later dry-run reboot test.
# NOTE(review): the initialization of `values` (and the merge of l_pcu into
# it), along with the failure branches returning None, sit on elided lines.
88 l_pcu = get_pcu(pcuname)
91 site_id = l_pcu['site_id']
92 node_ids = l_pcu['node_ids']
93 l_node = get_nodes(node_ids)
95 if l_node is not None:
97 values[node['hostname']] = node['ports'][0]
99 values['nodenames'] = [node['hostname'] for node in l_node]
101 # NOTE: this is for a dry run later. It doesn't matter which node.
102 values['node_id'] = l_node[0]['node_id']
# Base class for the scanners below.  Subclasses provide `recordclass` (the
# history record model), `syncclass` (per-round sync model, used in record()),
# and `primarykey` (the model column keyed by nodename).
# NOTE(review): many interior lines are elided from this chunk (the fused
# original line numbers jump), so the bodies below are partial.
110 class ScanInterface(object):
113 primarykey = 'hostname'
115 def __init__(self, round=1):
# Attribute dispatch: only names containing 'collect' or 'record' are looked
# up; anything else raises.  NOTE(review): the branch between lines 121 and
# 123 is elided, so the exact success path is unconfirmed.
119 def __getattr__(self, name):
120 if 'collect' in name or 'record' in name:
121 method = getattr(self, name, None)
123 raise Exception("No such method %s" % name)
126 raise Exception("No such method %s" % name)
128 def collect(self, nodename, data):
# Persist one (nodename, values) scan result: upsert the sync row for this
# round and the findbad record, then stamp the round.
# NOTE(review): Python-2-only tuple-unpacking parameter (removed by PEP 3113).
131 def record(self, request, (nodename, values) ):
138 fbnodesync = self.syncclass.findby_or_create(
139 #if_new_set={'round' : self.round},
140 **{ self.primarykey : nodename})
141 # NOTE: This code will either add a new record for the new self.round,
142 # OR it will find the previous value, and update it with new information.
143 # The data that is 'lost' is not that important, b/c older
144 # history still exists.
145 fbrec = self.recordclass.findby_or_create(
146 **{ self.primarykey:nodename})
148 fbrec.set( **values )
152 fbnodesync.round = self.round
155 print "%d %s %s" % (self.count, nodename, values)
# Failure path (except frame elided): email the exception and dump the
# traceback; the record is dropped for this round.
160 email_exception(str(nodename))
161 print traceback.print_exc()
# Scanner that probes one node from several angles (nc port checks, nmap,
# ping, traceroute, ssh fact-gathering, DNS, PLC records), folds everything
# into one values dict, classifies the node, and returns (nodename, values).
# NOTE(review): interior lines are elided throughout this chunk (the fused
# original line numbers jump), so else-branches, try/except frames and some
# returns are not visible here.
164 class ScanNodeInternal(ScanInterface):
165 recordclass = FindbadNodeRecord
166 #syncclass = FindbadNodeRecordSync
168 primarykey = 'hostname'
# Probe each port with netcat (5s timeout) and record "open"/"closed" keyed
# by the port number as a string.
# NOTE(review): mutable default argument port_list=[22,80,806] -- benign only
# because the list is never mutated.
170 def collectPorts(self, nodename, port_list=[22,80,806]):
172 for port in port_list:
173 ret = os.system("nc -w 5 -z %s %s > /dev/null" % (nodename, port) )
175 values[str(port)] = "open"
177 values[str(port)] = "closed"
178 return {'port_status' : values }
180 def collectNMAP(self, nodename, cohash):
181 #### RUN NMAP ###############################
182 # NOTE: run the same command three times and take the best of three
183 # runs. NMAP can drop packets, and especially so when it runs many
187 print "nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename
188 (oval1,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
189 (oval2,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
190 (oval3,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
191 # NOTE: an empty / error value for oval, will still work.
192 values['port_status'] = {}
193 (o1,continue_probe) = nmap_port_status(oval1)
194 (o2,continue_probe) = nmap_port_status(oval2)
195 (o3,continue_probe) = nmap_port_status(oval3)
# Majority vote: a port is "open" only if at least 2 of the 3 runs saw it
# open; otherwise fall back to the first run's verdict.
196 for p in ['22', '80', '806']:
197 l = [ o1[p], o2[p], o3[p] ]
198 if len(filter(lambda x: x == 'open', l)) > 1:
199 values['port_status'][p] = 'open'
201 values['port_status'][p] = o1[p]
203 print values['port_status']
204 return (nodename, values)
# Single quiet ICMP echo; ping_status is True when the rtt summary line
# appears in the output (branch condition elided).
206 def collectPING(self, nodename, cohash):
209 (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)
214 values['ping_status'] = False
216 values['ping_status'] = True
220 def collectTRACEROUTE(self, nodename, cohash):
222 trace = command.CMD()
223 (oval,errval) = trace.run_noexcept("traceroute %s" % nodename)
225 values['traceroute'] = oval
# Run a remote shell script over ssh (trying port 22 then 806) that echoes a
# dict-literal-shaped blob of node facts, then eval() it into `values`.
229 def collectSSH(self, nodename, cohash):
232 for port in [22, 806]:
233 ssh = command.SSH('root', nodename, port)
235 (oval, errval) = ssh.run_noexcept2(""" <<\EOF
237 echo ' "kernel_version":"'`uname -a`'",'
238 echo ' "bmlog":"'`ls /tmp/bm.log`'",'
239 echo ' "bootcd_version":"'`cat /mnt/cdrom/bootme/ID`'",'
240 echo ' "nm_status":"'`ps ax | grep nm.py | grep -v grep`'",'
241 echo ' "dns_status":"'`host boot.planet-lab.org 2>&1`'",'
242 echo ' "princeton_comon_dir":"'`ls -d /vservers/princeton_comon`'",'
243 echo ' "uptime":"'`cat /proc/uptime`'",'
245 ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
246 echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
247 echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
248 echo ' "fs_status":"'`grep proc /proc/mounts | grep ro, ; if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 20 touch /var/log/monitor 2>&1 ; if [ -d /vservers/ ] ; then timeout.pl 20 touch /vservers/monitor.log 2>&1 ; fi ; fi`'",'
249 echo ' "rpm_version":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 30 rpm -q NodeManager ; fi`'",'
250 echo ' "rpm_versions":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 45 rpm -q -a ; fi`'",'
254 values['ssh_error'] = errval
256 #print "OVAL: %s" % oval
# SECURITY NOTE(review): eval() of text produced on the remote node -- a
# compromised node could execute arbitrary code in this process.  Flagged,
# not changed.
257 values.update(eval(oval))
258 values['ssh_portused'] = port
# Fallback defaults when the ssh output could not be collected/parsed
# (surrounding except frame elided).
261 values.update({'kernel_version': "", 'bmlog' : "", 'bootcd_version' : '',
268 'princeton_comon_dir' : "",
269 'princeton_comon_running' : "",
270 'princeton_comon_procs' : "", 'ssh_portused' : None})
# Normalize nm_status to "Y"/"N" (the test between these assignments is
# elided -- presumably whether the ps output mentioned nm.py).
272 oval = values['nm_status']
274 values['nm_status'] = "Y"
276 values['nm_status'] = "N"
# Progressive princeton_comon slice checks; any failure short-circuits the
# remaining checks via continue_slice_check.
278 continue_slice_check = True
279 oval = values['princeton_comon_dir']
# NOTE(review): this substring test looks suspect -- the remote `ls -d
# /vservers/princeton_comon` output would contain "princeton_comon", not
# "princeton_comon_dir".  Confirm against the intended remote output.
280 if "princeton_comon_dir" in oval:
281 values['princeton_comon_dir'] = True
283 values['princeton_comon_dir'] = False
284 continue_slice_check = False
286 if continue_slice_check:
287 oval = values['princeton_comon_running']
# Running iff `ls -d /proc/virtual/$ID` returned something longer than the
# bare directory prefix.
288 if len(oval) > len('/proc/virtual/'):
289 values['princeton_comon_running'] = True
291 values['princeton_comon_running'] = False
292 continue_slice_check = False
294 values['princeton_comon_running'] = False
296 if continue_slice_check:
297 oval = values['princeton_comon_procs']
298 values['princeton_comon_procs'] = int(oval)
300 values['princeton_comon_procs'] = None
302 print traceback.print_exc()
# Pull node / PCU-id / site records for this node out of the PLC cache.
307 def collectPLC(self, nodename, cohash):
309 ### GET PLC NODE ######################
310 d_node = plccache.GetNodeByName(nodename)
311 values['plc_node_stats'] = d_node
313 ### GET PLC PCU ######################
316 if d_node and len(d_node['pcu_ids']) > 0:
317 d_pcu = d_node['pcu_ids'][0]
319 site_id = d_node['site_id']
321 values['plc_pcuid'] = d_pcu
323 ### GET PLC SITE ######################
324 print "SITEID: %s" % site_id
325 d_site = plccache.GetSitesById([ site_id ])[0]
326 values['loginbase'] = d_site['login_base']
327 values['plc_site_stats'] = d_site
# Classify the node from collected facts: observed_category in
# PROD/OLDPROD/OLDBOOTCD/UNKNOWN/ERROR and observed_status in
# BOOT/DEBUG/DOWN, driven by kernel version, bm.log presence, bootcd
# version, ssh reachability and public-port status.
331 def evaluate(self, nodename, values):
332 # TODO: this section can probably be reduced to a policy statement
333 # using patterns and values collected so far.
334 # NOTE: A node is "DOWN" if
335 # * cannot ssh into it.
336 # * all ports are not open for a 'BOOT' node
337 # * dns for hostname does not exist.
338 b_getbootcd_id = True
340 oval = values['kernel_version']
341 values['ssh_status'] = True
# NOTE(review): "2.6.2" also substring-matches e.g. 2.6.20-2.6.29 kernels --
# presumably intentional, but worth confirming.
342 if "2.6.17" in oval or "2.6.2" in oval:
343 values['observed_category'] = 'PROD'
344 if "bm.log" in values['bmlog']:
345 values['observed_status'] = 'DEBUG'
347 values['observed_status'] = 'BOOT'
348 elif "2.6.12" in oval or "2.6.10" in oval:
349 values['observed_category'] = 'OLDPROD'
350 if "bm.log" in values['bmlog']:
351 values['observed_status'] = 'DEBUG'
353 values['observed_status'] = 'BOOT'
355 # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot
356 # command fails. I have no idea why.
357 elif "2.4" in oval or "2.6.8" in oval:
358 b_getbootcd_id = False
359 values['observed_category'] = 'OLDBOOTCD'
360 values['observed_status'] = 'DEBUG'
362 values['observed_category'] = 'UNKNOWN'
363 if "bm.log" in values['bmlog']:
364 values['observed_status'] = 'DEBUG'
366 values['observed_status'] = 'BOOT'
# Unreachable-over-ssh path (enclosing condition elided): mark the node DOWN.
369 b_getbootcd_id = False
370 values['ssh_status'] = False
371 values['observed_category'] = 'ERROR'
372 values['observed_status'] = 'DOWN'
373 values['kernel_version'] = ""
375 values['firewall'] = False
377 # NOTE: A node is down if some of the public ports are not open
378 if values['observed_status'] == "BOOT":
379 # verify that all ports are open. Else, report node as down.
380 if not ( values['port_status']['22'] == "open" and \
381 values['port_status']['80'] == "open" and \
382 values['port_status']['806'] == "open") :
383 #email_exception(nodename, "%s FILTERED HOST" % nodename)
384 values['observed_status'] = 'DOWN'
385 values['firewall'] = True
387 #if values['port_status']['22'] == "open" and \
388 # values['port_status']['80'] == "closed" and \
389 # values['port_status']['806'] == "open" :
390 # email_exception("%s port 80 blocked" % nodename, "possible VSERVER ref blocked")
392 #if not values['external_dns_status']:
393 # email_exception("%s DNS down" % nodename)
396 # try to get BootCD for all nodes that are not 2.4 nor inaccessible
397 oval = values['bootcd_version']
399 values['bootcd_version'] = oval
# BUG(review): `is not` compares identity, not equality -- these hostname
# exclusions only work by CPython string-interning accident and should use
# `!=`.  Left unchanged here.
400 if "v2" in oval and \
401 ( nodename is not "planetlab1.cs.unc.edu" and \
402 nodename is not "planetlab2.cs.unc.edu" ):
403 values['observed_category'] = 'OLDBOOTCD'
405 values['bootcd_version'] = ""
407 values['bootcd_version'] = ""
# Forward DNS check: can the hostname be resolved at all?
411 def collectDNS(self, nodename, cohash):
414 ipaddr = socket.gethostbyname(nodename)
415 # TODO: check that IP returned matches IP in plc db.
416 values['external_dns_status'] = True
417 except Exception, err:
418 values['external_dns_status'] = False
# Orchestrator: run every collector, merge their dicts, attach comon stats
# (or a default stub), evaluate, timestamp, and return (nodename, values).
422 def collectInternal(self, nodename, cohash):
426 v = self.collectPING(nodename, cohash)
429 v = self.collectPorts(nodename)
432 v = self.collectSSH(nodename, cohash)
435 v = self.collectDNS(nodename, cohash)
438 v = self.collectTRACEROUTE(nodename, cohash)
441 v = self.collectPLC(nodename, cohash)
444 if nodename in cohash:
445 values['comon_stats'] = cohash[nodename]
447 values['comon_stats'] = {'resptime': '-1',
455 values['rpms'] = values['rpm_versions']
456 print "ALLVERSIONS: %s %s" % (nodename, values['rpm_versions'])
457 print "RPMVERSION: %s %s" % (nodename, values['rpm_version'])
458 print "UPTIME: %s %s" % (nodename, values['uptime'])
460 values = self.evaluate(nodename, values)
461 values['date_checked'] = datetime.now()
464 print traceback.print_exc()
466 return (nodename, values)
# One-shot scan of a single host: run the full collectInternal pass
# (ping/ports/ssh/dns/traceroute/plc) with an empty comon hash and persist
# the result.  NOTE(review): the try/except frame and return statements sit
# on elided lines.
469 def internalprobe(hostname):
470 scannode = ScanNodeInternal()
472 (nodename, values) = scannode.collectInternal(hostname, {})
473 scannode.record(None, (nodename, values))
477 print traceback.print_exc()
# External (port-probe-only) scan of a single host, then persist the result.
# NOTE(review): the try/except frame and return statements sit on elided
# lines.
480 def externalprobe(hostname):
481 scannode = ScanNodeInternal()
# BUG(review): `self` is undefined at module scope -- this raises NameError
# when reached.  It should almost certainly be
# `scannode.collectPorts(hostname)` (compare internalprobe above, which calls
# methods on its local scannode).  Left unchanged because the enclosing
# try/except lines are not visible in this chunk.
483 values = self.collectPorts(hostname)
484 scannode.record(None, (hostname, values))
488 print traceback.print_exc()
# Scanner for PCUs (power-control units): gather the PLC PCU record, nmap its
# management ports, check the DB entry for completeness, cross-check DNS vs
# the recorded IP, and dry-run a reboot test.  Records key on 'plc_pcuid'.
# NOTE(review): interior lines are elided throughout (the fused original line
# numbers jump), so try/except frames and some branches are not visible.
491 class ScanPCU(ScanInterface):
492 recordclass = FindbadPCURecord
494 primarykey = 'plc_pcuid'
496 def collectInternal(self, pcuname, cohash):
498 continue_probe = True
500 values = {'reboot_trial_status' : 'novalue'}
501 ### GET PCU ######################
505 v = get_plc_pcu_values(pcuname)
# Normalize whitespace on hostname/ip before use.
506 if v['hostname'] is not None: v['hostname'] = v['hostname'].strip()
507 if v['ip'] is not None: v['ip'] = v['ip'].strip()
510 values['plc_pcu_stats'] = v
512 continue_probe = False
515 traceback.print_exc()
516 continue_probe = False
# Bail out entirely if the PCU record could not be fetched.
518 if b_except or not continue_probe: return (None, None, None)
520 #### RUN NMAP ###############################
523 print "nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats'])
524 (oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats']))
525 # NOTE: an empty / error value for oval, will still work.
526 (values['port_status'], continue_probe) = nmap_port_status(oval)
528 values['port_status'] = None
530 #### COMPLETE ENTRY #######################
# Collect the names of missing-but-required DB fields; any missing field
# blocks further probing.
# BUG(review): the `is None or ... is ""` tests below compare identity with a
# string literal -- they work only via CPython interning and should use
# equality (`== ""` / `not value`).  Left unchanged.
532 values['entry_complete'] = []
533 #if values['protocol'] is None or values['protocol'] is "":
534 # values['entry_complete'] += ["protocol"]
535 if values['plc_pcu_stats']['model'] is None or values['plc_pcu_stats']['model'] is "":
536 values['entry_complete'] += ["model"]
537 # Cannot continue due to this condition
538 continue_probe = False
540 if values['plc_pcu_stats']['password'] is None or values['plc_pcu_stats']['password'] is "":
541 values['entry_complete'] += ["password"]
542 # Cannot continue due to this condition
543 continue_probe = False
545 if len(values['entry_complete']) > 0:
546 continue_probe = False
# hostname/ip gaps are recorded but (per the ordering above) only the
# model/password gaps were consulted before the continue_probe decision.
548 if values['plc_pcu_stats']['hostname'] is None or values['plc_pcu_stats']['hostname'] is "":
549 values['entry_complete'] += ["hostname"]
550 if values['plc_pcu_stats']['ip'] is None or values['plc_pcu_stats']['ip'] is "":
551 values['entry_complete'] += ["ip"]
553 # If there are no nodes associated with this PCU, then we cannot continue.
554 if len(values['plc_pcu_stats']['node_ids']) == 0:
555 continue_probe = False
556 values['entry_complete'] += ['nodeids']
559 #### DNS and IP MATCH #######################
# Compare forward DNS of the recorded hostname against the recorded IP; on
# mismatch or lookup failure, fall back to using the IP as the hostname.
560 if values['plc_pcu_stats']['hostname'] is not None and values['plc_pcu_stats']['hostname'] is not "" and \
561 values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "":
563 ipaddr = socket.gethostbyname(values['plc_pcu_stats']['hostname'])
564 if ipaddr == values['plc_pcu_stats']['ip']:
565 values['dns_status'] = "DNS-OK"
567 values['dns_status'] = "DNS-MISMATCH"
568 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
570 except Exception, err:
571 values['dns_status'] = "DNS-NOENTRY"
572 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
574 if values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "":
575 values['dns_status'] = "NOHOSTNAME"
576 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
578 values['dns_status'] = "NO-DNS-OR-IP"
579 values['plc_pcu_stats']['hostname'] = "No_entry_in_DB"
580 continue_probe = False
583 ###### DRY RUN ############################
# Attempt a reboot test against the PCU's first node (remaining call
# arguments are on elided lines); skip when there are no nodes.
584 if continue_probe and 'node_ids' in values['plc_pcu_stats'] and \
585 len(values['plc_pcu_stats']['node_ids']) > 0:
586 rb_ret = reboot.reboot_test_new(values['plc_pcu_stats']['nodenames'][0],
589 rb_ret = "Not_Run" # No nodes to test"
591 values['reboot_trial_status'] = rb_ret
594 print "____________________________________"
597 print "____________________________________"
# Failure path (except frame elided): store the traceback as the trial status.
598 errors['traceback'] = traceback.format_exc()
599 print errors['traceback']
600 values['reboot_trial_status'] = str(errors['traceback'])
# Flatten the missing-field list to a space-separated string for storage.
603 values['entry_complete']=" ".join(values['entry_complete'])
605 values['date_checked'] = datetime.now()
606 return (pcuname, values)