7 from datetime import datetime,timedelta
12 from pcucontrol import reboot
14 from pcucontrol.util import command
15 from monitor import config
17 from monitor.database.info.model import *
19 from monitor.sources import comon
20 from monitor.wrapper import plc, plccache
23 from monitor.common import nmap_port_status, email_exception
# Comon "cotop" tabulator URL used to pull per-node status columns
# (resptime, sshstatus, uptime, ...); the query string continues on a line
# that is missing from this dump. `api` is the authenticated PLC API handle
# and `plc_lock` serializes access to it across threads.
25 COMON_COTOPURL= "http://summer.cs.princeton.edu/status/tabulator.cgi?" + \
26 "table=table_nodeview&" + \
27 "dumpcols='name,resptime,sshstatus,uptime,lastcotop,cpuspeed,memsize,disksize'&" + \
30 api = plc.getAuthAPI()
31 plc_lock = threading.Lock()
40 #print "GetPCU from PLC %s" % pcuname
41 l_pcu = plc.GetPCUs({'pcu_id' : pcuname})
47 #print "GetPCU from file %s" % pcuname
48 l_pcus = plccache.l_pcus
50 if i['pcu_id'] == pcuname:
# Fetch ['hostname', 'last_contact', 'node_id', 'ports'] for the given
# node_ids from the live PLC API, with what appears to be a fallback that
# filters the plccache node list by node_id.
# NOTE(review): the try/except scaffolding between these lines is missing
# from this dump — confirm fallback behavior against the full source.
59 def get_nodes(node_ids):
63 l_node = plc.getNodes(node_ids, ['hostname', 'last_contact', 'node_id', 'ports'])
66 plc_nodes = plccache.l_nodes
68 if n['node_id'] in node_ids:
# Build the 'values' dict for one PCU: the PCU record itself, a
# hostname -> PCU-port map for each attached node, the node-name list, and a
# sample node_id used later for the reboot dry run.
# NOTE(review): several lines (try/except, return statements) are missing
# from this dump; lines 82-84 are the body of a docstring whose quotes fall
# in the gaps.
80 def get_plc_pcu_values(pcuname):
82 Try to contact PLC to get the PCU info.
83 If that fails, try a backup copy from the last run.
84 If that fails, return None
88 l_pcu = get_pcu(pcuname)
91 site_id = l_pcu['site_id']
92 node_ids = l_pcu['node_ids']
93 l_node = get_nodes(node_ids)
95 if l_node is not None:
# Map each attached node's hostname to its assigned PCU port.
97 values[node['hostname']] = node['ports'][0]
99 values['nodenames'] = [node['hostname'] for node in l_node]
101 # NOTE: this is for a dry run later. It doesn't matter which node.
102 values['node_id'] = l_node[0]['node_id']
# Base class for the node/PCU scanners. Subclasses supply recordclass /
# syncclass / primarykey plus collect* methods; record() persists one
# (name, values) result through those model classes.
# NOTE(review): this file is a line-numbered dump with gaps; the embedded
# numbers and missing lines are reproduced as-is.
110 class ScanInterface(object):
113 primarykey = 'hostname'
115 def __init__(self, round=1):
# Restrict dynamic attribute resolution to collect*/record* style names;
# anything else raises. NOTE(review): intermediate lines are missing from
# this dump, so the full dispatch logic is not visible here.
119 def __getattr__(self, name):
120 if 'collect' in name or 'record' in name:
121 method = getattr(self, name, None)
123 raise Exception("No such method %s" % name)
126 raise Exception("No such method %s" % name)
128 def collect(self, nodename, data):
# Persist (nodename, values) via findby_or_create on the sync and record
# model classes. NOTE: Python 2 tuple-parameter unpacking in the signature.
131 def record(self, request, (nodename, values) ):
138 fbnodesync = self.syncclass.findby_or_create(
139 #if_new_set={'round' : self.round},
140 **{ self.primarykey : nodename})
141 # NOTE: This code will either add a new record for the new self.round,
142 # OR it will find the previous value, and update it with new information.
143 # The data that is 'lost' is not that important, b/c older
144 # history still exists.
145 fbrec = self.recordclass.findby_or_create(
146 **{ self.primarykey:nodename})
148 fbrec.set( **values )
152 fbnodesync.round = self.round
155 print "%d %s %s" % (self.count, nodename, values)
160 email_exception(str(nodename))
# NOTE(review): traceback.print_exc() prints the trace itself and returns
# None, so this `print` also emits a stray "None" line.
161 print traceback.print_exc()
164 class ScanNodeInternal(ScanInterface):
165 recordclass = FindbadNodeRecord
166 #syncclass = FindbadNodeRecordSync
168 primarykey = 'hostname'
170 def collectPorts(self, nodename, port_list=[22,80,806]):
172 for port in port_list:
173 ret = os.system("nc -w 5 -z %s %s > /dev/null" % (nodename, port) )
175 values[str(port)] = "open"
177 values[str(port)] = "closed"
178 return {'port_status' : values }
180 def collectNMAP(self, nodename, cohash):
181 #### RUN NMAP ###############################
182 # NOTE: run the same command three times and take the best of three
183 # runs. NMAP can drop packets, and especially so when it runs many
187 print "nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename
188 (oval1,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
189 (oval2,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
190 (oval3,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,80,806 %s | grep Host:" % nodename)
191 # NOTE: an empty / error value for oval, will still work.
192 values['port_status'] = {}
193 (o1,continue_probe) = nmap_port_status(oval1)
194 (o2,continue_probe) = nmap_port_status(oval2)
195 (o3,continue_probe) = nmap_port_status(oval3)
196 for p in ['22', '80', '806']:
197 l = [ o1[p], o2[p], o3[p] ]
198 if len(filter(lambda x: x == 'open', l)) > 1:
199 values['port_status'][p] = 'open'
201 values['port_status'][p] = o1[p]
203 print values['port_status']
204 return (nodename, values)
206 def collectPING(self, nodename, cohash):
209 (oval,errval) = ping.run_noexcept("ping -c 1 -q %s | grep rtt" % nodename)
214 values['ping_status'] = False
216 values['ping_status'] = True
220 def collectTRACEROUTE(self, nodename, cohash):
222 trace = command.CMD()
223 (oval,errval) = trace.run_noexcept("traceroute %s" % nodename)
225 values['traceroute'] = oval
229 def collectSSH(self, nodename, cohash):
232 for port in [22, 806]:
233 ssh = command.SSH('root', nodename, port)
235 (oval, errval) = ssh.run_noexcept2(""" <<\EOF
237 echo ' "kernel_version":"'`uname -a`'",'
238 echo ' "bmlog":"'`ls /tmp/bm.log`'",'
239 echo ' "bootcd_version":"'`cat /mnt/cdrom/bootme/ID`'",'
240 echo ' "boot_server":"'`cat /mnt/cdrom/bootme/BOOTSERVER`'",'
241 echo ' "nm_status":"'`ps ax | grep nm.py | grep -v grep`'",'
242 echo ' "dns_status":"'`host boot.planet-lab.org 2>&1`'",'
243 echo ' "iptables_status":"'`iptables -t mangle -nL | awk '$1~/^[A-Z]+$/ {modules[$1]=1;}END{for (k in modules) {if (k) printf "%s ",k;}}'`'",'
244 echo ' "princeton_comon_dir":"'`ls -d /vservers/princeton_comon`'",'
245 echo ' "uptime":"'`cat /proc/uptime`'",'
247 ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
248 echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
249 echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
250 echo ' "fs_status":"'`grep proc /proc/mounts | grep ro, ; if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 20 touch /var/log/monitor 2>&1 ; if [ -d /vservers/ ] ; then timeout.pl 20 touch /vservers/monitor.log 2>&1 ; fi ; fi`'",'
251 echo ' "rpm_version":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 30 rpm -q NodeManager ; fi`'",'
252 echo ' "rpm_versions":"'`if [ -x /usr/bin/timeout.pl ] ; then timeout.pl 45 rpm -q -a ; fi`'",'
256 values['ssh_error'] = errval
258 #print "OVAL: %s" % oval
259 values.update(eval(oval))
260 values['ssh_portused'] = port
263 values.update({'kernel_version': "", 'bmlog' : "", 'bootcd_version' : '',
271 'princeton_comon_dir' : "",
272 'princeton_comon_running' : "",
273 'princeton_comon_procs' : "", 'ssh_portused' : None})
275 oval = values['nm_status']
277 values['nm_status'] = "Y"
279 values['nm_status'] = "N"
281 continue_slice_check = True
282 oval = values['princeton_comon_dir']
283 if "princeton_comon" in oval:
284 values['princeton_comon_dir'] = True
286 values['princeton_comon_dir'] = False
287 continue_slice_check = False
289 if continue_slice_check:
290 oval = values['princeton_comon_running']
291 if len(oval) > len('/proc/virtual/'):
292 values['princeton_comon_running'] = True
294 values['princeton_comon_running'] = False
295 continue_slice_check = False
297 values['princeton_comon_running'] = False
299 if continue_slice_check:
300 oval = values['princeton_comon_procs']
301 values['princeton_comon_procs'] = int(oval)
303 values['princeton_comon_procs'] = None
305 print traceback.print_exc()
310 def collectPLC(self, nodename, cohash):
312 ### GET PLC NODE ######################
313 d_node = plccache.GetNodeByName(nodename)
314 values['plc_node_stats'] = d_node
316 ### GET PLC PCU ######################
319 if d_node and len(d_node['pcu_ids']) > 0:
320 d_pcu = d_node['pcu_ids'][0]
322 site_id = d_node['site_id']
324 values['plc_pcuid'] = d_pcu
326 ### GET PLC SITE ######################
327 print "SITEID: %s" % site_id
328 d_site = plccache.GetSitesById([ site_id ])[0]
329 values['loginbase'] = d_site['login_base']
330 values['plc_site_stats'] = d_site
334 def evaluate(self, nodename, values):
335 # TODO: this section can probably be reduced to a policy statement
336 # using patterns and values collected so far.
337 # NOTE: A node is "DOWN" if
338 # * cannot ssh into it.
339 # * all ports are not open for a 'BOOT' node
340 # * dns for hostname does not exist.
341 b_getbootcd_id = True
343 oval = values['kernel_version']
344 values['ssh_status'] = True
345 if "2.6.17" in oval or "2.6.2" in oval:
346 values['observed_category'] = 'PROD'
347 if "bm.log" in values['bmlog']:
348 values['observed_status'] = 'DEBUG'
350 values['observed_status'] = 'BOOT'
351 elif "2.6.12" in oval or "2.6.10" in oval:
352 values['observed_category'] = 'OLDPROD'
353 if "bm.log" in values['bmlog']:
354 values['observed_status'] = 'DEBUG'
356 values['observed_status'] = 'BOOT'
358 # NOTE: on 2.6.8 kernels, with 4.2 bootstrapfs, the chroot
359 # command fails. I have no idea why.
360 elif "2.4" in oval or "2.6.8" in oval:
361 b_getbootcd_id = False
362 values['observed_category'] = 'OLDBOOTCD'
363 values['observed_status'] = 'DEBUG'
365 values['observed_category'] = 'UNKNOWN'
366 if "bm.log" in values['bmlog']:
367 values['observed_status'] = 'DEBUG'
369 values['observed_status'] = 'BOOT'
372 b_getbootcd_id = False
373 values['ssh_status'] = False
374 values['observed_category'] = 'ERROR'
375 values['observed_status'] = 'DOWN'
376 values['kernel_version'] = ""
378 values['firewall'] = False
380 # NOTE: A node is down if some of the public ports are not open
381 if values['observed_status'] == "BOOT":
382 # verify that all ports are open. Else, report node as down.
383 if not ( values['port_status']['22'] == "open" and \
384 values['port_status']['80'] == "open" and \
385 values['port_status']['806'] == "open") :
386 #email_exception(nodename, "%s FILTERED HOST" % nodename)
387 values['observed_status'] = 'DOWN'
388 values['firewall'] = True
390 #if values['port_status']['22'] == "open" and \
391 # values['port_status']['80'] == "closed" and \
392 # values['port_status']['806'] == "open" :
393 # email_exception("%s port 80 blocked" % nodename, "possible VSERVER ref blocked")
395 #if not values['external_dns_status']:
396 # email_exception("%s DNS down" % nodename)
399 # try to get BootCD for all nodes that are not 2.4 nor inaccessible
400 oval = values['bootcd_version']
402 values['bootcd_version'] = oval
403 if "v2" in oval and \
404 ( nodename is not "planetlab1.cs.unc.edu" and \
405 nodename is not "planetlab2.cs.unc.edu" ):
406 values['observed_category'] = 'OLDBOOTCD'
408 values['bootcd_version'] = ""
410 values['bootcd_version'] = ""
414 def collectDNS(self, nodename, cohash):
417 ipaddr = socket.gethostbyname(nodename)
418 # TODO: check that IP returned matches IP in plc db.
419 values['external_dns_status'] = True
420 except Exception, err:
421 values['external_dns_status'] = False
425 def collectInternal(self, nodename, cohash):
429 v = self.collectPING(nodename, cohash)
432 v = self.collectPorts(nodename)
435 v = self.collectSSH(nodename, cohash)
438 v = self.collectDNS(nodename, cohash)
441 v = self.collectTRACEROUTE(nodename, cohash)
444 v = self.collectPLC(nodename, cohash)
447 if nodename in cohash:
448 values['comon_stats'] = cohash[nodename]
450 values['comon_stats'] = {'resptime': '-1',
458 values['rpms'] = values['rpm_versions']
459 print "ALLVERSIONS: %s %s" % (nodename, values['rpm_versions'])
460 print "RPMVERSION: %s %s" % (nodename, values['rpm_version'])
461 print "UPTIME: %s %s" % (nodename, values['uptime'])
463 values = self.evaluate(nodename, values)
464 values['date_checked'] = datetime.now()
467 print traceback.print_exc()
469 return (nodename, values)
# One-off internal scan of a single node: collect everything (empty comon
# hash) and persist the result via ScanNodeInternal.record().
# NOTE(review): the surrounding try/except and return lines are missing
# from this dump.
472 def internalprobe(hostname):
473 scannode = ScanNodeInternal()
475 (nodename, values) = scannode.collectInternal(hostname, {})
476 scannode.record(None, (nodename, values))
480 print traceback.print_exc()
483 def externalprobe(hostname):
484 scannode = ScanNodeInternal()
486 values = self.collectPorts(hostname)
487 scannode.record(None, (hostname, values))
491 print traceback.print_exc()
494 class ScanPCU(ScanInterface):
495 recordclass = FindbadPCURecord
497 primarykey = 'plc_pcuid'
499 def collectInternal(self, pcuname, cohash):
501 continue_probe = True
503 values = {'reboot_trial_status' : 'novalue'}
504 ### GET PCU ######################
508 v = get_plc_pcu_values(pcuname)
509 if v['hostname'] is not None: v['hostname'] = v['hostname'].strip()
510 if v['ip'] is not None: v['ip'] = v['ip'].strip()
513 values['plc_pcu_stats'] = v
515 continue_probe = False
518 traceback.print_exc()
519 continue_probe = False
521 if b_except or not continue_probe: return (None, None, None)
523 #### RUN NMAP ###############################
526 print "nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats'])
527 (oval,eval) = nmap.run_noexcept("nmap -oG - -P0 -p22,23,80,443,5869,9100,16992 %s | grep Host:" % reboot.pcu_name(values['plc_pcu_stats']))
528 # NOTE: an empty / error value for oval, will still work.
529 (values['port_status'], continue_probe) = nmap_port_status(oval)
531 values['port_status'] = None
533 #### COMPLETE ENTRY #######################
535 values['entry_complete'] = []
536 #if values['protocol'] is None or values['protocol'] is "":
537 # values['entry_complete'] += ["protocol"]
538 if values['plc_pcu_stats']['model'] is None or values['plc_pcu_stats']['model'] is "":
539 values['entry_complete'] += ["model"]
540 # Cannot continue due to this condition
541 continue_probe = False
543 if values['plc_pcu_stats']['password'] is None or values['plc_pcu_stats']['password'] is "":
544 values['entry_complete'] += ["password"]
545 # Cannot continue due to this condition
546 continue_probe = False
548 if len(values['entry_complete']) > 0:
549 continue_probe = False
551 if values['plc_pcu_stats']['hostname'] is None or values['plc_pcu_stats']['hostname'] is "":
552 values['entry_complete'] += ["hostname"]
553 if values['plc_pcu_stats']['ip'] is None or values['plc_pcu_stats']['ip'] is "":
554 values['entry_complete'] += ["ip"]
556 # If there are no nodes associated with this PCU, then we cannot continue.
557 if len(values['plc_pcu_stats']['node_ids']) == 0:
558 continue_probe = False
559 values['entry_complete'] += ['nodeids']
562 #### DNS and IP MATCH #######################
563 if values['plc_pcu_stats']['hostname'] is not None and values['plc_pcu_stats']['hostname'] is not "" and \
564 values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "":
566 ipaddr = socket.gethostbyname(values['plc_pcu_stats']['hostname'])
567 if ipaddr == values['plc_pcu_stats']['ip']:
568 values['dns_status'] = "DNS-OK"
570 values['dns_status'] = "DNS-MISMATCH"
571 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
573 except Exception, err:
574 values['dns_status'] = "DNS-NOENTRY"
575 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
577 if values['plc_pcu_stats']['ip'] is not None and values['plc_pcu_stats']['ip'] is not "":
578 values['dns_status'] = "NOHOSTNAME"
579 values['plc_pcu_stats']['hostname'] = values['plc_pcu_stats']['ip']
581 values['dns_status'] = "NO-DNS-OR-IP"
582 values['plc_pcu_stats']['hostname'] = "No_entry_in_DB"
583 continue_probe = False
586 ###### DRY RUN ############################
587 if continue_probe and 'node_ids' in values['plc_pcu_stats'] and \
588 len(values['plc_pcu_stats']['node_ids']) > 0:
589 rb_ret = reboot.reboot_test_new(values['plc_pcu_stats']['nodenames'][0],
592 rb_ret = "Not_Run" # No nodes to test"
594 values['reboot_trial_status'] = rb_ret
597 print "____________________________________"
600 print "____________________________________"
601 errors['traceback'] = traceback.format_exc()
602 print errors['traceback']
603 values['reboot_trial_status'] = str(errors['traceback'])
606 values['entry_complete']=" ".join(values['entry_complete'])
608 values['date_checked'] = datetime.now()
609 return (pcuname, values)