# Attempt to reboot a node in debug state.
-
-
import os
import sys
import time
from sets import Set
from monitor.getsshkeys import SSHKnownHosts
-
from monitor.Rpyc import SocketConnection, Async
from monitor.Rpyc.Utils import *
from pcucontrol.transports.ssh import fdpexpect as fdpexpect
from pcucontrol.transports.ssh import pexpect as pexpect
-
-
api = plc.getAuthAPI()
fb = None
+def bootmanager_log_name(hostname):
+	"""Return the history-relative filename for an archived BootManager log.
+
+	The name embeds a minute-resolution timestamp plus the hostname so
+	successive collections from the same node get distinct files.
+	"""
+	stamp = time.strftime("%Y-%m-%d-%H:%M")
+	return os.path.join('history', "%s-bm.%s.log" % (stamp, hostname))
+
+def bootmanager_log_action(hostname, short_log_path, logtype="bm.log"):
+	"""Record an ActionRecord noting that a BootManager log was archived.
+
+	hostname       -- node whose log was collected
+	short_log_path -- log path relative to config.MONITOR_BOOTMANAGER_LOG
+	logtype        -- value stored in the record's action_type field
+	"""
+	try:
+		node = FindbadNodeRecord.get_latest_by(hostname=hostname)
+		loginbase = PlcSite.query.get(node.plc_node_stats['site_id']).plc_site_stats['login_base']
+		err = ""
+	except Exception:
+		# was a bare 'except:', which would also swallow SystemExit and
+		# KeyboardInterrupt; fall back to a placeholder site and keep the
+		# traceback in the record for later diagnosis.
+		loginbase = "unknown"
+		err = traceback.format_exc()
+
+	# NOTE(review): instantiating ActionRecord appears to persist the row as
+	# an ORM side effect -- confirm against the model definition.
+	act = ActionRecord(loginbase=loginbase,
+				hostname=hostname,
+				action='log',
+				action_type=logtype,
+				log_path=short_log_path,
+				error_string=err)
+
+# Raised by the ssh/rsync setup path after two consecutive login failures.
class ExceptionDoubleSSHError(Exception): pass
class NodeConnection:
+	# Wraps an Rpyc connection to one node; its methods query the node's
+	# boot state, fetch logs, and run repair commands remotely.
	def __init__(self, connection, node, config):
+		# NOTE(review): bring-up debug print left in place
		print "init nodeconnection"
		self.node = node
		self.c = connection
		self.config = config
def get_boot_state(self):
+ print "get_boot_state(self)"
try:
if self.c.modules.os.path.exists('/tmp/source'):
return "debug"
return log
	def get_bootmanager_log(self):
-		t_stamp = time.strftime("%Y-%m-%d-%H:%M")
-		download(self.c, "/tmp/bm.log", "%s/history/%s-bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node))
-		os.system("cp %s/history/%s-bm.%s.log %s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node, config.MONITOR_BOOTMANAGER_LOG, self.node))
+		# Archive /tmp/bm.log under history/ using the shared naming helper,
+		# record the collection as an action, then refresh the well-known
+		# bm.<node>.log copy.
+		bm_name = bootmanager_log_name(self.node)
+		download(self.c, "/tmp/bm.log", "%s/%s" % (config.MONITOR_BOOTMANAGER_LOG, bm_name))
+		#email_exception(self.node, "collected BM log for %s" % self.node)
+		bootmanager_log_action(self.node, bm_name, "collected_bm.log")
+		os.system("cp %s/%s %s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, bm_name, config.MONITOR_BOOTMANAGER_LOG, self.node))
+		# returns an open read handle; callers read and seek it
		log = open("%s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, self.node), 'r')
		return log
-
-# def get_dmesg(self):
-# self.c.modules.os.system("dmesg > /var/log/dmesg.bm.log")
-# download(self.c, "/var/log/dmesg.bm.log", "log/dmesg.%s.log" % self.node)
-# log = open("log/dmesg.%s.log" % self.node, 'r')
-# return log
-#
-# def get_bootmanager_log(self):
-# download(self.c, "/tmp/bm.log", "log/bm.%s.log.gz" % self.node)
-# #os.system("zcat log/bm.%s.log.gz > log/bm.%s.log" % (self.node, self.node))
-# os.system("cp log/bm.%s.log.gz log/bm.%s.log" % (self.node, self.node))
-# log = open("log/bm.%s.log" % self.node, 'r')
-# return log
-
def dump_plconf_file(self):
c = self.c
self.c.modules.sys.path.append("/tmp/source/")
else:
print " Unable to read Node Configuration"
+	def fsck_repair_node(self):
+		"""Manually fsck the node's root and vservers volumes, then re-run
+		BootManager with the node's current PLC boot state.
+
+		Guarded by /tmp/BM_RUNNING so a second repair is never started
+		while one is still in flight; the background command removes the
+		flag when it finishes.
+		"""
+		self.c.modules.sys.path.append("/tmp/source/")
+		self.c.modules.os.chdir('/tmp/source')
+		if self.c.modules.os.path.exists('/tmp/BM_RUNNING'):
+			print "Running MANUAL FSCK already... try again soon."
+		else:
+			print "Running MANUAL fsck on %s" % self.node
+			cmd = "( touch /tmp/BM_RUNNING ; " + \
+			      " fsck -v -f -y /dev/planetlab/root &> out.fsck ; " + \
+			      " fsck -v -f -y /dev/planetlab/vservers >> out.fsck 2>&1 ; " + \
+			      " python ./BootManager.py %s &> server.log < /dev/null ; " + \
+			      " rm -f /tmp/BM_RUNNING " + \
+			      ") &"
+			# run BM with the node's actual PLC state ('boot' or 'safeboot')
+			cmd = cmd % self.get_nodestate()
+			self.c.modules.os.system(cmd)
def compare_and_repair_nodekeys(self):
c = self.c
	def set_nodestate(self, state='boot'):
+		# push the new boot_state to PLC; returns the UpdateNode result
		return api.UpdateNode(self.node, {'boot_state' : state})
+	def get_nodestate(self):
+		"""Return the node's current boot_state from PLC, falling back to
+		the last value cached by monitor when the PLC API call fails."""
+		try:
+			return api.GetNodes(self.node, ['boot_state'])[0]['boot_state']
+		except Exception:
+			# was a bare 'except:' -- keep the cached-value fallback but
+			# stop swallowing SystemExit/KeyboardInterrupt.
+			traceback.print_exc()
+			# NOTE: use last cached value from plc
+			fbnode = FindbadNodeRecord.get_latest_by(hostname=self.node).to_dict()
+			return fbnode['plc_node_stats']['boot_state']
+
+
def restart_node(self, state='boot'):
api.UpdateNode(self.node, {'boot_state' : state})
self.setup_host()
	def get_connection(self, config):
+		# Build a NodeConnection over a localhost socket (presumably a
+		# tunnel to the node's Rpyc server on self.port -- TODO confirm).
-		conn = NodeConnection(SocketConnection("localhost", self.port), self.node, config)
-		#i = 0
-		#while i < 3:
-		#	print i, conn.c.modules.sys.path
-		#	print conn.c.modules.os.path.exists('/tmp/source')
-		#	i+=1
-		#	time.sleep(1)
+		try:
+			print "SocketConnection(localhost, %s" % self.port
+			sc = SocketConnection("localhost", self.port)
+			print "NodeConnection(%s, %s)" % (sc, self.node)
+			conn = NodeConnection(sc, self.node, config)
+		except:
+			# NOTE(review): bare 'except:' also catches SystemExit /
+			# KeyboardInterrupt; the second attempt below is unguarded,
+			# so a repeat failure propagates to the caller.
+			# NOTE: try twice since this can sometimes fail the first time. If
+			# it fails again, let it go.
+			conn = NodeConnection(SocketConnection("localhost", self.port), self.node, config)
		return conn
def setup_host(self):
return
# COPY Rpyc files to host
- cmd = "rsync -qv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+ #cmd = "rsync -vvv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+ cmd = """rsync -vvv -az -e "ssh -o BatchMode=yes" %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc""" % args
if self.verbose: print cmd
print cmd
# TODO: Add timeout
print "\tUNKNOWN SSH KEY FOR %s; making an exception" % self.node
#print "MAKE EXPLICIT EXCEPTION FOR %s" % self.node
k = SSHKnownHosts(); k.updateDirect(self.node); k.write(); del k
+ print "trying: ", cmd
+ print [ "%s=%s" % (a, os.environ[a]) for a in filter(lambda x: 'SSH' in x, os.environ.keys()) ]
ret = localos.system(cmd, timeout)
print ret
if ret != 0:
print "\tFAILED TWICE"
- #sys.exit(1)
+ #email_exception("%s rsync failed twice" % self.node)
raise ExceptionDoubleSSHError("Failed twice trying to login with updated ssh host key")
t1 = time.time()
(ov,ev) = ssh.run_noexcept2("""<<\EOF
rm -f out.log
echo "kill server" >> out.log
+ netstat -ap | grep python | grep 18812 | awk '{print $7}' | awk -F / '{print $1}' | xargs kill
ps ax | grep Rpyc | grep -v grep | awk '{print $1}' | xargs kill 2> /dev/null ;
echo "export" >> out.log
export PYTHONPATH=$HOME ;
python Rpyc/Servers/forking_server.py &> server.log &
echo "done" >> out.log
EOF""")
- #cmd = """ssh %(user)s@%(hostname)s """ + \
- # """'ps ax | grep Rpyc | grep -v grep | awk "{print \$1}" | xargs kill 2> /dev/null' """
- #cmd = cmd % args
- #if self.verbose: print cmd
- ## TODO: Add timeout
- #print localos.system(cmd,timeout)
-
- ## START a new rpyc server.
- #cmd = """ssh -n %(user)s@%(hostname)s "export PYTHONPATH=\$HOME; """ + \
- # """python Rpyc/Servers/forking_server.py &> server.log < /dev/null &" """
- #cmd = cmd % args
- #if self.verbose: print cmd
- #print localos.system(cmd,timeout)
print "setup rpyc server over ssh"
print ssh.ret
print traceback.print_exc()
return False
+ msg = "ERROR setting up session for %s" % self.hostname
try:
if config == None:
self.session = PlanetLabSession(self.hostname, False, True)
else:
self.session = PlanetLabSession(self.hostname, config.nosetup, config.verbose)
except ExceptionDoubleSSHError, e:
- msg = "ERROR setting up session for %s" % self.hostname
print msg
return False
except Exception, e:
email_exception(msg)
return False
+ print "Getting connection: 1st try"
try:
conn = self.session.get_connection(config)
except EOFError:
# NOTE: sometimes the wait in setup_host() is not long enough.
# So, here we try to wait a little longer before giving up entirely.
try:
+ print "Getting connection: 2nd try"
time.sleep(self.session.timeout*5)
conn = self.session.get_connection(config)
except EOFError:
# failed twice... no need to report this really, it's just in a
# weird state...
+ print "Getting connection: failed"
+ email_exception(self.hostname, "failed twice to get connection")
return False
except:
traceback.print_exc()
email_exception(self.hostname)
return False
+ print "Getting connection: ok"
#print "trying to use conn before returning it."
#print conn.c.modules.sys.path
#print conn.c.modules.os.path.exists('/tmp/source')
def getSequences(self):
- # TODO: This can be replaced with a DB definition at a future time.
- # This would make it possible for an admin to introduce new
- # patterns without touching code.
-
+		# NOTE: The DB is now the authoritative record for all BM sequences.
+		# An admin can introduce new patterns and actions without touching code.
sequences = {}
- # restart_bootmanager_boot
- for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-protoerror-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-implementerror-bootupdatefail-update-debug-done",
-
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-exception-protoerror-update-protoerror-debug-done",
-
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-debug-done",
- "bminit-cfg-auth-getplc-exception-protoerror-update-protoerror-debug-done",
- "bminit-cfg-auth-protoerror-exception-update-protoerror-debug-done",
- "bminit-cfg-auth-protoerror-exception-update-bootupdatefail-authfail-debug-done",
- "bminit-cfg-auth-protoerror-exception-update-debug-done",
- "bminit-cfg-auth-getplc-exception-protoerror-update-debug-done",
- "bminit-cfg-auth-getplc-implementerror-update-debug-done",
- ]:
- sequences.update({n : "restart_bootmanager_boot"})
-
- # conn.restart_bootmanager('reinstall')
- for n in [ "bminit-cfg-auth-getplc-installinit-validate-exception-modulefail-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-exception-modulefail-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-installdisk-bziperror-exception-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-exception-mkfsfail-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-installcfg-installstop-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-kernelcopyfail-exception-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-installstop-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-kernelcopyfail-exception-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-exception-noinstall-update-debug-done",
- # actual solution appears to involve removing the bad files, and
- # continually trying to boot the node.
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-implementerror-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-exception-bmexceptmount-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-exception-bmexceptmount-exception-noinstall-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-validate-bmexceptvgscan-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-exception-noinstall-update-debug-validate-done",
- ]:
- sequences.update({n : "restart_bootmanager_rins"})
-
- # repair_node_keys
- for n in ["bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-validate-exception-done",
- "bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-done",
- ]:
- sequences.update({n: "repair_node_keys"})
-
- # conn.restart_node('reinstall')
- for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-writeerror-exception-chrootfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-implementerror-bootupdatefail-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-readonlyfs-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-nospace-exception-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
- "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
- ]:
- sequences.update({n : "restart_node_rins"})
-
- # restart_node_boot
- for n in ["bminit-cfg-auth-getplc-implementerror-bootupdatefail-update-debug-done",
- "bminit-cfg-auth-implementerror-bootcheckfail-update-debug-done",
- "bminit-cfg-auth-implementerror-bootcheckfail-update-implementerror-bootupdatefail-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
- "bminit-cfg-auth-getplc-update-installinit-validate-implementerror-update-debug-done",
- "bminit-cfg-auth-getplc-exception-update-bootupdatefail-debug-done",
- ]:
- sequences.update({n: "restart_node_boot"})
-
- # update_node_config_email
- for n in ["bminit-cfg-exception-nocfg-update-bootupdatefail-nonode-debug-done",
- "bminit-cfg-exception-update-bootupdatefail-nonode-debug-done",
- "bminit-cfg-exception-update-bootupdatefail-nonode-debug-validate-exception-done",
- "bminit-cfg-auth-bootcheckfail-nonode-exception-update-bootupdatefail-nonode-debug-done",
- ]:
- sequences.update({n : "update_node_config_email"})
-
- for n in [ "bminit-cfg-exception-nodehostname-update-debug-done",
- "bminit-cfg-update-exception-nodehostname-update-debug-done",
- ]:
- sequences.update({n : "nodenetwork_email"})
-
- # update_bootcd_email
- for n in ["bminit-cfg-auth-getplc-update-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
- "bminit-cfg-auth-getplc-update-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-exception-hardwarerequirefail-update-debug-done",
- ]:
- sequences.update({n : "update_bootcd_email"})
-
- for n in [ "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
- ]:
- sequences.update({n: "suspect_error_email"})
-
- # update_hardware_email
- sequences.update({"bminit-cfg-auth-getplc-hardware-exception-disktoosmall-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
- sequences.update({"bminit-cfg-auth-getplc-hardware-disktoosmall-exception-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
-
- # broken_hardware_email
- sequences.update({"bminit-cfg-auth-getplc-update-hardware-exception-hardwarerequirefail-update-debug-done" : "broken_hardware_email"})
-
- # bad_dns_email
- for n in [
- "bminit-cfg-update-implementerror-bootupdatefail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
- "bminit-cfg-auth-implementerror-bootcheckfail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
- ]:
- sequences.update( { n : "bad_dns_email"})
+ bms = BootmanSequenceRecord.query.all()
+ for s in bms:
+ sequences[s.sequence] = s.action
+
return sequences
	def getDiskSteps(self):
+		"""Return the ordered (tag, regex) pairs used to classify failure
+		lines in collected BootManager / dmesg logs."""
		steps = [
			('scsierror' , 'SCSI error : <\d+ \d+ \d+ \d+> return code = 0x\d+'),
			('ioerror' , 'end_request: I/O error, dev sd\w+, sector \d+'),
-			('ccisserror' , 'cciss: cmd \w+ has CHECK CONDITION byte \w+ = \w+'),
+			# loosened: match any CHECK CONDITION report, not one byte layout
+			('ccisserror' , 'cciss: cmd \w+ has CHECK CONDITION'),
			('buffererror', 'Buffer I/O error on device dm-\d, logical block \d+'),
			('bmexceptrmfail', 'Unable to remove directory tree: /tmp/mnt'),
			('exception' , 'Exception'),
			('nocfg' , 'Found configuration file planet.cnf on floppy, but was unable to parse it.'),
+			('protoerror2' , '500 Internal Server Error'),
			('protoerror' , 'XML RPC protocol error'),
			('nodehostname' , 'Configured node hostname does not resolve'),
			('implementerror', 'Implementation Error'),
-			('readonlyfs' , '[Errno 30] Read-only file system'),
-			('baddisk' , "IOError: [Errno 13] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
+			('fsckabort' , 'is mounted. e2fsck: Cannot continue, aborting'),
+			('fsckfail' , 'Running e2fsck -v -p /dev/planetlab/root failed'),
+			('fsckfail2' , 'Running e2fsck -v -p /dev/planetlab/vservers failed'),
+			# brackets escaped so '[Errno N]' matches literally as a regex
+			('readonlyfs' , '\[Errno 30\] Read-only file system'),
+			('baddisk' , "IOError: \[Errno 13\] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
			('noinstall' , 'notinstalled'),
			('bziperror' , 'bzip2: Data integrity error when decompressing.'),
			('noblockdev' , "No block devices detected."),
+			('missingkernel', "missingkernel"),
			('dnserror' , 'Name or service not known'),
+			('noparseconfig', "Found configuration file plnode.txt on floppy, but was unable to parse it"),
+			('noconfig' , "Unable to find and read a node configuration file"),
			('downloadfail' , 'Unable to download main tarball /boot/bootstrapfs-planetlab-i386.tar.bz2 from server.'),
			('disktoosmall' , 'The total usable disk size of all disks is insufficient to be usable as a PlanetLab node.'),
			('hardwarerequirefail' , 'Hardware requirements not met'),
			('nospace' , "No space left on device"),
			('nonode' , 'Failed to authenticate call: No such node'),
			('authfail' , 'Failed to authenticate call: Call could not be authenticated'),
-			('bootcheckfail' , 'BootCheckAuthentication'),
-			('bootupdatefail' , 'BootUpdateNode'),
+			('authfail2' , 'Authentication Failed'),
+			('bootcheckfail' , 'BootCheckAuthentication'),
+			('bootupdatefail' , 'BootUpdateNode'),
		]
		return steps
# NOTE: Nothing works if the bootcd is REALLY old.
# So, this is the first step.
+ bootman_action = "unknown"
+
fbnode = FindbadNodeRecord.get_latest_by(hostname=hostname).to_dict()
recent_actions = sitehist.getRecentActions(hostname=hostname)
if fbnode['observed_category'] == "OLDBOOTCD":
print "\t...Notify owner to update BootImage!!!"
- if not found_within(recent_actions, 'newbootcd_notice', 3):
+ if not found_within(recent_actions, 'newbootcd_notice', 3.5):
sitehist.sendMessage('newbootcd_notice', hostname=hostname)
print "\tDisabling %s due to out-of-date BootImage" % hostname
api.UpdateNode(hostname, {'boot_state' : 'disabled'})
# NOTE: nothing else is possible.
- return True
+ return "disabled"
debugnode = DebugInterface(hostname)
conn = debugnode.getConnection()
- if type(conn) == type(False): return False
+ if type(conn) == type(False): return "connect_failed"
boot_state = conn.get_boot_state()
if boot_state != "debug":
print "... %s in %s state: skipping..." % (hostname , boot_state)
- return boot_state == "boot"
+ return "skipped" #boot_state == "boot"
if conn.bootmanager_running():
print "...BootManager is currently running. Skipping host %s" %hostname
- return True
+ return "skipped" # True
# Read persistent flags, tagged on one week intervals.
print "...Should investigate. Skipping node."
# TODO: send message related to these errors.
- if not found_within(recent_actions, 'baddisk_notice', 3):
+ if not found_within(recent_actions, 'baddisk_notice', 7):
+ print "baddisk_notice not found recently"
log=conn.get_dmesg().read()
sitehist.sendMessage('baddisk_notice', hostname=hostname, log=log)
- conn.set_nodestate('disabled')
+ return "skipping_baddisk"
+ else:
+ # NOTE: "" does not add a new action record
+ return ""
- return False
print "...Downloading bm.log from %s" %hostname
log = conn.get_bootmanager_log()
+ bm_log_data = log.read() # get data
+ log.seek(0) # reset fd pointer for fdspawn
child = fdpexpect.fdspawn(log)
- if hasattr(config, 'collect') and config.collect: return True
+ if hasattr(config, 'collect') and config.collect: return "collect"
if config and not config.quiet: print "...Scanning bm.log for errors"
args = {}
args['hostname'] = hostname
args['sequence'] = s
- args['bmlog'] = conn.get_bootmanager_log().read()
+ args['bmlog'] = bm_log_data
args['viart'] = False
+ args['saveact'] = True
+ args['ccemail'] = True
sitehist.sendMessage('unknownsequence_notice', **args)
conn.restart_bootmanager('boot')
+ bootman_action = "restart_bootmanager"
+
# NOTE: Do not set the pflags value for this sequence if it's unknown.
# This way, we can check it again after we've fixed it.
flag_set = False
else:
+ bootman_action = sequences[s]
if sequences[s] == "restart_bootmanager_boot":
print "...Restarting BootManager.py on %s "%hostname
conn.restart_node('reinstall')
elif sequences[s] == "restart_node_boot":
conn.restart_node('boot')
+ elif sequences[s] == "fsck_repair":
+ conn.fsck_repair_node()
elif sequences[s] == "repair_node_keys":
if conn.compare_and_repair_nodekeys():
# the keys either are in sync or were forced in sync.
- # so try to reboot the node again.
- # TODO: why was this originally 'reinstall' instead of 'boot'??
- conn.restart_bootmanager('boot')
- pass
+ # so try to start BM again.
+ conn.restart_bootmanager(conn.get_nodestate())
else:
# there was some failure to synchronize the keys.
print "...Unable to repair node keys on %s" %hostname
+ if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
+ args = {}
+ args['hostname'] = hostname
+ sitehist.sendMessage('nodeconfig_notice', **args)
+ conn.dump_plconf_file()
+ else:
+ # NOTE: do not add a new action record
+ return ""
- elif sequences[s] == "suspect_error_email":
+ elif sequences[s] == "unknownsequence_notice":
args = {}
args['hostname'] = hostname
args['sequence'] = s
- args['bmlog'] = conn.get_bootmanager_log().read()
+ args['bmlog'] = bm_log_data
args['viart'] = False
+ args['saveact'] = True
+ args['ccemail'] = True
sitehist.sendMessage('unknownsequence_notice', **args)
conn.restart_bootmanager('boot')
- # TODO: differentiate this and the 'nodenetwork_email' actions.
- elif sequences[s] == "update_node_config_email":
+ elif sequences[s] == "nodeconfig_notice":
- if not found_within(recent_actions, 'nodeconfig_notice', 3):
+ if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
args = {}
args['hostname'] = hostname
sitehist.sendMessage('nodeconfig_notice', **args)
conn.dump_plconf_file()
+ else:
+ # NOTE: do not add a new action record
+ return ""
elif sequences[s] == "nodenetwork_email":
- if not found_within(recent_actions, 'nodeconfig_notice', 3):
+ if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
args = {}
args['hostname'] = hostname
- args['bmlog'] = conn.get_bootmanager_log().read()
+ args['bmlog'] = bm_log_data
sitehist.sendMessage('nodeconfig_notice', **args)
conn.dump_plconf_file()
+ else:
+ # NOTE: do not add a new action record
+ return ""
- elif sequences[s] == "update_bootcd_email":
+ elif sequences[s] == "noblockdevice_notice":
- if not found_within(recent_actions, 'newalphacd_notice', 3):
+ if not found_within(recent_actions, 'noblockdevice_notice', 3.5):
args = {}
- args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
+ #args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
args['hostname'] = hostname
- sitehist.sendMessage('newalphacd_notice', **args)
-
- print "\tDisabling %s due to out-of-date BOOTCD" % hostname
+ sitehist.sendMessage('noblockdevice_notice', **args)
+ else:
+ # NOTE: do not add a new action record
+ return ""
- elif sequences[s] == "broken_hardware_email":
+ elif sequences[s] == "baddisk_notice":
# MAKE An ACTION record that this host has failed hardware. May
# require either an exception "/minhw" or other manual intervention.
# Definitely need to send out some more EMAIL.
# TODO: email notice of broken hardware
- if not found_within(recent_actions, 'baddisk_notice', 1):
+ if not found_within(recent_actions, 'baddisk_notice', 7):
print "...NOTIFYING OWNERS OF BROKEN HARDWARE on %s!!!" % hostname
args = {}
args['hostname'] = hostname
args['log'] = conn.get_dmesg().read()
sitehist.sendMessage('baddisk_notice', **args)
- conn.set_nodestate('disabled')
+ #conn.set_nodestate('disabled')
+ else:
+ # NOTE: do not add a new action record
+ return ""
- elif sequences[s] == "update_hardware_email":
- if not found_within(recent_actions, 'minimalhardware_notice', 1):
+ elif sequences[s] == "minimalhardware_notice":
+ if not found_within(recent_actions, 'minimalhardware_notice', 7):
print "...NOTIFYING OWNERS OF MINIMAL HARDWARE FAILURE on %s!!!" % hostname
args = {}
args['hostname'] = hostname
- args['bmlog'] = conn.get_bootmanager_log().read()
+ args['bmlog'] = bm_log_data
sitehist.sendMessage('minimalhardware_notice', **args)
+ else:
+ # NOTE: do not add a new action record
+ return ""
- elif sequences[s] == "bad_dns_email":
+ elif sequences[s] == "baddns_notice":
if not found_within(recent_actions, 'baddns_notice', 1):
print "...NOTIFYING OWNERS OF DNS FAILURE on %s!!!" % hostname
args = {}
print traceback.print_exc()
# TODO: api error. skip email, b/c all info is not available,
# flag_set will not be recorded.
- return False
+ return "exception"
nodenet_str = network_config_to_str(net)
args['hostname'] = hostname
args['interface_id'] = net['interface_id']
sitehist.sendMessage('baddns_notice', **args)
+ else:
+ # NOTE: do not add a new action record
+ return ""
- return True
+ return bootman_action
# MAIN -------------------------------------------------------------------