clearer names for actions, and infer actions better
[monitor.git] / monitor / bootman.py
index effd750..9754218 100755 (executable)
@@ -2,8 +2,6 @@
 
 # Attempt to reboot a node in debug state.
 
-
-
 import os
 import sys
 import time
@@ -13,8 +11,7 @@ import traceback
 import subprocess
 from sets import Set
 
-from monitor.getsshkeys import SSHKnownHosts
-
+from monitor.util.sshknownhosts import SSHKnownHosts
 from monitor.Rpyc import SocketConnection, Async
 from monitor.Rpyc.Utils import *
 
@@ -36,19 +33,44 @@ from pcucontrol.transports.ssh import pxssh as pxssh
 from pcucontrol.transports.ssh import fdpexpect as fdpexpect
 from pcucontrol.transports.ssh import pexpect as pexpect
 
-
-
 api = plc.getAuthAPI()
 fb = None
 
+def bootmanager_log_name(hostname):
+       t_stamp = time.strftime("%Y-%m-%d-%H:%M")
+       base_filename = "%s-bm.%s.log" % (t_stamp, hostname)
+       short_target_filename = os.path.join('history', base_filename)
+       return short_target_filename
+
+def bootmanager_log_action(hostname, short_log_path, logtype="bm.log"):
+       try:
+               node = FindbadNodeRecord.get_latest_by(hostname=hostname)
+               loginbase = PlcSite.query.get(node.plc_node_stats['site_id']).plc_site_stats['login_base']
+               err = ""
+       except:
+               loginbase = "unknown"
+               err = traceback.format_exc()
+
+       act = ActionRecord(loginbase=loginbase,
+                                               hostname=hostname,
+                                               action='log',
+                                               action_type=logtype,
+                                               log_path=short_log_path,
+                                               error_string=err)
+       return
+       
+
+class ExceptionDoubleSSHError(Exception): pass
 
 class NodeConnection:
        def __init__(self, connection, node, config):
+               print "init nodeconnection"
                self.node = node
                self.c = connection
                self.config = config
 
        def get_boot_state(self):
+               print "get_boot_state(self)"
                try:
                        if self.c.modules.os.path.exists('/tmp/source'):
                                return "debug"
@@ -66,16 +88,20 @@ class NodeConnection:
                return "unknown"
 
        def get_dmesg(self):
+               t_stamp = time.strftime("%Y-%m-%d-%H:%M")
                self.c.modules.os.system("dmesg > /var/log/dmesg.bm.log")
-               download(self.c, "/var/log/dmesg.bm.log", "log/dmesg.%s.log" % self.node)
-               log = open("log/dmesg.%s.log" % self.node, 'r')
+               download(self.c, "/var/log/dmesg.bm.log", "%s/history/%s-dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node))
+               os.system("cp %s/history/%s-dmesg.%s.log %s/dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node, config.MONITOR_BOOTMANAGER_LOG, self.node))
+               log = open("%s/dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, self.node), 'r')
                return log
 
        def get_bootmanager_log(self):
-               download(self.c, "/tmp/bm.log", "log/bm.%s.log.gz" % self.node)
-               #os.system("zcat log/bm.%s.log.gz > log/bm.%s.log" % (self.node, self.node))
-               os.system("cp log/bm.%s.log.gz log/bm.%s.log" % (self.node, self.node))
-               log = open("log/bm.%s.log" % self.node, 'r')
+               bm_name = bootmanager_log_name(self.node)
+               download(self.c, "/tmp/bm.log", "%s/%s" % (config.MONITOR_BOOTMANAGER_LOG, bm_name))
+               #email_exception(self.node, "collected BM log for %s" % self.node)
+               bootmanager_log_action(self.node, bm_name, "collected_bm.log")
+               os.system("cp %s/%s %s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, bm_name, config.MONITOR_BOOTMANAGER_LOG, self.node))
+               log = open("%s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, self.node), 'r')
                return log
 
        def dump_plconf_file(self):
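
Both log methods above follow the same archiving pattern: download into a
timestamped file under history/ (for bm.log, named by bootmanager_log_name and
registered in the actions table via bootmanager_log_action), then copy it to a
stable, un-timestamped path so other tools can always find the newest log
without knowing the timestamp. A minimal sketch of that pattern using shutil
instead of os.system("cp ...") (archive_log and its arguments are illustrative,
not part of the module):

    import os
    import time
    import shutil

    def archive_log(src, log_dir, hostname, kind):
        # timestamped copy, kept permanently under history/
        t_stamp = time.strftime("%Y-%m-%d-%H:%M")
        history = os.path.join(log_dir, 'history',
                               "%s-%s.%s.log" % (t_stamp, kind, hostname))
        shutil.copy(src, history)
        # stable path that always points at the most recent copy
        shutil.copy(src, os.path.join(log_dir, "%s.%s.log" % (kind, hostname)))
        return history
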
@@ -103,7 +129,44 @@ class NodeConnection:
                                print key, " == ", bm.VARS[key]
                else:
                        print "   Unable to read Node Configuration"
+
+       def fprobe_repair_node(self):
+               # When fprobe capture data grows too large, it fills the root
+               # partition and the node fails to boot.
+               c = self.c
+               self.c.modules.sys.path.append("/tmp/source/")
+
+               # NOTE: assume that the root fs is already mounted...
+               if self.c.modules.os.path.exists('/tmp/mnt/sysimg/var/local/fprobe'):
+                       print "CLEARING FPROBE DATA on %s" % self.node
+                       self.c.modules.os.chdir('/tmp/mnt/sysimg/var/local/fprobe')
+                       cmd = """ ls -lrt . | awk '{if (i<NR/2 && $9) {print "rm "$9;i=i+1;}}' | sh """
+                       self.c.modules.os.system(cmd)
+               else:
+                       print "COULD NOT CLEAR FPROBE DATA on %s" % self.node
                
+       def fsck_repair_node(self):
+               c = self.c
+               self.c.modules.sys.path.append("/tmp/source/")
+               self.c.modules.os.chdir('/tmp/source')
+               # TODO: restart
+               # TODO: set boot state to the node's actual boot state,
+               # which could be 'boot' or 'safeboot'
+               self.c.modules.os.chdir('/tmp/source')
+               if self.c.modules.os.path.exists('/tmp/BM_RUNNING'):
+                       print "Running MANUAL FSCK already... try again soon."
+               else:
+                       print "Running MANUAL fsck on %s" % self.node
+                       cmd = "( touch /tmp/BM_RUNNING ;  " + \
+                                 "  fsck -v -f -y /dev/planetlab/root &> out.fsck ; " + \
+                                 "  fsck -v -f -y /dev/planetlab/vservers >> out.fsck 2>&1 ; " + \
+                                 "  python ./BootManager.py %s &> server.log < /dev/null ; " + \
+                                 "  rm -f /tmp/BM_RUNNING " + \
+                                 ") &" 
+                       cmd = cmd % self.get_nodestate()
+                       self.c.modules.os.system(cmd)
+               #self.restart_bootmanager('boot')       
+               pass
 
        def compare_and_repair_nodekeys(self):
                c = self.c
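
The awk pipeline in fprobe_repair_node frees the root partition by deleting the
oldest half of the fprobe capture files: ls -lrt lists them oldest-first, the
awk counter emits an rm for the first NR/2 names, and sh runs them. A
pure-Python equivalent of that one-liner (a sketch only; the method itself runs
the shell version on the node over the Rpyc connection):

    import os

    def remove_oldest_half(dirpath):
        # oldest-first by mtime, like `ls -lrt`
        names = [n for n in os.listdir(dirpath)
                 if os.path.isfile(os.path.join(dirpath, n))]
        names.sort(key=lambda n: os.path.getmtime(os.path.join(dirpath, n)))
        for n in names[:len(names) // 2]:
            os.unlink(os.path.join(dirpath, n))

fsck_repair_node takes the same precaution as BootManager itself: /tmp/BM_RUNNING
acts as a lock file, so a second monitor pass cannot start a competing fsck
while one is still in flight.
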
@@ -155,6 +218,16 @@ class NodeConnection:
        def set_nodestate(self, state='boot'):
                return api.UpdateNode(self.node, {'boot_state' : state})
 
+       def get_nodestate(self):
+               try:
+                       return api.GetNodes(self.node, ['boot_state'])[0]['boot_state']
+               except:
+                       traceback.print_exc()
+                       # NOTE: use last cached value from plc
+                       fbnode = FindbadNodeRecord.get_latest_by(hostname=self.node).to_dict()
+                       return fbnode['plc_node_stats']['boot_state']
+
+
        def restart_node(self, state='boot'):
                api.UpdateNode(self.node, {'boot_state' : state})
 
@@ -205,13 +278,15 @@ class PlanetLabSession:
                self.setup_host()
 
        def get_connection(self, config):
-               conn = NodeConnection(SocketConnection("localhost", self.port), self.node, config)
-               #i = 0
-               #while i < 3: 
-               #       print i, conn.c.modules.sys.path
-               #       print conn.c.modules.os.path.exists('/tmp/source')
-               #       i+=1
-               #       time.sleep(1)
+               try:
+                       print "SocketConnection(localhost, %s" % self.port
+                       sc = SocketConnection("localhost", self.port)
+                       print "NodeConnection(%s, %s)" % (sc, self.node)
+                       conn = NodeConnection(sc, self.node, config)
+               except:
+                       # NOTE: try twice, since the first attempt can sometimes fail; if
+                       #               it fails again, let the exception propagate.
+                       conn = NodeConnection(SocketConnection("localhost", self.port), self.node, config)
                return conn
        
        def setup_host(self):
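
get_connection now retries once because the first SocketConnection can race the
freshly forked Rpyc server; if the second attempt fails too, the exception is
left to propagate to the caller. The same idea as a generic helper
(illustrative, not part of the module):

    def try_twice(fn, *args, **kwargs):
        # the first attempt may race a server that is still starting up;
        # a second failure propagates to the caller
        try:
            return fn(*args, **kwargs)
        except Exception:
            return fn(*args, **kwargs)
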
@@ -230,7 +305,8 @@ class PlanetLabSession:
                        return 
 
                # COPY Rpyc files to host
-               cmd = "rsync -qv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+               #cmd = "rsync -vvv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+               cmd = """rsync -vvv -az -e "ssh -o BatchMode=yes" %(monitordir)s/monitor/Rpyc/ %(user)s@%(hostname)s:Rpyc""" % args
                if self.verbose: print cmd
                print cmd
                # TODO: Add timeout
@@ -243,12 +319,14 @@ class PlanetLabSession:
                        print "\tUNKNOWN SSH KEY FOR %s; making an exception" % self.node
                        #print "MAKE EXPLICIT EXCEPTION FOR %s" % self.node
                        k = SSHKnownHosts(); k.updateDirect(self.node); k.write(); del k
+                       print "trying: ", cmd
+                       print [ "%s=%s" % (a, os.environ[a]) for a in filter(lambda x: 'SSH' in x, os.environ.keys()) ]
                        ret = localos.system(cmd, timeout)
                        print ret
                        if ret != 0:
                                print "\tFAILED TWICE"
-                               #sys.exit(1)
-                               raise Exception("Failed twice trying to login with updated ssh host key")
+                               #email_exception("%s rsync failed twice" % self.node)
+                               raise ExceptionDoubleSSHError("Failed twice trying to login with updated ssh host key")
 
                t1 = time.time()
                # KILL any already running servers.
@@ -256,6 +334,7 @@ class PlanetLabSession:
                (ov,ev) = ssh.run_noexcept2("""<<\EOF
             rm -f out.log
             echo "kill server" >> out.log
+                       netstat -ap | grep python | grep 18812 | awk '{print $7}' | awk -F / '{print $1}' | xargs kill
             ps ax | grep Rpyc | grep -v grep | awk '{print $1}' | xargs kill 2> /dev/null ; 
             echo "export" >> out.log
             export PYTHONPATH=$HOME  ;
@@ -263,19 +342,6 @@ class PlanetLabSession:
             python Rpyc/Servers/forking_server.py &> server.log &
             echo "done" >> out.log
 EOF""")
-               #cmd = """ssh %(user)s@%(hostname)s """ + \
-               #        """'ps ax | grep Rpyc | grep -v grep | awk "{print \$1}" | xargs kill 2> /dev/null' """
-               #cmd = cmd % args
-               #if self.verbose: print cmd
-               ## TODO: Add timeout
-               #print localos.system(cmd,timeout)
-
-               ## START a new rpyc server.
-               #cmd = """ssh -n %(user)s@%(hostname)s "export PYTHONPATH=\$HOME; """ + \
-               #        """python Rpyc/Servers/forking_server.py &> server.log < /dev/null &" """ 
-               #cmd = cmd % args
-               #if self.verbose: print cmd
-               #print localos.system(cmd,timeout)
                print "setup rpyc server over ssh"
                print ssh.ret
 
@@ -341,30 +407,41 @@ class DebugInterface:
                        print traceback.print_exc()
                        return False
 
+               msg = "ERROR setting up session for %s" % self.hostname
                try:
                        if config == None:
                                self.session = PlanetLabSession(self.hostname, False, True)
                        else:
                                self.session = PlanetLabSession(self.hostname, config.nosetup, config.verbose)
-               except Exception, e:
-                       msg = "ERROR setting up session for %s" % self.hostname
+               except ExceptionDoubleSSHError, e:
                        print msg
+                       return False
+               except Exception, e:
                        traceback.print_exc()
                        email_exception(msg)
                        return False
 
+               print "Getting connection: 1st try"
                try:
                        conn = self.session.get_connection(config)
                except EOFError:
                        # NOTE: sometimes the wait in setup_host() is not long enough.  
                        # So, here we try to wait a little longer before giving up entirely.
                        try:
+                               print "Getting connection: 2nd try"
                                time.sleep(self.session.timeout*5)
                                conn = self.session.get_connection(config)
+                       except EOFError:
+                               # failed twice... no need to report this; the node is just in
+                               # a weird state...
+                               print "Getting connection: failed"
+                               email_exception(self.hostname, "failed twice to get connection")
+                               return False
                        except:
                                traceback.print_exc()
                                email_exception(self.hostname)
                                return False
+               print "Getting connection: ok"
                #print "trying to use conn before returning it."
                #print conn.c.modules.sys.path
                #print conn.c.modules.os.path.exists('/tmp/source')
@@ -375,131 +452,22 @@ class DebugInterface:
 
        def getSequences(self):
 
-               # TODO: This can be replaced with a DB definition at a future time.
-               #               This would make it possible for an admin to introduce new
-               #               patterns without touching code.
-               
+               # NOTE: The DB is now the authoritative record for all BM sequences.
+               #               An admin can introduce new patterns and actions without touching code.
                sequences = {}
-               # restart_bootmanager_boot
-               for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-protoerror-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-implementerror-bootupdatefail-update-debug-done",
-
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-exception-protoerror-update-protoerror-debug-done",
-
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-debug-done",
-                               "bminit-cfg-auth-getplc-exception-protoerror-update-protoerror-debug-done",
-                               "bminit-cfg-auth-protoerror-exception-update-protoerror-debug-done",
-                               "bminit-cfg-auth-protoerror-exception-update-bootupdatefail-authfail-debug-done",
-                               "bminit-cfg-auth-protoerror-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-exception-protoerror-update-debug-done",
-                               "bminit-cfg-auth-getplc-implementerror-update-debug-done",
-                               ]:
-                       sequences.update({n : "restart_bootmanager_boot"})
-
-               #       conn.restart_bootmanager('rins')
-               for n in [ "bminit-cfg-auth-getplc-installinit-validate-exception-modulefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-exception-modulefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-bziperror-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-exception-mkfsfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-installcfg-installstop-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-kernelcopyfail-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-installstop-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-kernelcopyfail-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-exception-noinstall-update-debug-done",
-                               # actual solution appears to involve removing the bad files, and
-                               # continually trying to boot the node.
-                               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-implementerror-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-exception-bmexceptmount-exception-noinstall-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-exception-bmexceptmount-exception-noinstall-update-debug-done",
-                               ]:
-                       sequences.update({n : "restart_bootmanager_rins"})
-
-               # repair_node_keys
-               sequences.update({"bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-done": "repair_node_keys"})
-
-               #   conn.restart_node('rins')
-               for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-writeerror-exception-chrootfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-implementerror-bootupdatefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-readonlyfs-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-nospace-exception-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
-                               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
-                               ]:
-                       sequences.update({n : "restart_node_rins"})
-
-               #       restart_node_boot
-               for n in ["bminit-cfg-auth-getplc-implementerror-bootupdatefail-update-debug-done",
-                                "bminit-cfg-auth-implementerror-bootcheckfail-update-debug-done",
-                                "bminit-cfg-auth-implementerror-bootcheckfail-update-implementerror-bootupdatefail-done",
-                                "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
-                                "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
-                                "bminit-cfg-auth-getplc-update-installinit-validate-implementerror-update-debug-done",
-                                ]:
-                       sequences.update({n: "restart_node_boot"})
-
-               # update_node_config_email
-               for n in ["bminit-cfg-exception-nocfg-update-bootupdatefail-nonode-debug-done",
-                                 "bminit-cfg-exception-update-bootupdatefail-nonode-debug-done",
-                                 "bminit-cfg-auth-bootcheckfail-nonode-exception-update-bootupdatefail-nonode-debug-done",
-                               ]:
-                       sequences.update({n : "update_node_config_email"})
-
-               for n in [ "bminit-cfg-exception-nodehostname-update-debug-done", 
-                                  "bminit-cfg-update-exception-nodehostname-update-debug-done", 
-                               ]:
-                       sequences.update({n : "nodenetwork_email"})
-
-               # update_bootcd_email
-               for n in ["bminit-cfg-auth-getplc-update-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-update-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
-                               "bminit-cfg-auth-getplc-hardware-exception-hardwarerequirefail-update-debug-done",
-                               ]:
-                       sequences.update({n : "update_bootcd_email"})
-
-               for n in [ "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
-                               ]:
-                       sequences.update({n: "suspect_error_email"})
-
-               # update_hardware_email
-               sequences.update({"bminit-cfg-auth-getplc-hardware-exception-disktoosmall-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
-               sequences.update({"bminit-cfg-auth-getplc-hardware-disktoosmall-exception-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
-
-               # broken_hardware_email
-               sequences.update({"bminit-cfg-auth-getplc-update-hardware-exception-hardwarerequirefail-update-debug-done" : "broken_hardware_email"})
-
-               # bad_dns_email
-               for n in [ 
-                "bminit-cfg-update-implementerror-bootupdatefail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
-                       "bminit-cfg-auth-implementerror-bootcheckfail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
-                       ]:
-                       sequences.update( { n : "bad_dns_email"})
 
+               bms = BootmanSequenceRecord.query.all()
+               for s in bms:
+                       sequences[s.sequence] = s.action
+               
                return sequences
 
        def getDiskSteps(self):
                steps = [
+                       ('scsierror2' , 'sd \d:\d:\d:\d: ioctl_internal_command return code = \d+'),
                        ('scsierror'  , 'SCSI error : <\d+ \d+ \d+ \d+> return code = 0x\d+'),
                        ('ioerror'    , 'end_request: I/O error, dev sd\w+, sector \d+'),
-                       ('ccisserror' , 'cciss: cmd \w+ has CHECK CONDITION  byte \w+ = \w+'),
+                       ('ccisserror' , 'cciss: cmd \w+ has CHECK CONDITION'),
 
                        ('buffererror', 'Buffer I/O error on device dm-\d, logical block \d+'),
 
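getSequences is now a straight dump of the BootmanSequenceRecord table, so the
dozens of hard-coded sequence strings removed above become data: adding a new
pattern is a one-row insert. Sketched with the same Elixir-style ORM the module
already uses (the exact constructor keywords are an assumption, mirroring the
ActionRecord call earlier in this diff):

    # hypothetical: register a new sequence -> action mapping
    s = BootmanSequenceRecord(
            sequence="bminit-cfg-auth-getplc-update-debug-done",
            action="restart_bootmanager_boot")
    session.flush()   # persist it, as restore() does after each run
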
@@ -570,15 +538,22 @@ class DebugInterface:
                        ('bmexceptrmfail', 'Unable to remove directory tree: /tmp/mnt'),
                        ('exception'    , 'Exception'),
                        ('nocfg'        , 'Found configuration file planet.cnf on floppy, but was unable to parse it.'),
+                       ('protoerror2'  , '500 Internal Server Error'),
                        ('protoerror'   , 'XML RPC protocol error'),
                        ('nodehostname' , 'Configured node hostname does not resolve'),
                        ('implementerror', 'Implementation Error'),
-                       ('readonlyfs'   , '[Errno 30] Read-only file system'),
-                       ('baddisk'      , "IOError: [Errno 13] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
+                       ('fsckabort'    , 'is mounted.  e2fsck: Cannot continue, aborting'),
+                       ('fsckfail'     , 'Running e2fsck -v -p /dev/planetlab/root failed'),
+                       ('fsckfail2'    , 'Running e2fsck -v -p /dev/planetlab/vservers failed'),
+                       ('readonlyfs'   , '\[Errno 30\] Read-only file system'),
+                       ('baddisk'      , "IOError: \[Errno 13\] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
                        ('noinstall'    , 'notinstalled'),
                        ('bziperror'    , 'bzip2: Data integrity error when decompressing.'),
                        ('noblockdev'   , "No block devices detected."),
+                       ('missingkernel', "missingkernel"),
                        ('dnserror'     , 'Name or service not known'),
+                       ('noparseconfig', "Found configuration file plnode.txt on floppy, but was unable to parse it"),
+                       ('noconfig'     , "Unable to find and read a node configuration file"),
                        ('downloadfail' , 'Unable to download main tarball /boot/bootstrapfs-planetlab-i386.tar.bz2 from server.'),
                        ('disktoosmall' , 'The total usable disk size of all disks is insufficient to be usable as a PlanetLab node.'),
                        ('hardwarerequirefail' , 'Hardware requirements not met'),
@@ -591,8 +566,9 @@ class DebugInterface:
                        ('nospace'      , "No space left on device"),
                        ('nonode'       , 'Failed to authenticate call: No such node'),
                        ('authfail'     , 'Failed to authenticate call: Call could not be authenticated'),
-                       ('bootcheckfail'     , 'BootCheckAuthentication'),
-                       ('bootupdatefail'   , 'BootUpdateNode'),
+                       ('authfail2'    , 'Authentication Failed'),
+                       ('bootcheckfail'  , 'BootCheckAuthentication'),
+                       ('bootupdatefail' , 'BootUpdateNode'),
                ]
                return steps
 
@@ -612,47 +588,45 @@ class DebugInterface:
 
                return sequence
                
-
 def restore(sitehist, hostname, config=None, forced_action=None):
+       ret = restore_basic(sitehist, hostname, config, forced_action)
+       session.flush()
+       return ret
+
+def restore_basic(sitehist, hostname, config=None, forced_action=None):
 
        # NOTE: Nothing works if the bootcd is REALLY old.
        #       So, this is the first step.
 
+       bootman_action = "unknown"
+
        fbnode = FindbadNodeRecord.get_latest_by(hostname=hostname).to_dict()
        recent_actions = sitehist.getRecentActions(hostname=hostname)
 
        if fbnode['observed_category'] == "OLDBOOTCD":
                print "\t...Notify owner to update BootImage!!!"
 
-               if not found_within(recent_actions, 'newbootcd_notice', 3):
+               if not found_within(recent_actions, 'newbootcd_notice', 3.5):
                        sitehist.sendMessage('newbootcd_notice', hostname=hostname)
 
                        print "\tDisabling %s due to out-of-date BootImage" % hostname
-                       api.UpdateNode(hostname, {'boot_state' : 'disable'})
+                       api.UpdateNode(hostname, {'boot_state' : 'disabled'})
 
                # NOTE: nothing else is possible.
-               return True
+               return "disabled"
 
        debugnode = DebugInterface(hostname)
        conn = debugnode.getConnection()
-       #print "conn: %s" % conn
-       #print "trying to use conn after returning it."
-       #print conn.c.modules.sys.path
-       #print conn.c.modules.os.path.exists('/tmp/source')
-       if type(conn) == type(False): return False
-
-       #if forced_action == "reboot":
-       #       conn.restart_node('rins')
-       #       return True
+       if type(conn) == type(False): return "connect_failed"
 
        boot_state = conn.get_boot_state()
        if boot_state != "debug":
                print "... %s in %s state: skipping..." % (hostname , boot_state)
-               return boot_state == "boot"
+               return "skipped" #boot_state == "boot"
 
        if conn.bootmanager_running():
                print "...BootManager is currently running.  Skipping host %s" %hostname 
-               return True
+               return "skipped" # True
 
        # Read persistent flags, tagged on one week intervals.
 
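restore() is now a two-line wrapper whose only job is to session.flush() after
restore_basic(), which has many early returns; every ActionRecord created along
the way gets committed no matter which path ran. A try/finally form would
extend the same guarantee to exceptions (a sketch of the alternative, not the
committed code):

    def restore(sitehist, hostname, config=None, forced_action=None):
        try:
            return restore_basic(sitehist, hostname, config, forced_action)
        finally:
            session.flush()   # flush even if restore_basic() raises
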
@@ -674,19 +648,24 @@ def restore(sitehist, hostname, config=None, forced_action=None):
                        print "...Should investigate.  Skipping node."
                        # TODO: send message related to these errors.
 
-                       if not found_within(recent_actions, 'newbootcd_notice', 3):
+                       if not found_within(recent_actions, 'baddisk_notice', 7):
+                               print "baddisk_notice not found recently"
 
                                log=conn.get_dmesg().read()
                                sitehist.sendMessage('baddisk_notice', hostname=hostname, log=log)
-                               conn.set_nodestate('disable')
+                               return "skipping_baddisk"
+                       else:
+                               # NOTE: "" does not add a new action record
+                               return ""
 
-                       return False
 
        print "...Downloading bm.log from %s" %hostname 
        log = conn.get_bootmanager_log()
+       bm_log_data = log.read() # get data
+       log.seek(0)     # reset fd pointer for fdspawn
        child = fdpexpect.fdspawn(log)
 
-       if hasattr(config, 'collect') and config.collect: return True
+       if hasattr(config, 'collect') and config.collect: return "collect"
 
        if config and not config.quiet: print "...Scanning bm.log for errors"
 
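bm.log is now read into bm_log_data once and the file object rewound, so the
notice emails below reuse the string instead of re-downloading the log for each
args['bmlog']. The seek(0) matters because fdspawn scans from the descriptor's
current offset:

    from pcucontrol.transports.ssh import fdpexpect  # as imported above

    log = open("bm.log", 'r')        # illustrative; restore_basic() gets it from conn
    bm_log_data = log.read()         # keep the full text for notices
    log.seek(0)                      # rewind so fdpexpect starts at the top
    child = fdpexpect.fdspawn(log)
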
@@ -713,164 +692,157 @@ def restore(sitehist, hostname, config=None, forced_action=None):
                args = {}
                args['hostname'] = hostname
                args['sequence'] = s
-               args['bmlog'] = conn.get_bootmanager_log().read()
+               args['bmlog'] = bm_log_data
                args['viart'] = False
+               args['saveact'] = True
+               args['ccemail'] = True
 
-               sitehist.sendMessage('unknownsequence_notice', **args)
+               if 'nospace' in s:
+                       # NOTE: sequence is unknown and contains nospace, so try the
+                       # fprobe repair trick first.
+                       conn.fprobe_repair_node()
 
+               sitehist.sendMessage('unknownsequence_notice', **args)
                conn.restart_bootmanager('boot')
+               bootman_action = "restart_bootmanager"
 
                # NOTE: Do not set the pflags value for this sequence if it's unknown.
                # This way, we can check it again after we've fixed it.
                flag_set = False
 
        else:
+               bootman_action = sequences[s]
 
                if   sequences[s] == "restart_bootmanager_boot":
                        print "...Restarting BootManager.py on %s "%hostname 
                        conn.restart_bootmanager('boot')
                elif sequences[s] == "restart_bootmanager_rins":
                        print "...Restarting BootManager.py on %s "%hostname 
-                       conn.restart_bootmanager('rins')
+                       conn.restart_bootmanager('reinstall')
                elif sequences[s] == "restart_node_rins":
-                       conn.restart_node('rins')
+                       conn.restart_node('reinstall')
                elif sequences[s] == "restart_node_boot":
                        conn.restart_node('boot')
+               elif sequences[s] == "fsck_repair":
+                       conn.fsck_repair_node()
                elif sequences[s] == "repair_node_keys":
                        if conn.compare_and_repair_nodekeys():
                                # the keys either are in sync or were forced in sync.
-                               # so try to reboot the node again.
-                               conn.restart_bootmanager('rins')
-                               pass
+                               # so try to start BM again.
+                               conn.restart_bootmanager(conn.get_nodestate())
                        else:
                                # there was some failure to synchronize the keys.
                                print "...Unable to repair node keys on %s" %hostname 
+                               if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
+                                       args = {}
+                                       args['hostname'] = hostname
+                                       sitehist.sendMessage('nodeconfig_notice', **args)
+                                       conn.dump_plconf_file()
+                               else:
+                                       # NOTE: do not add a new action record
+                                       return ""
 
-               elif sequences[s] == "suspect_error_email":
+               elif sequences[s] == "unknownsequence_notice":
                        args = {}
                        args['hostname'] = hostname
                        args['sequence'] = s
-                       args['bmlog'] = conn.get_bootmanager_log().read()
+                       args['bmlog'] = bm_log_data
                        args['viart'] = False
+                       args['saveact'] = True
+                       args['ccemail'] = True
 
                        sitehist.sendMessage('unknownsequence_notice', **args)
                        conn.restart_bootmanager('boot')
 
-               # TODO: differentiate this and the 'nodenetwork_email' actions.
-               elif sequences[s] == "update_node_config_email":
+               elif sequences[s] == "nodeconfig_notice":
 
-                       if not found_within(recent_actions, 'nodeconfig_notice', 3):
+                       if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
                                args = {}
                                args['hostname'] = hostname
                                sitehist.sendMessage('nodeconfig_notice', **args)
                                conn.dump_plconf_file()
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
                elif sequences[s] == "nodenetwork_email":
 
-                       if not found_within(recent_actions, 'nodeconfig_notice', 3):
+                       if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
                                args = {}
                                args['hostname'] = hostname
-                               args['bmlog'] = conn.get_bootmanager_log().read()
+                               args['bmlog'] = bm_log_data
                                sitehist.sendMessage('nodeconfig_notice', **args)
                                conn.dump_plconf_file()
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
-               elif sequences[s] == "update_bootcd_email":
+               elif sequences[s] == "noblockdevice_notice":
 
-                       if not found_within(recent_actions, 'newalphacd_notice', 3):
+                       if not found_within(recent_actions, 'noblockdevice_notice', 3.5):
                                args = {}
-                               args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
+                               #args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
                                args['hostname'] = hostname
                        
-                               sitehist.sendMessage('newalphacd_notice', **args)
-
-                               print "\tDisabling %s due to out-of-date BOOTCD" % hostname
+                               sitehist.sendMessage('noblockdevice_notice', **args)
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
-               elif sequences[s] == "broken_hardware_email":
+               elif sequences[s] == "baddisk_notice":
                        # MAKE An ACTION record that this host has failed hardware.  May
                        # require either an exception "/minhw" or other manual intervention.
                        # Definitely need to send out some more EMAIL.
                        # TODO: email notice of broken hardware
-                       if not found_within(recent_actions, 'baddisk_notice', 1):
+                       if not found_within(recent_actions, 'baddisk_notice', 7):
                                print "...NOTIFYING OWNERS OF BROKEN HARDWARE on %s!!!" % hostname
                                args = {}
                                args['hostname'] = hostname
                                args['log'] = conn.get_dmesg().read()
 
                                sitehist.sendMessage('baddisk_notice', **args)
-                               conn.set_nodestate('disable')
+                               #conn.set_nodestate('disabled')
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
-               elif sequences[s] == "update_hardware_email":
-                       if not found_within(recent_actions, 'minimalhardware_notice', 1):
+               elif sequences[s] == "minimalhardware_notice":
+                       if not found_within(recent_actions, 'minimalhardware_notice', 7):
                                print "...NOTIFYING OWNERS OF MINIMAL HARDWARE FAILURE on %s!!!" % hostname
                                args = {}
                                args['hostname'] = hostname
-                               args['bmlog'] = conn.get_bootmanager_log().read()
+                               args['bmlog'] = bm_log_data
                                sitehist.sendMessage('minimalhardware_notice', **args)
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
-               elif sequences[s] == "bad_dns_email":
+               elif sequences[s] == "baddns_notice":
                        if not found_within(recent_actions, 'baddns_notice', 1):
                                print "...NOTIFYING OWNERS OF DNS FAILURE on %s!!!" % hostname
                                args = {}
                                try:
                                        node = plccache.GetNodeByName(hostname)
-                                       net = api.GetNodeNetworks(node['nodenetwork_ids'])[0]
+                                       net = api.GetInterfaces(node['interface_ids'])[0]
                                except:
                                        email_exception()
                                        print traceback.print_exc()
                                        # TODO: api error. skip email, b/c all info is not available,
                                        # flag_set will not be recorded.
-                                       return False
+                                       return "exception"
                                nodenet_str = network_config_to_str(net)
 
                                args['hostname'] = hostname
                                args['network_config'] = nodenet_str
-                               args['nodenetwork_id'] = net['nodenetwork_id']
+                               args['interface_id'] = net['interface_id']
 
                                sitehist.sendMessage('baddns_notice', **args)
+                       else:
+                               # NOTE: do not add a new action record
+                               return ""
 
-       return True
+       return bootman_action
        
 
-# MAIN -------------------------------------------------------------------
-
-def main():
-       from monitor import parser as parsermodule
-       parser = parsermodule.getParser()
-
-       parser.set_defaults(child=False, collect=False, nosetup=False, verbose=False, 
-                                               force=None, quiet=False)
-       parser.add_option("", "--child", dest="child", action="store_true", 
-                                               help="This is the child mode of this process.")
-       parser.add_option("", "--force", dest="force", metavar="boot_state",
-                                               help="Force a boot state passed to BootManager.py.")
-       parser.add_option("", "--quiet", dest="quiet", action="store_true", 
-                                               help="Extra quiet output messages.")
-       parser.add_option("", "--verbose", dest="verbose", action="store_true", 
-                                               help="Extra debug output messages.")
-       parser.add_option("", "--nonet", dest="nonet", action="store_true", 
-                                               help="Do not setup the network, use existing log files to re-run a test pass.")
-       parser.add_option("", "--collect", dest="collect", action="store_true", 
-                                               help="No action, just collect dmesg, and bm.log")
-       parser.add_option("", "--nosetup", dest="nosetup", action="store_true", 
-                                               help="Do not perform the orginary setup phase.")
-
-       parser = parsermodule.getParser(['nodesets', 'defaults'], parser)
-       config = parsermodule.parse_args(parser)
-
-       if config.nodelist:
-               nodes = config.getListFromFile(config.nodelist)
-       elif config.node:
-               nodes = [ config.node ]
-       else:
-               parser.print_help()
-               sys.exit(1)
-
-       for node in nodes:
-               # get sitehist
-               lb = plccache.plcdb_hn2lb[node]
-               sitehist = SiteInterface.get_or_make(loginbase=lb)
-               #reboot(node, config)
-               restore(sitehist, node, config=None, forced_action=None)
-
 if __name__ == "__main__":
-       main()
+       print "ERROR: Can not execute module as a command! Please use commands/%s.py" % os.path.splitext(__file__)[0]