return "unknown"
def get_dmesg(self):
+ t_stamp = time.strftime("%Y-%m-%d-%H:%M")
self.c.modules.os.system("dmesg > /var/log/dmesg.bm.log")
- download(self.c, "/var/log/dmesg.bm.log", "log/dmesg.%s.log" % self.node)
- log = open("log/dmesg.%s.log" % self.node, 'r')
+ download(self.c, "/var/log/dmesg.bm.log", "%s/history/%s-dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node))
+ os.system("cp %s/history/%s-dmesg.%s.log %s/dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node, config.MONITOR_BOOTMANAGER_LOG, self.node))
+ log = open("%s/dmesg.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, self.node), 'r')
return log
def get_bootmanager_log(self):
- download(self.c, "/tmp/bm.log", "log/bm.%s.log.gz" % self.node)
- #os.system("zcat log/bm.%s.log.gz > log/bm.%s.log" % (self.node, self.node))
- os.system("cp log/bm.%s.log.gz log/bm.%s.log" % (self.node, self.node))
- log = open("log/bm.%s.log" % self.node, 'r')
+ t_stamp = time.strftime("%Y-%m-%d-%H:%M")
+ download(self.c, "/tmp/bm.log", "%s/history/%s-bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node))
+ os.system("cp %s/history/%s-bm.%s.log %s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, t_stamp, self.node, config.MONITOR_BOOTMANAGER_LOG, self.node))
+ log = open("%s/bm.%s.log" % (config.MONITOR_BOOTMANAGER_LOG, self.node), 'r')
return log
+
+# def get_dmesg(self):
+# self.c.modules.os.system("dmesg > /var/log/dmesg.bm.log")
+# download(self.c, "/var/log/dmesg.bm.log", "log/dmesg.%s.log" % self.node)
+# log = open("log/dmesg.%s.log" % self.node, 'r')
+# return log
+#
+# def get_bootmanager_log(self):
+# download(self.c, "/tmp/bm.log", "log/bm.%s.log.gz" % self.node)
+# #os.system("zcat log/bm.%s.log.gz > log/bm.%s.log" % (self.node, self.node))
+# os.system("cp log/bm.%s.log.gz log/bm.%s.log" % (self.node, self.node))
+# log = open("log/bm.%s.log" % self.node, 'r')
+# return log
+
def dump_plconf_file(self):
c = self.c
self.c.modules.sys.path.append("/tmp/source/")
else:
print " Unable to read Node Configuration"
+ def fsck_repair_node(self):
+ c = self.c
+ self.c.modules.sys.path.append("/tmp/source/")
+ self.c.modules.os.chdir('/tmp/source')
+ # TODO: restart
+ # TODO: set boot state to node's actual boot state.
+ # could be 'boot' or 'safeboot'
+ self.c.modules.os.chdir('/tmp/source')
+ if self.c.modules.os.path.exists('/tmp/BM_RUNNING'):
+ print "Running MANUAL FSCK already... try again soon."
+ else:
+ print "Running MANUAL fsck on %s" % self.node
+ cmd = "( touch /tmp/BM_RUNNING ; " + \
+ " fsck -v -f -y /dev/planetlab/root &> out.fsck ; " + \
+ " fsck -v -f -y /dev/planetlab/vservers >> out.fsck 2>&1 ; " + \
+ " python ./BootManager.py %s &> server.log < /dev/null ; " + \
+ " rm -f /tmp/BM_RUNNING " + \
+ ") &"
+ cmd = cmd % self.get_nodestate()
+ self.c.modules.os.system(cmd)
+ #self.restart_bootmanager('boot')
+ pass
def compare_and_repair_nodekeys(self):
c = self.c
def set_nodestate(self, state='boot'):
return api.UpdateNode(self.node, {'boot_state' : state})
+ def get_nodestate(self):
+ try:
+ return api.GetNodes(self.node, ['boot_state'])[0]['boot_state']
+ except:
+ traceback.print_exc()
+ # NOTE: use last cached value from plc
+ fbnode = FindbadNodeRecord.get_latest_by(hostname=self.node).to_dict()
+ return fbnode['plc_node_stats']['boot_state']
+
+
def restart_node(self, state='boot'):
api.UpdateNode(self.node, {'boot_state' : state})
return
# COPY Rpyc files to host
- cmd = "rsync -qv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+ #cmd = "rsync -vvv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
+ cmd = """rsync -vvv -az -e "ssh -o BatchMode=yes" %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc""" % args
if self.verbose: print cmd
print cmd
# TODO: Add timeout
print "\tUNKNOWN SSH KEY FOR %s; making an exception" % self.node
#print "MAKE EXPLICIT EXCEPTION FOR %s" % self.node
k = SSHKnownHosts(); k.updateDirect(self.node); k.write(); del k
+ print "trying: ", cmd
+ print [ "%s=%s" % (a, os.environ[a]) for a in filter(lambda x: 'SSH' in x, os.environ.keys()) ]
ret = localos.system(cmd, timeout)
print ret
if ret != 0:
print "\tFAILED TWICE"
- #sys.exit(1)
+ #email_exception("%s rsync failed twice" % self.node)
raise ExceptionDoubleSSHError("Failed twice trying to login with updated ssh host key")
t1 = time.time()
python Rpyc/Servers/forking_server.py &> server.log &
echo "done" >> out.log
EOF""")
- #cmd = """ssh %(user)s@%(hostname)s """ + \
- # """'ps ax | grep Rpyc | grep -v grep | awk "{print \$1}" | xargs kill 2> /dev/null' """
- #cmd = cmd % args
- #if self.verbose: print cmd
- ## TODO: Add timeout
- #print localos.system(cmd,timeout)
-
- ## START a new rpyc server.
- #cmd = """ssh -n %(user)s@%(hostname)s "export PYTHONPATH=\$HOME; """ + \
- # """python Rpyc/Servers/forking_server.py &> server.log < /dev/null &" """
- #cmd = cmd % args
- #if self.verbose: print cmd
- #print localos.system(cmd,timeout)
print "setup rpyc server over ssh"
print ssh.ret
print traceback.print_exc()
return False
+ msg = "ERROR setting up session for %s" % self.hostname
try:
if config == None:
self.session = PlanetLabSession(self.hostname, False, True)
else:
self.session = PlanetLabSession(self.hostname, config.nosetup, config.verbose)
except ExceptionDoubleSSHError, e:
- msg = "ERROR setting up session for %s" % self.hostname
print msg
return False
except Exception, e:
"bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-debug-done",
"bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-exception-chrootfail-update-debug-done",
+ "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-exception-protoerror-protoerror-debug-validate-done",
+ "bminit-cfg-auth-protoerror-exception-update-debug-validate-exception-done",
"bminit-cfg-auth-getplc-update-debug-done",
"bminit-cfg-auth-getplc-exception-protoerror-update-protoerror-debug-done",
"bminit-cfg-auth-protoerror-exception-update-protoerror-debug-done",
"bminit-cfg-auth-getplc-update-installinit-validate-exception-bmexceptmount-exception-noinstall-update-debug-done",
"bminit-cfg-auth-getplc-update-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-validate-bmexceptvgscan-done",
"bminit-cfg-auth-getplc-update-installinit-validate-exception-noinstall-update-debug-validate-done",
+ "bminit-cfg-auth-getplc-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-validate-bmexceptvgscan-done",
+ "bminit-cfg-auth-getplc-installinit-validate-bmexceptvgscan-exception-noinstall-debug-validate-bmexceptvgscan-done",
]:
sequences.update({n : "restart_bootmanager_rins"})
# repair_node_keys
for n in ["bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-validate-exception-done",
"bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-done",
+ "bminit-cfg-auth-bootcheckfail-authfail-exception-update-debug-validate-exception-done",
+ "bminit-cfg-auth-bootcheckfail-authfail-exception-authfail-debug-validate-exception-done",
]:
sequences.update({n: "repair_node_keys"})
]:
sequences.update({n: "restart_node_boot"})
- # update_node_config_email
+ # fsck_repair
+ for n in ["bminit-cfg-auth-getplc-update-installinit-validate-fsckabort-exception-fsckfail-bmexceptmount-exception-noinstall-update-debug-validate-fsckabort-exception-fsckfail-bmexceptmount-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail-exception-noinstall-update-debug-validate-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-update-installinit-validate-exception-fsckfail-exception-noinstall-update-debug-validate-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-update-installinit-validate-exception-fsckfail2-exception-noinstall-update-debug-validate-exception-fsckfail2-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail2-exception-debug-validate-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail2-exception-debug-validate-exception-fsckfail2-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail2-exception-debug-validate-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-update-installinit-validate-fsckabort-exception-fsckfail-exception-debug-validate-fsckabort-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-update-installinit-validate-exception-fsckfail2-exception-debug-validate-exception-fsckfail2-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail-exception-debug-validate-exception-fsckfail2-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail-exception-debug-validate-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-installinit-validate-exception-fsckfail-exception-debug-validate-done",
+ "bminit-cfg-auth-getplc-update-installinit-validate-exception-fsckfail-exception-debug-validate-exception-fsckfail-done",
+ "bminit-cfg-auth-getplc-update-debug-validate-exception-fsckfail-done",
+ ]:
+ sequences.update({n : "fsck_repair"})
+
+ # nodeconfig_notice
for n in ["bminit-cfg-exception-nocfg-update-bootupdatefail-nonode-debug-done",
"bminit-cfg-exception-update-bootupdatefail-nonode-debug-done",
"bminit-cfg-exception-update-bootupdatefail-nonode-debug-validate-exception-done",
+ "bminit-cfg-exception-nocfg-update-bootupdatefail-nonode-debug-validate-exception-done",
"bminit-cfg-auth-bootcheckfail-nonode-exception-update-bootupdatefail-nonode-debug-done",
+ "bminit-cfg-exception-noconfig-nonode-debug-validate-exception-done",
+ "bminit-cfg-exception-noconfig-update-debug-validate-exception-done",
]:
- sequences.update({n : "update_node_config_email"})
+ sequences.update({n : "nodeconfig_notice"})
for n in [ "bminit-cfg-exception-nodehostname-update-debug-done",
+ "bminit-cfg-update-exception-nodehostname-update-debug-validate-exception-done",
"bminit-cfg-update-exception-nodehostname-update-debug-done",
+ "bminit-cfg-exception-nodehostname-debug-validate-exception-done",
]:
sequences.update({n : "nodenetwork_email"})
- # update_bootcd_email
+ # noblockdevice_notice
for n in ["bminit-cfg-auth-getplc-update-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
+ "bminit-cfg-auth-getplc-update-hardware-noblockdev-exception-hardwarerequirefail-update-debug-validate-bmexceptvgscan-done",
"bminit-cfg-auth-getplc-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
"bminit-cfg-auth-getplc-update-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
"bminit-cfg-auth-getplc-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
- "bminit-cfg-auth-getplc-hardware-exception-hardwarerequirefail-update-debug-done",
+ ]:
+ sequences.update({n : "noblockdevice_notice"})
+
+ # update_bootcd_email
+ for n in [ "bminit-cfg-auth-getplc-hardware-exception-hardwarerequirefail-update-debug-done",
]:
sequences.update({n : "update_bootcd_email"})
for n in [ "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
]:
- sequences.update({n: "suspect_error_email"})
+ sequences.update({n: "unknownsequence_notice"})
- # update_hardware_email
- sequences.update({"bminit-cfg-auth-getplc-hardware-exception-disktoosmall-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
- sequences.update({"bminit-cfg-auth-getplc-hardware-disktoosmall-exception-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
+ # minimalhardware_notice
+ sequences.update({"bminit-cfg-auth-getplc-hardware-exception-disktoosmall-hardwarerequirefail-update-debug-done" : "minimalhardware_notice"})
+ sequences.update({"bminit-cfg-auth-getplc-hardware-disktoosmall-exception-hardwarerequirefail-update-debug-done" : "minimalhardware_notice"})
- # broken_hardware_email
- sequences.update({"bminit-cfg-auth-getplc-update-hardware-exception-hardwarerequirefail-update-debug-done" : "broken_hardware_email"})
+ # baddisk_notice
+ sequences.update({"bminit-cfg-auth-getplc-update-hardware-exception-hardwarerequirefail-update-debug-done" : "baddisk_notice"})
- # bad_dns_email
+ # baddns_notice
for n in [
"bminit-cfg-update-implementerror-bootupdatefail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
"bminit-cfg-auth-implementerror-bootcheckfail-dnserror-update-implementerror-bootupdatefail-dnserror-done",
]:
- sequences.update( { n : "bad_dns_email"})
+ sequences.update( { n : "baddns_notice"})
return sequences
('protoerror' , 'XML RPC protocol error'),
('nodehostname' , 'Configured node hostname does not resolve'),
('implementerror', 'Implementation Error'),
- ('readonlyfs' , '[Errno 30] Read-only file system'),
- ('baddisk' , "IOError: [Errno 13] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
+ ('fsckabort' , 'is mounted. e2fsck: Cannot continue, aborting'),
+ ('fsckfail' , 'Running e2fsck -v -p /dev/planetlab/root failed'),
+ ('fsckfail2' , 'Running e2fsck -v -p /dev/planetlab/vservers failed'),
+ ('readonlyfs' , '\[Errno 30\] Read-only file system'),
+ ('baddisk' , "IOError: \[Errno 13\] Permission denied: '/tmp/mnt/sysimg//vservers/\w+/etc/hosts'"),
('noinstall' , 'notinstalled'),
('bziperror' , 'bzip2: Data integrity error when decompressing.'),
('noblockdev' , "No block devices detected."),
('dnserror' , 'Name or service not known'),
+ ('noconfig' , "Unable to find and read a node configuration file"),
('downloadfail' , 'Unable to download main tarball /boot/bootstrapfs-planetlab-i386.tar.bz2 from server.'),
('disktoosmall' , 'The total usable disk size of all disks is insufficient to be usable as a PlanetLab node.'),
('hardwarerequirefail' , 'Hardware requirements not met'),
return sequence
-
def restore(sitehist, hostname, config=None, forced_action=None):
+ ret = restore_basic(sitehist, hostname, config, forced_action)
+ session.flush()
+ return ret
+
+def restore_basic(sitehist, hostname, config=None, forced_action=None):
# NOTE: Nothing works if the bootcd is REALLY old.
# So, this is the first step.
+ bootman_action = "unknown"
+
fbnode = FindbadNodeRecord.get_latest_by(hostname=hostname).to_dict()
recent_actions = sitehist.getRecentActions(hostname=hostname)
if fbnode['observed_category'] == "OLDBOOTCD":
print "\t...Notify owner to update BootImage!!!"
- if not found_within(recent_actions, 'newbootcd_notice', 3):
+ if not found_within(recent_actions, 'newbootcd_notice', 3.5):
sitehist.sendMessage('newbootcd_notice', hostname=hostname)
print "\tDisabling %s due to out-of-date BootImage" % hostname
- api.UpdateNode(hostname, {'boot_state' : 'disable'})
+ api.UpdateNode(hostname, {'boot_state' : 'disabled'})
# NOTE: nothing else is possible.
- return True
+ return "disabled"
debugnode = DebugInterface(hostname)
conn = debugnode.getConnection()
- #print "conn: %s" % conn
- #print "trying to use conn after returning it."
- #print conn.c.modules.sys.path
- #print conn.c.modules.os.path.exists('/tmp/source')
- if type(conn) == type(False): return False
-
- #if forced_action == "reboot":
- # conn.restart_node('reinstall')
- # return True
+ if type(conn) == type(False): return "error"
boot_state = conn.get_boot_state()
if boot_state != "debug":
print "... %s in %s state: skipping..." % (hostname , boot_state)
- return boot_state == "boot"
+ return "skipped" #boot_state == "boot"
if conn.bootmanager_running():
print "...BootManager is currently running. Skipping host %s" %hostname
- return True
+ return "skipped" # True
# Read persistent flags, tagged on one week intervals.
print "...Should investigate. Skipping node."
# TODO: send message related to these errors.
- if not found_within(recent_actions, 'newbootcd_notice', 3):
+ if not found_within(recent_actions, 'baddisk_notice', 7):
+ print "baddisk_notice not found recently"
log=conn.get_dmesg().read()
sitehist.sendMessage('baddisk_notice', hostname=hostname, log=log)
- conn.set_nodestate('disable')
+ #conn.set_nodestate('disabled')
- return False
+ return "skipping_baddisk"
print "...Downloading bm.log from %s" %hostname
log = conn.get_bootmanager_log()
child = fdpexpect.fdspawn(log)
- if hasattr(config, 'collect') and config.collect: return True
+ if hasattr(config, 'collect') and config.collect: return "collect"
if config and not config.quiet: print "...Scanning bm.log for errors"
args['sequence'] = s
args['bmlog'] = conn.get_bootmanager_log().read()
args['viart'] = False
+ args['saveact'] = True
+ args['ccemail'] = True
sitehist.sendMessage('unknownsequence_notice', **args)
conn.restart_bootmanager('boot')
+ bootman_action = "restart_bootmanager"
+
# NOTE: Do not set the pflags value for this sequence if it's unknown.
# This way, we can check it again after we've fixed it.
flag_set = False
else:
+ bootman_action = sequences[s]
if sequences[s] == "restart_bootmanager_boot":
print "...Restarting BootManager.py on %s "%hostname
conn.restart_node('reinstall')
elif sequences[s] == "restart_node_boot":
conn.restart_node('boot')
+ elif sequences[s] == "fsck_repair":
+ conn.fsck_repair_node()
elif sequences[s] == "repair_node_keys":
if conn.compare_and_repair_nodekeys():
# the keys either are in sync or were forced in sync.
- # so try to reboot the node again.
- # TODO: why was this originally 'reinstall' instead of 'boot'??
- conn.restart_bootmanager('boot')
+ # so try to start BM again.
+ conn.restart_bootmanager(conn.get_nodestate())
pass
else:
# there was some failure to synchronize the keys.
print "...Unable to repair node keys on %s" %hostname
- elif sequences[s] == "suspect_error_email":
+ elif sequences[s] == "unknownsequence_notice":
args = {}
args['hostname'] = hostname
args['sequence'] = s
args['bmlog'] = conn.get_bootmanager_log().read()
args['viart'] = False
+ args['saveact'] = True
+ args['ccemail'] = True
sitehist.sendMessage('unknownsequence_notice', **args)
conn.restart_bootmanager('boot')
- # TODO: differentiate this and the 'nodenetwork_email' actions.
- elif sequences[s] == "update_node_config_email":
+ elif sequences[s] == "nodeconfig_notice":
- if not found_within(recent_actions, 'nodeconfig_notice', 3):
+ if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
args = {}
args['hostname'] = hostname
sitehist.sendMessage('nodeconfig_notice', **args)
elif sequences[s] == "nodenetwork_email":
- if not found_within(recent_actions, 'nodeconfig_notice', 3):
+ if not found_within(recent_actions, 'nodeconfig_notice', 3.5):
args = {}
args['hostname'] = hostname
args['bmlog'] = conn.get_bootmanager_log().read()
sitehist.sendMessage('nodeconfig_notice', **args)
conn.dump_plconf_file()
- elif sequences[s] == "update_bootcd_email":
+ elif sequences[s] == "noblockdevice_notice":
- if not found_within(recent_actions, 'newalphacd_notice', 3):
+ if not found_within(recent_actions, 'noblockdevice_notice', 3.5):
args = {}
- args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
+ #args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
args['hostname'] = hostname
- sitehist.sendMessage('newalphacd_notice', **args)
-
- print "\tDisabling %s due to out-of-date BOOTCD" % hostname
+ sitehist.sendMessage('noblockdevice_notice', **args)
- elif sequences[s] == "broken_hardware_email":
+ elif sequences[s] == "baddisk_notice":
# MAKE An ACTION record that this host has failed hardware. May
# require either an exception "/minhw" or other manual intervention.
# Definitely need to send out some more EMAIL.
# TODO: email notice of broken hardware
- if not found_within(recent_actions, 'baddisk_notice', 1):
+ if not found_within(recent_actions, 'baddisk_notice', 7):
print "...NOTIFYING OWNERS OF BROKEN HARDWARE on %s!!!" % hostname
args = {}
args['hostname'] = hostname
args['log'] = conn.get_dmesg().read()
sitehist.sendMessage('baddisk_notice', **args)
- conn.set_nodestate('disable')
+ #conn.set_nodestate('disabled')
- elif sequences[s] == "update_hardware_email":
- if not found_within(recent_actions, 'minimalhardware_notice', 1):
+ elif sequences[s] == "minimalhardware_notice":
+ if not found_within(recent_actions, 'minimalhardware_notice', 7):
print "...NOTIFYING OWNERS OF MINIMAL HARDWARE FAILURE on %s!!!" % hostname
args = {}
args['hostname'] = hostname
args['bmlog'] = conn.get_bootmanager_log().read()
sitehist.sendMessage('minimalhardware_notice', **args)
- elif sequences[s] == "bad_dns_email":
+ elif sequences[s] == "baddns_notice":
if not found_within(recent_actions, 'baddns_notice', 1):
print "...NOTIFYING OWNERS OF DNS FAILURE on %s!!!" % hostname
args = {}
print traceback.print_exc()
# TODO: api error. skip email, b/c all info is not available,
# flag_set will not be recorded.
- return False
+ return "exception"
nodenet_str = network_config_to_str(net)
args['hostname'] = hostname
sitehist.sendMessage('baddns_notice', **args)
- return True
+ return bootman_action
# MAIN -------------------------------------------------------------------