#!/bin/bash
+export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
# NOTE: Must be an absolute path to guarantee it is read.
INSTALLPATH=/usr/share/monitor/
-# Generate an 'sh' style file full of variables in monitor.conf
-$INSTALLPATH/shconfig.py > $INSTALLPATH/monitorconfig.sh
+$INSTALLPATH/commands/shconfig.py > $INSTALLPATH/monitorconfig.sh
source $INSTALLPATH/monitorconfig.sh
cd ${MONITOR_SCRIPT_ROOT}
set -e
DATE=`date +%Y-%m-%d-%T`
MONITOR_PID="${MONITOR_SCRIPT_ROOT}/SKIP"
+function send_mail ()
+{
+ subject=$1
+ body=$2
+ mail -s "$subject" $exception_email <<EOF
+$body
+EOF
+}
+
+
echo "#######################################"; echo "Running Monitor at $DATE"; echo "######################################"
echo "Performing API test"
-API=$(./testapi.py)
+API=$(${MONITOR_SCRIPT_ROOT}/tools/testapi.py)
if [ "$API" != "ok" ] ; then
# NOTE: Do not try to run any commands if the API is obviously broken.
echo "API IS DOWN : "`date`
+ send_mail "API IS DOWN: canceled monitor run for `date`" "have a nice day..."
exit 1
fi
PID=`cat $MONITOR_PID`
rm -f $MONITOR_PID
if [ -z $PID ] ; then
- ${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
+ ${MONITOR_SCRIPT_ROOT}/tools/kill.cmd.sh $PID
echo "done."
else
echo "No PID to be killed."
# if no agent is running, set it up.
ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
source ${MONITOR_SCRIPT_ROOT}/agent.sh
+ ssh-add /etc/planetlab/myops_ssh_key.rsa
ssh-add /etc/planetlab/debug_ssh_key.rsa
ssh-add /etc/planetlab/root_ssh_key.rsa
fi
#TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
source ${MONITOR_SCRIPT_ROOT}/agent.sh
+# CHECK AGENT IS UP AND RUNNING
+count=$( ssh-add -l | wc -l )
+if [ $count -lt 3 ] ; then
+ send_mail "ssh-agent is not up and running." "Add keys before monitoring can continue"
+ exit 1
+fi
-echo "Performing Findbad Nodes"
+${MONITOR_SCRIPT_ROOT}/commands/syncwithplc.py $DATE || :
+service plc restart monitor
+
+echo "Performing FindAll Nodes"
#########################
# 1. FINDBAD NODES
-${MONITOR_SCRIPT_ROOT}/findbad.py --increment $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
-
-echo "Performing Findbad PCUs"
-#########################
-# 2. FINDBAD PCUS
-${MONITOR_SCRIPT_ROOT}/findbadpcu.py --increment $DATE || :
# clean up stray 'locfg' processes that hang around inappropriately...
ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || :
-echo "Performing uptime changes for sites, nodes, and pcus"
-########################
-# 3. record last-changed for sites, nodes and pcus.
-${MONITOR_SCRIPT_ROOT}/sitebad.py --increment || :
-${MONITOR_SCRIPT_ROOT}/nodebad.py --increment || :
-${MONITOR_SCRIPT_ROOT}/pcubad.py --increment || :
-echo "Archiving pkl files"
-#########################
-# Archive pkl files.
-for f in findbad act_all findbadpcus l_plcnodes site_persistflags node_persistflags pcu_persistflags ; do
- if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ] ; then
- cp ${MONITOR_DATA_ROOT}/production.$f.pkl ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.production.$f.pkl
- else
- echo "Warning: It failed to archive ${MONITOR_DATA_ROOT}/production.$f.pkl"
- fi
-done
+${MONITOR_SCRIPT_ROOT}/commands/repair.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/policy.py $DATE || :
+curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || :
-#echo "Running grouprins on all dbg nodes"
-############################
-# 5. Check if there are any nodes in dbg state. Clean up afterward.
-#${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DOWN&&boot_state=(boot|rins|dbg|diag)' --stopselect "state=BOOT" || :
-#${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DEBUG&&boot_state=(rins|dbg|boot)' --stopselect 'state=BOOT' || :
+#${MONITOR_SCRIPT_ROOT}/statistics/add-google-record.py --email email --password password --database MonitorStats --sheet NodeHistory `${MONITOR_SCRIPT_ROOT}/statistics/get-records.py nodes`
+#${MONITOR_SCRIPT_ROOT}/statistics/add-google-record.py --email email --password password --database MonitorStats --sheet SiteHistory `${MONITOR_SCRIPT_ROOT}/statistics/get-records.py sites`
cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log
+service plc restart monitor || :
rm -f $MONITOR_PID
+
+D=`date +%F-%H:%M`
+
+# NOTE: check log for major sections.
+wc=`grep -E "^(findbad|findbadpcu|nodebad|pcubad|sitebad|apply-policy)$" ${MONITOR_SCRIPT_ROOT}/monitor.log | wc -l`
+if [[ $wc -ge 6 ]] ; then
+ send_mail "A:finished monitor run for $DATE at $D" "Thank you..."
+else
+ send_mail "ERROR finished monitor run for $DATE at $D" "Missing some sections:
+ $(grep -E "findbad|findbadpcu|nodebad|pcubad|sitebad|apply-policy" ${MONITOR_SCRIPT_ROOT}/monitor.log)"
+fi
+