#!/bin/bash
#
# Nightly monitor driver: verifies the PLC API is reachable, loads the
# ssh keys into an agent, scans all nodes (findall), applies policy, and
# archives the log.  Expects monitorconfig.sh to define at least:
#   MONITOR_SCRIPT_ROOT, MONITOR_ARCHIVE_ROOT, MONITOR_PID, DATE
#
# NOTE(review): this file previously contained leftover '+'/'-' diff
# markers from a half-applied merge; the post-patch ('+') lines were
# kept and the pre-patch ('-') lines dropped.
export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

# NOTE: Must be an absolute path to guarantee it is read.
INSTALLPATH=/usr/share/monitor/

# Generate an 'sh' style file full of variables in monitor.conf
$INSTALLPATH/commands/shconfig.py > $INSTALLPATH/monitorconfig.sh
source $INSTALLPATH/monitorconfig.sh
cd ${MONITOR_SCRIPT_ROOT}
set -e

echo "#######################################"
echo "Running Monitor at $DATE"
echo "#######################################"

echo "Performing API test"
API=$(${MONITOR_SCRIPT_ROOT}/tools/testapi.py)
if [ "$API" != "ok" ] ; then
	# NOTE: Do not try to run any commands if the API is obviously broken.
	echo "API IS DOWN : "`date`
	PID=`cat "$MONITOR_PID" 2>/dev/null`
	rm -f "$MONITOR_PID"
	# BUGFIX: the old test was [ -z $PID ], which invoked kill.cmd.sh
	# only when there was *no* PID to kill (with an empty argument).
	if [ -n "$PID" ] ; then
		${MONITOR_SCRIPT_ROOT}/tools/kill.cmd.sh "$PID"
		echo "done."
	else
		echo "No PID to be killed."
	fi
	# Honor the NOTE above: stop here instead of falling through to the
	# scan/policy steps (the merged file had left this 'if' unclosed).
	exit 1
fi

# if no agent is running, set it up.
# NOTE(review): in the merged file this setup sat inside the API-down
# branch, where it can never help the node scans below that need the
# keys; moved to the normal path — confirm against repository history.
if [ -z "$SSH_AGENT_PID" ] ; then
	ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
	source ${MONITOR_SCRIPT_ROOT}/agent.sh
	ssh-add /etc/planetlab/myops_ssh_key.rsa
	ssh-add /etc/planetlab/debug_ssh_key.rsa
	ssh-add /etc/planetlab/root_ssh_key.rsa
fi
#TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
source ${MONITOR_SCRIPT_ROOT}/agent.sh

${MONITOR_SCRIPT_ROOT}/commands/syncwithplc.py $DATE || :
service plc restart monitor

echo "Performing FindAll Nodes"
#########################
# 1. FINDALL NODES
${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
# Kill any node-scan ssh sessions still hanging around (they run with
# BatchMode=yes); best-effort, never abort the run.
ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
# clean up stray 'locfg' processes that hang around inappropriately...
ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || :

${MONITOR_SCRIPT_ROOT}/commands/policy.py $DATE || :
# Snapshot CoMon's node table for cross-checking; best-effort.
curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || :

cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log
service plc restart monitor || :
rm -f "$MONITOR_PID"