X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=automate-default.sh;h=a51144a2afbe719e57dbee5afca7c44814b768bd;hb=f4f26439ae2db33f8f9a55e1a3350f6ed4f78278;hp=046c1acb85d1047435d8bcf7a05e8ed31e34aab4;hpb=f6ae4843ec52f237b8c01c9fdcc9130a34518944;p=monitor.git

diff --git a/automate-default.sh b/automate-default.sh
index 046c1ac..a51144a 100755
--- a/automate-default.sh
+++ b/automate-default.sh
@@ -1,9 +1,10 @@
 #!/bin/bash
+export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
 # NOTE: Must be an absolute path to guarantee it is read.
 INSTALLPATH=/usr/share/monitor/
 
-# Generate an 'sh' style file full of variables in monitor.conf
-$INSTALLPATH/shconfig.py > $INSTALLPATH/monitorconfig.sh
+$INSTALLPATH/commands/shconfig.py > $INSTALLPATH/monitorconfig.sh
 source $INSTALLPATH/monitorconfig.sh
 cd ${MONITOR_SCRIPT_ROOT}
 set -e
@@ -12,7 +13,7 @@ MONITOR_PID="${MONITOR_SCRIPT_ROOT}/SKIP"
 
 echo "#######################################"; echo "Running Monitor at $DATE"; echo "######################################"
 echo "Performing API test"
-API=$(./testapi.py)
+API=$(${MONITOR_SCRIPT_ROOT}/tools/testapi.py)
 if [ "$API" != "ok" ] ; then
 	# NOTE: Do not try to run any commands if the API is obviously broken.
 	echo "API IS DOWN : "`date`
@@ -25,7 +26,7 @@ if [ -f $MONITOR_PID ] ; then
 	PID=`cat $MONITOR_PID`
 	rm -f $MONITOR_PID
 	if [ -z $PID ] ; then
-		${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
+		${MONITOR_SCRIPT_ROOT}/tools/kill.cmd.sh $PID
 		echo "done."
 	else
 		echo "No PID to be killed."
@@ -54,49 +55,28 @@ if [ -z "$AGENT" ] ; then
 	# if no agent is running, set it up.
 	ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
 	source ${MONITOR_SCRIPT_ROOT}/agent.sh
+	ssh-add /etc/planetlab/myops_ssh_key.rsa
 	ssh-add /etc/planetlab/debug_ssh_key.rsa
 	ssh-add /etc/planetlab/root_ssh_key.rsa
 fi
 #TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
 source ${MONITOR_SCRIPT_ROOT}/agent.sh
+${MONITOR_SCRIPT_ROOT}/commands/syncwithplc.py $DATE || :
+service plc restart monitor
 
-echo "Performing Findbad Nodes"
+echo "Performing FindAll Nodes"
 #########################
 # 1. FINDBAD NODES
-${MONITOR_SCRIPT_ROOT}/findbad.py --increment $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
 ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
-
-echo "Performing Findbad PCUs"
-#########################
-# 2. FINDBAD PCUS
-${MONITOR_SCRIPT_ROOT}/findbadpcu.py --increment $DATE || :
 # clean up stray 'locfg' processes that hang around inappropriately...
 ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || :
 
-echo "Performing uptime changes for sites, nodes, and pcus"
-########################
-# 3. record last-changed for sites, nodes and pcus.
-${MONITOR_SCRIPT_ROOT}/sitebad.py || :
-${MONITOR_SCRIPT_ROOT}/nodebad.py || :
-${MONITOR_SCRIPT_ROOT}/pcubad.py || :
-
-echo "Archiving pkl files"
-#########################
-# Archive pkl files.
-for f in findbad act_all findbadpcus l_plcnodes site_persistflags node_persistflags pcu_persistflags ; do
-	if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ] ; then
-		cp ${MONITOR_DATA_ROOT}/production.$f.pkl ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.production.$f.pkl
-	else
-		echo "Warning: It failed to archive ${MONITOR_DATA_ROOT}/production.$f.pkl"
-	fi
-done
 
-#echo "Running grouprins on all dbg nodes"
-############################
-# 5. Check if there are any nodes in dbg state.  Clean up afterward.
-#${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DOWN&&boot_state=(boot|rins|dbg|diag)' --stopselect "state=BOOT" || :
-#${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DEBUG&&boot_state=(rins|dbg|boot)' --stopselect 'state=BOOT' || :
+${MONITOR_SCRIPT_ROOT}/commands/policy.py $DATE || :
+curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || :
 
 cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log
+service plc restart monitor || :
 
 rm -f $MONITOR_PID
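The script on both sides of this patch leans on two bash idioms that are easy to miss in the diff: "set -e" makes the run abort on the first unguarded failure, while appending "|| :" to a monitoring step discards that step's exit status (":" is the shell no-op), so one broken probe cannot kill the whole cron run. A minimal sketch of the pattern follows; critical_setup and optional_probe are made-up names, not commands from the monitor package:

#!/bin/bash
# Illustrative sketch only: critical_setup and optional_probe are hypothetical.
set -e                       # abort on the first unguarded failure

critical_setup() { true; }   # stands in for a step that must succeed
optional_probe() { false; }  # stands in for a step that is allowed to fail

critical_setup               # a failure here stops the script immediately
optional_probe || :          # ':' is a no-op, so this failure is swallowed
echo "still running after optional_probe failed"

This mirrors how findall.py, policy.py and the curl fetch are each guarded with "|| :" in the new script, while the initial shconfig.py generation and the API test are left unguarded so their failure aborts the run.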