X-Git-Url: http://git.onelab.eu/?p=monitor.git;a=blobdiff_plain;f=automate-default.sh;h=d691c6651ad33b6a52ef76cb7e0693759851a99e;hp=ef1cc2f69798695777d29e3c5bf17b19213c9cab;hb=HEAD;hpb=7b3d462aa05fcc1892fd914db163143f36a05945

diff --git a/automate-default.sh b/automate-default.sh
index ef1cc2f..d691c66 100755
--- a/automate-default.sh
+++ b/automate-default.sh
@@ -1,21 +1,33 @@
 #!/bin/bash
+export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
+
 # NOTE: Must be an absolute path to guarantee it is read.
 INSTALLPATH=/usr/share/monitor/
-# Generate an 'sh' style file full of variables in monitor.conf
-$INSTALLPATH/shconfig.py > $INSTALLPATH/monitorconfig.sh
+$INSTALLPATH/commands/shconfig.py > $INSTALLPATH/monitorconfig.sh
 source $INSTALLPATH/monitorconfig.sh
 cd ${MONITOR_SCRIPT_ROOT}
 set -e
 DATE=`date +%Y-%m-%d-%T`
 MONITOR_PID="${MONITOR_SCRIPT_ROOT}/SKIP"
+function send_mail ()
+{
+	subject=$1
+	body=$2
+	mail -s "$subject" $exception_email <<EOF
+$body
+EOF
+}
 	ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
 	source ${MONITOR_SCRIPT_ROOT}/agent.sh
+	ssh-add /etc/planetlab/myops_ssh_key.rsa
 	ssh-add /etc/planetlab/debug_ssh_key.rsa
 	ssh-add /etc/planetlab/root_ssh_key.rsa
 fi
 #TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
 source ${MONITOR_SCRIPT_ROOT}/agent.sh
+# CHECK AGENT IS UP AND RUNNING
+count=$( ssh-add -l | wc -l )
+if [ $count -lt 3 ] ; then
+	send_mail "ssh-agent is not up and running." "Add keys before monitoring can continue"
+	exit
+fi
+
+${MONITOR_SCRIPT_ROOT}/commands/syncwithplc.py $DATE || :
 service plc restart monitor
-echo "Performing Findbad Nodes"
+echo "Performing FindAll Nodes"
 #########################
 # 1. FINDBAD NODES
-rm -f ${MONITOR_DATA_ROOT}/production.findbad2.pkl
-${MONITOR_SCRIPT_ROOT}/findbad.py --increment --cachenodes --debug=0 --dbname="findbad2" $DATE || :
-cp ${MONITOR_DATA_ROOT}/production.findbad2.pkl ${MONITOR_DATA_ROOT}/production.findbad.pkl
+${MONITOR_SCRIPT_ROOT}/commands/findall.py $DATE || :
 ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs -r kill || :
-
-echo "Performing Findbad PCUs"
-#########################
-# 2. FINDBAD PCUS
-rm -f ${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl
-${MONITOR_SCRIPT_ROOT}/findbadpcu.py --increment --refresh --debug=0 --dbname=findbadpcus2 $DATE || :
-cp ${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl ${MONITOR_DATA_ROOT}/production.findbadpcus.pkl
 # clean up stray 'locfg' processes that hang around inappropriately...
 ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs -r kill || :
-#echo "Generating web data"
-# badcsv.txt
-#${MONITOR_SCRIPT_ROOT}/printbadcsv.py | grep -v loading | tr -d ' ' > badcsv.txt
-#cp badcsv.txt /plc/data/var/www/html/monitor/
-#${MONITOR_SCRIPT_ROOT}/showlatlon.py | head -9 | awk 'BEGIN {print "<table>"} { print "<tr><td>", $0, "</td></tr>"} END{print "</table>"}' | sed -e 's\|\</td><td>\g' > /plc/data/var/www/html/monitor/regions.html
-echo "Performing uptime changes for sites, nodes, and pcus"
-########################
-# 3. record last-changed for sites, nodes and pcus.
-${MONITOR_SCRIPT_ROOT}/sitebad.py --increment || :
-${MONITOR_SCRIPT_ROOT}/nodebad.py --increment || :
-${MONITOR_SCRIPT_ROOT}/pcubad.py --increment || :
+${MONITOR_SCRIPT_ROOT}/commands/repair.py $DATE || :
+${MONITOR_SCRIPT_ROOT}/commands/policy.py $DATE || :
+curl -s 'http://summer.cs.princeton.edu/status/tabulator.cgi?table=table_nodeview&formatcsv' > /var/lib/monitor/comon/$DATE.comon.csv || :
-echo "Converting pkl files to phpserial"
-#########################
-# 4. convert pkl to php serialize format.
-${MONITOR_SCRIPT_ROOT}/pkl2php.py -i findbadpcus2 -o findbadpcus
-for f in act_all plcdb_hn2lb ; do
-	if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ]; then
-		${MONITOR_SCRIPT_ROOT}/pkl2php.py -i $f -o $f
-	else
-		echo "Warning: ${MONITOR_DATA_ROOT}/production.$f.pkl does not exist."
-	fi
-done
-${MONITOR_SCRIPT_ROOT}/pkl2php.py -i findbad -o findbadnodes
-#${MONITOR_SCRIPT_ROOT}/pkl2php.py -i ad_dbTickets -o ad_dbTickets
-#${MONITOR_SCRIPT_ROOT}/pkl2php.py -i idTickets -o idTickets
-
-echo "Archiving pkl files"
-#########################
-# Archive pkl files.
-for f in findbad act_all findbadpcus l_plcnodes site_persistflags node_persistflags pcu_persistflags ; do
-	if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ] ; then
-		cp ${MONITOR_DATA_ROOT}/production.$f.pkl ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.production.$f.pkl
-	else
-		echo "Warning: It failed to archive ${MONITOR_DATA_ROOT}/production.$f.pkl"
-	fi
-done
-
-echo "Running grouprins on all dbg nodes"
-############################
-# 5. Check if there are any nodes in dbg state. Clean up afterward.
-${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DOWN&&boot_state=(boot|rins|dbg|diag)' --stopselect "state=BOOT" || :
-${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 --reboot --nodeselect 'state=DEBUG&&boot_state=(rins|dbg|boot)' --stopselect 'state=BOOT' || :
+#${MONITOR_SCRIPT_ROOT}/statistics/add-google-record.py --email email --password password --database MonitorStats --sheet NodeHistory `${MONITOR_SCRIPT_ROOT}/statistics/get-records.py nodes`
+#${MONITOR_SCRIPT_ROOT}/statistics/add-google-record.py --email email --password password --database MonitorStats --sheet SiteHistory `${MONITOR_SCRIPT_ROOT}/statistics/get-records.py sites`
 cp ${MONITOR_SCRIPT_ROOT}/monitor.log ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.monitor.log
+service plc restart monitor || :
 rm -f $MONITOR_PID
+
+D=`date +%F-%H:%M`
+
+# NOTE: check log for major sections.
+wc=`grep -E "^(findbad|findbadpcu|nodebad|pcubad|sitebad|apply-policy)$" ${MONITOR_SCRIPT_ROOT}/monitor.log | wc -l`
+if [[ $wc -ge 6 ]] ; then
+	send_mail "A:finished monitor run for $SD at $D" "Thank you..."
+else
+	send_mail "ERROR finished monitor run for $SD at $D" "Missing some sections: 
+	$(grep -E "findbad|findbadpcu|nodebad|pcubad|sitebad|apply-policy" ${MONITOR_SCRIPT_ROOT}/monitor.log)"
+fi