# NOTE: Must be an absolute path to guarantee it is read.
INSTALLPATH=/usr/share/monitor-server/
# Generate an 'sh' style file full of variables in monitor.conf
"$INSTALLPATH/shconfig.py" > "$INSTALLPATH/monitorconfig.sh"
# Pull in the generated settings (MONITOR_SCRIPT_ROOT, MONITOR_DATA_ROOT, ...).
source "$INSTALLPATH/monitorconfig.sh"
# Guard the cd: everything below uses relative state files, and running in
# the wrong directory would silently corrupt or miss them.
cd "${MONITOR_SCRIPT_ROOT}" || exit 1
DATE=$(date +%Y-%m-%d-%T)
# NOTE(review): later code both writes $$ to and reads a PID back from this
# file — it appears to double as a run-lock and a "skip" flag; confirm.
MONITOR_PID="${MONITOR_SCRIPT_ROOT}/SKIP"
# Banner: mark the start of this monitor run in the log.
echo "#######################################"; echo "Running Monitor at $DATE"; echo "######################################"
echo "Performing API test"
# NOTE(review): $API is not assigned in the visible lines — presumably set by
# monitorconfig.sh or by a probe step missing from this chunk; confirm.
if [ "$API" != "ok" ] ; then
# NOTE: Do not try to run any commands if the API is obviously broken.
echo "API IS DOWN : "`date`
# If a previous run recorded its PID, kill that instance via the helper.
if [ -f $MONITOR_PID ] ; then
echo "KILLING Monitor"
PID=`cat $MONITOR_PID`
${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
# NOTE(review): the two echoes below look like alternate branches — the
# matching 'else'/'fi' lines are not visible in this chunk; confirm against
# the full file before touching control flow here.
echo "No PID to be killed."
echo "SKIPPING Monitor"
# Record this run's PID so a later run can detect and kill it.
echo $$ > $MONITOR_PID
# SETUP act_all database if it's not there.
# One-time bootstrap guarded by a flag file so it only runs once.
if [ ! -f ${MONITOR_SCRIPT_ROOT}/actallsetup.flag ]; then
# Probe whether the act_all table loads; stderr deliberately discarded.
# NOTE(review): the closing 'fi' lines (and any setup commands inside the
# inner branch) are not visible in this chunk — confirm before editing.
if ! python -c 'import database; database.dbLoad("act_all")' 2>/dev/null ; then
touch ${MONITOR_SCRIPT_ROOT}/actallsetup.flag
# Ensure an ssh-agent is running with the monitoring keys loaded; remote
# node probes below depend on key-based ssh access.
AGENT=`ps ax | grep ssh-agent | grep -v grep`
if [ -z "$AGENT" ] ; then
echo "starting ssh agent"
# if no agent is running, set it up.
# ssh-agent prints the env-var assignments; capture them for re-sourcing.
ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
source ${MONITOR_SCRIPT_ROOT}/agent.sh
ssh-add /etc/planetlab/debug_ssh_key.rsa
ssh-add /etc/planetlab/root_ssh_key.rsa
#TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
# Re-read SSH_AUTH_SOCK/SSH_AGENT_PID from the saved agent environment.
# NOTE(review): this looks like the 'else' branch of the check above; the
# 'else'/'fi' lines are not visible in this chunk — confirm.
source ${MONITOR_SCRIPT_ROOT}/agent.sh
echo "Performing Findbad Nodes"
#########################
# Scan all nodes into a fresh findbad2 database, then publish it as the
# live findbad database. '|| :' keeps the run going on scan failure.
rm -f "${MONITOR_DATA_ROOT}/production.findbad2.pkl"
"${MONITOR_SCRIPT_ROOT}/findbad.py" --increment --cachenodes --debug=0 --dbname="findbad2" "$DATE" || :
cp "${MONITOR_DATA_ROOT}/production.findbad2.pkl" "${MONITOR_DATA_ROOT}/production.findbad.pkl"
# Reap stray BatchMode ssh probes left behind by the scan; -r skips the
# kill when nothing matched.
pgrep -f BatchMode | xargs -r kill || :
echo "Performing Findbad PCUs"
#########################
# Scan all PCUs into a fresh findbadpcus2 database, then publish it as the
# live findbadpcus database. '|| :' keeps the run going on scan failure.
rm -f "${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl"
"${MONITOR_SCRIPT_ROOT}/findbadpcu.py" --increment --refresh --debug=0 --dbname=findbadpcus2 "$DATE" || :
cp "${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl" "${MONITOR_DATA_ROOT}/production.findbadpcus.pkl"
# clean up stray 'locfg' processes that hang around inappropriately...
pgrep -f locfg | xargs -r kill || :
81 #echo "Generating web data"
83 #${MONITOR_SCRIPT_ROOT}/printbadcsv.py | grep -v loading | tr -d ' ' > badcsv.txt
84 #cp badcsv.txt /plc/data/var/www/html/monitor/
85 #${MONITOR_SCRIPT_ROOT}/showlatlon.py | head -9 | awk 'BEGIN {print "<table>"} { print "<tr><td>", $0, "</td></tr>"} END{print "</table>"}' | sed -e 's\|\</td><td>\g' > /plc/data/var/www/html/monitor/regions.html
echo "Performing uptime changes for sites, nodes, and pcus"
########################
# 3. record last-changed for sites, nodes and pcus.
# Each step is independent; '|| :' lets the remaining steps run on failure.
"${MONITOR_SCRIPT_ROOT}/sitebad.py" --increment || :
"${MONITOR_SCRIPT_ROOT}/nodebad.py" --increment || :
"${MONITOR_SCRIPT_ROOT}/pcubad.py" --increment || :
echo "Converting pkl files to phpserial"
#########################
# 4. convert pkl to php serialize format.
${MONITOR_SCRIPT_ROOT}/pkl2php.py -i findbadpcus2 -o findbadpcus
# Convert each supporting database that exists; warn about missing ones.
for f in act_all plcdb_hn2lb ; do
if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ]; then
${MONITOR_SCRIPT_ROOT}/pkl2php.py -i $f -o $f
# NOTE(review): this warning looks like the 'else' branch; the matching
# 'else'/'fi'/'done' lines are not visible in this chunk — confirm.
echo "Warning: ${MONITOR_DATA_ROOT}/production.$f.pkl does not exist."
${MONITOR_SCRIPT_ROOT}/pkl2php.py -i findbad -o findbadnodes
#${MONITOR_SCRIPT_ROOT}/pkl2php.py -i ad_dbTickets -o ad_dbTickets
#${MONITOR_SCRIPT_ROOT}/pkl2php.py -i idTickets -o idTickets
echo "Archiving pkl files"
#########################
# Keep timestamped copies of every known database for post-mortem debugging.
for f in findbad act_all findbadpcus l_plcnodes site_persistflags node_persistflags pcu_persistflags ; do
if [ -f ${MONITOR_DATA_ROOT}/production.$f.pkl ] ; then
cp ${MONITOR_DATA_ROOT}/production.$f.pkl ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.production.$f.pkl
# NOTE(review): this warning looks like the 'else' branch; the matching
# 'else'/'fi'/'done' lines are not visible in this chunk — confirm.
echo "Warning: It failed to archive ${MONITOR_DATA_ROOT}/production.$f.pkl"
echo "Running grouprins on all dbg nodes"
############################
# 5. Check if there are any nodes in dbg state. Clean up afterward.
# Two passes: first DOWN nodes in any reboot-worthy boot_state, then nodes
# already in DEBUG; both stop once a node reaches BOOT. '|| :' keeps going.
"${MONITOR_SCRIPT_ROOT}/grouprins.py" --mail=1 --reboot --nodeselect 'state=DOWN&&boot_state=(boot|rins|dbg|diag)' --stopselect "state=BOOT" || :
"${MONITOR_SCRIPT_ROOT}/grouprins.py" --mail=1 --reboot --nodeselect 'state=DEBUG&&boot_state=(rins|dbg|boot)' --stopselect 'state=BOOT' || :
# Archive this run's log with a timestamped name.
cp "${MONITOR_SCRIPT_ROOT}/monitor.log" "${MONITOR_ARCHIVE_ROOT}/$(date +%F-%H:%M).monitor.log"