# NOTE: Must be an absolute path to guarantee it is read.
# (Despite the .py suffix this file is sourced as shell; it is expected to
# define MONITOR_SCRIPT_ROOT, MONITOR_DATA_ROOT, MONITOR_ARCHIVE_ROOT, $API.)
source /usr/share/monitor-server/monitorconfig.py
# Fail fast if the script root is unset or missing — otherwise every
# relative path below would silently resolve against cron's cwd.
cd "${MONITOR_SCRIPT_ROOT}" || exit 1
# Timestamp passed to the findbad scans below, e.g. 2009-01-31-23:59:59.
DATE=$(date +%Y-%m-%d-%T)
# Pid file of a (possibly still running) previous monitor invocation.
MONITOR_PID="$HOME/monitor/SKIP"
echo "Performing API test"
# NOTE(review): $API is presumably set by the sourced monitorconfig above;
# it is not assigned anywhere in this script — confirm.
if [ "$API" != "ok" ] ; then
# NOTE: Do not try to run any commands if the API is obviously broken.
echo "API IS DOWN : "`date`
# A leftover pid file means a previous run is (or may be) still active.
if [ -f $MONITOR_PID ] ; then
echo "KILLING Monitor"
PID=`cat $MONITOR_PID`
# Tear down the previous run identified by $PID before starting over.
${MONITOR_SCRIPT_ROOT}/kill.cmd.sh $PID
echo "SKIPPING Monitor"
# Record our own pid so the next invocation can find this run.
echo $$ > $MONITOR_PID
# Reuse an already-running ssh-agent if one exists; otherwise start one
# and load the keys used to reach PlanetLab nodes.
AGENT=`ps ax | grep ssh-agent | grep -v grep`
if [ -z "$AGENT" ] ; then
echo "starting ssh agent"
# if no agent is running, set it up.
# ssh-agent prints the environment setup (SSH_AUTH_SOCK etc.) on stdout;
# save it so later/parallel runs can source it.
ssh-agent > ${MONITOR_SCRIPT_ROOT}/agent.sh
source ${MONITOR_SCRIPT_ROOT}/agent.sh
ssh-add /etc/planetlab/debug_ssh_key.rsa
ssh-add /etc/planetlab/root_ssh_key.rsa
#TODO: should add a call to ssh-add -l to check if the keys are loaded or not.
# Pick up SSH_AUTH_SOCK/SSH_AGENT_PID from the previously saved agent env.
source ${MONITOR_SCRIPT_ROOT}/agent.sh
echo "Performing Findbad Nodes"
#########################
# Rebuild the node scan from scratch in a scratch db ("findbad2"), then
# promote the result over the live "findbad" db.  "|| :" keeps the cron
# job going even when the scan fails; a failed scan leaves no findbad2
# pkl, so the cp below then fails loudly instead of clobbering live data.
rm -f "${MONITOR_DATA_ROOT}/production.findbad2.pkl"
"${MONITOR_SCRIPT_ROOT}/findbad.py" --increment --cachenodes --debug=0 --dbname="findbad2" "$DATE" || :
cp "${MONITOR_DATA_ROOT}/production.findbad2.pkl" "${MONITOR_DATA_ROOT}/production.findbad.pkl"
# Reap stray ssh probes (spawned with -o BatchMode) that findbad left
# behind; kill with no pids errors harmlessly, hence "|| :".
ps ax | grep BatchMode | grep -v grep | awk '{print $1}' | xargs kill || :
echo "Performing Findbad PCUs"
#########################
# Same scratch-db-then-promote pattern as the node scan above, for PCUs.
rm -f "${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl"
"${MONITOR_SCRIPT_ROOT}/findbadpcu.py" --increment --refresh --debug=0 --dbname=findbadpcus2 "$DATE" || :
cp "${MONITOR_DATA_ROOT}/production.findbadpcus2.pkl" "${MONITOR_DATA_ROOT}/production.findbadpcus.pkl"
# clean up stray 'locfg' processes that hang around inappropriately...
ps ax | grep locfg | grep -v grep | awk '{print $1}' | xargs kill || :
echo "Generating web data"
# Publish the bad-nodes CSV: strip progress chatter ("loading" lines) and
# all spaces, then copy into the web root.
"${MONITOR_SCRIPT_ROOT}/printbadcsv.py" | grep -v loading | tr -d ' ' > badcsv.txt
cp badcsv.txt /plc/data/var/www/html/monitor/
# Render the top regions as an HTML table; '|' field separators become
# cell boundaries.  BUG FIX: the original sed used backslash as the s///
# delimiter ('s\|\…\g'), which sed rejects (delimiter may be any char
# except backslash/newline), so regions.html came out empty.  Use '}'.
"${MONITOR_SCRIPT_ROOT}/showlatlon.py" | head -9 | awk 'BEGIN {print "<table>"} { print "<tr><td>", $0, "</td></tr>"} END{print "</table>"}' | sed -e 's}|}</td><td>}g' > /plc/data/var/www/html/monitor/regions.html
echo "Performing uptime changes for sites, nodes, and pcus"
########################
# 3. record last-changed for sites, nodes and pcus.
# Each entity type has its own <entity>bad.py script; run them in order,
# ignoring individual failures so the remaining steps still execute.
for entity in site node pcu ; do
	${MONITOR_SCRIPT_ROOT}/${entity}bad.py --increment || :
done
echo "Converting pkl files to phpserial"
#########################
# 4. convert pkl to php serialize format.
# Table of "input-db  output-name" pairs, converted one per line.
while read -r in_db out_db ; do
	${MONITOR_SCRIPT_ROOT}/pkl2php.py -i $in_db -o $out_db
done <<EOF
findbadpcus2 findbadpcus
act_all act_all
plcdb_hn2lb plcdb_hn2lb
findbad findbadnodes
ad_dbTickets ad_dbTickets
idTickets idTickets
EOF
echo "Archiving pkl files"
#########################
# Keep a timestamped snapshot of each production db for later inspection.
for f in findbad act_all findbadpcus l_plcnodes site_persistflags node_persistflags pcu_persistflags ; do
cp ${MONITOR_DATA_ROOT}/production.$f.pkl ${MONITOR_ARCHIVE_ROOT}/`date +%F-%H:%M`.production.$f.pkl
92 echo "Running grouprins on all dbg nodes"
93 ############################
94 # 5. Check if there are any nodes in dbg state. Clean up afterward.
95 ${MONITOR_SCRIPT_ROOT}/grouprins.py --mail=1 \
96 --nodeselect 'state=DEBUG&&boot_state=(rins|dbg|boot)' \
97 --stopselect 'state=BOOT&&kernel=2.6.22.19-vs2.3.0.34.9.planetlab' \
99 ${MONITOR_SCRIPT_ROOT}/findbad.py --increment --cachenodes --debug=0 --dbname="findbad" --nodeselect 'state=DEBUG&&boot_state=dbg||state=DEBUG&&boot_state=boot' || :
echo "Collecting RT database dump"
##########################
# 6. cache the RT db locally.
# Quoted so a space in MONITOR_SCRIPT_ROOT cannot split the path.
python "${MONITOR_SCRIPT_ROOT}/rt.py"