ls web/collect
ls $RPM_BUILD_ROOT
+install -D -m 644 README $RPM_BUILD_ROOT/usr/share/%{name}/
install -D -m 644 web/db-config.d/030-conf_files_myops $RPM_BUILD_ROOT/etc/planetlab/db-config.d
install -D -m 755 web/collect/sar2graphite.py $RPM_BUILD_ROOT/usr/bin/
install -D -m 755 web/collect/client/update.sh $RPM_BUILD_ROOT/var/www/html/PlanetLabConf/myops_update_sh
%files getqueryview
%defattr(-,root,root)
+/usr/share/%{name}/README
/usr/share/%{name}/web
/%{_sysconfdir}/cron.d/sysstat.cron
/var/www/html/PlanetLabConf/myops_update_sh
--- /dev/null
+Steps to deploy myops2:
+
+ * test the myops_update_sh script on one more node.
+ * move myops data into appropriate NS.
+ * update PLC with myops_update_sh script.
+ *
+ * integrate log axis patch into installation.
+ * shutdown promultis:myops
+
+Done:
+ * Copy log axis patch.
+ * update extraPLC machines
+ * add myops_cacert.pem to build.sh
+ * preserve old logs (via sync)
+ * Sync metaplc stats from stormers to echo
+ * update VICCI to point at echo instead of stormers.
+
+Follow-up:
+ * Make sure vicci update is running correctly!!!
+ * Vicci on myops.planet-lab.org has stopped
+ * vicci on echo has begun / continued.
+
$min3 */1 * * * /home/${PLC_SLICE_PREFIX}_myops/update.sh
EOF
-yum install -y lshw
+if [ ! -x /usr/sbin/lshw ] ; then
+ yum install -y lshw
+fi
+
+if [ ! -f /usr/boot/myops_cacert.pem ] ; then
+ mkdir -p /usr/boot
+ cp -f myops_cacert.pem /usr/boot/myops_cacert.pem
+fi
chmod 755 /home/${PLC_SLICE_PREFIX}_myops/collect.sh
chmod 755 /home/${PLC_SLICE_PREFIX}_myops/upload.sh
--- /dev/null
+#!/usr/bin/python
+
+import commands
+import os
+import re
+import socket
+import struct
+import DNS
+import time
+from history import *
+
+
+def get_success_ratio(measured):
+    # Fraction of samples in `measured` that are positive (i.e. successful
+    # probes).  NOTE(review): raises ZeroDivisionError when `measured` is
+    # empty -- callers appear to always append a sample first; confirm.
+    return float(len(filter(lambda x: x > 0, measured)))/float(len(measured))
+
+def timed(method):
+    # Decorator: wrap `method` so every call returns the tuple
+    # (result, elapsed_seconds) instead of just the result.
+
+    def timeit(*args, **kw):
+        ts = time.time()
+        result = method(*args, **kw)
+        te = time.time()
+
+        #print '%r (%r, %r) %2.2f sec' % \
+        #      (method.__name__, args, kw, te-ts)
+        return (result, te-ts)
+
+    return timeit
+
+@timed
+def check_dns(ip, protocol='udp'):
+    # Query the nameserver at `ip` for an A record of www.yahoo.com over
+    # the given protocol ('udp' or 'tcp'), with a 45s timeout.  Returns
+    # "OK" on success or an "Error: ..." string on DNS failure.  Because
+    # of the @timed wrapper, callers actually receive
+    # (status_string, elapsed_seconds).
+    try:
+        #ip = ip[:-1] + "0"
+        ro = DNS.Request(name="www.yahoo.com", qtype="A", server=ip, timeout=45)
+        r = ro.req(protocol=protocol)
+        r = "OK"
+    except DNS.Base.DNSError, e:
+        r = "Error: %s" % e
+    return r
+
+def get_nameserver_ips(filename):
+    # Collect unique IPv4 addresses from a resolver configuration file.
+    # Two formats are supported, selected by a substring of the filename:
+    #   * "resolv"  -- resolv.conf style: any dotted-quad token on a line
+    #   * "ifcfg"   -- ifcfg style: values of DNS*=... assignments
+    # Returns a dict mapping each address to 0 (used as a unique set);
+    # empty dict if the file does not exist.  NOTE(review): the file
+    # handle is never closed explicitly -- relies on CPython refcounting.
+    ip_re = re.compile("\d+\.\d+\.\d+\.\d+")
+    ret = {}
+    if not os.path.exists(filename):
+        return ret
+
+    f = open(filename, 'r')
+
+    if 'resolv' in filename:
+        for l in f:
+            for field in l.strip().split():
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+
+    if 'ifcfg' in filename:
+        # Lines look like DNS1="1.2.3.4"; split on '=' and strip quotes.
+        for l in f:
+            if 'DNS' not in l:
+                continue
+            for field in l.strip().split('='):
+                field = field.replace('"', '')
+                field = field.replace("'", '')
+                if ip_re.match(field) and field not in ret:
+                    ret[field] = 0
+    return ret
+
+def main():
+    # Probe the nameservers configured in the root context and in the
+    # princeton_comon slice over both UDP and TCP, record each timing in a
+    # per-server history file, and print a one-line summary ending with
+    # CoMon's local DNSFail reading (when available).
+
+    root_ips = get_nameserver_ips('/etc/resolv.conf')
+    slice_ips = get_nameserver_ips( '/vservers/princeton_comon/etc/resolv.conf')
+
+    context_list = ['root', 'slice']
+    proto_list = ['udp', 'tcp']
+
+    # Pair each context label with (ip, protocol) tuples.  NOTE(review):
+    # zip truncates to the shorter sequence, so at most four (ip, proto)
+    # pairs are produced per context no matter how many nameservers are
+    # configured -- confirm this is intended.
+    sequence_list = zip(sorted(context_list*2), zip(root_ips.keys(),sorted(proto_list*2)[::-1]))
+    sequence_list += zip(sorted(context_list*2), zip(slice_ips.keys(),sorted(proto_list*2)[::-1]))
+
+    if set(root_ips.keys()) == set(slice_ips.keys()):
+        print "CONF-ROOT_SLICE-MATCH",
+    else:
+        print "CONF-ROOT_SLICE-MISMATCH",
+
+    for i,(context,(ip,proto)) in enumerate(sequence_list):
+        (s,t) = check_dns(ip, proto)
+        # A failed lookup is recorded as -1 so it still counts as a sample
+        # (non-zero slot) but not as a success (see get_success_ratio).
+        if "Error" in s: t = -1
+        dns = HistoryFile("dns_history_%s_%s%s.dat" % (context, proto, i), DNSHistory)
+        dns.append(t)
+        print get_success_ratio(dns.get()),
+        dns.close()
+
+    # Append CoMon's DNSFail metric from the local CoMon daemon, dropping
+    # the 9-character key prefix and tagging the value with "cm".
+    c_dns = os.popen("curl -s http://localhost:3121 | grep -a DNSFail").read().strip()
+    if len(c_dns) > 9 and "DNS" in c_dns:
+        c_dns = "cm " + c_dns[9:]
+    else:
+        c_dns = ""
+    print c_dns,
+
+    print ""
+
+
+if __name__ == "__main__":
+ main()
+
cd /home/${PLC_SLICE_PREFIX}_myops
-if [ -f /usr/boot/cacert.pem ] ; then
- CURL_ARGS="$CURL_ARGS --cacert /usr/boot/cacert.pem"
+if [ -f /usr/boot/myops_cacert.pem ] ; then
+ CURL_ARGS="$CURL_ARGS --cacert /usr/boot/myops_cacert.pem"
else
CURL_ARGS="$CURL_ARGS --insecure"
fi
--- /dev/null
+#!/usr/bin/python
+
+
+import ctypes
+import os
+import hashlib
+
+# Raised by History.restore() when the stored checksum does not verify.
+class ExceptionCorruptData(Exception): pass
+
+# TODO: restructure this module for readability when time permits.
+class History(ctypes.Structure):
+    # Fixed-size, checksummed record persisted to disk by HistoryFile.
+    # Subclasses (e.g. DNSHistory) provide the ctypes _fields_ layout;
+    # the first field must be the DIGEST_length-byte checksum (see NOTE).
+
+    HISTORY_version = 2
+    HISTORY_length = 30*24 # 30 days once an hour
+    DIGEST_length = 32
+
+    # NOTE: assumes the first DIGEST_length bytes are a hexdigest checksum.
+
+    def save(self):
+        # Serialize the struct: compute the md5 hexdigest of the payload
+        # (everything after the checksum field), write it into the leading
+        # checksum field in place, then return the full byte image.
+        bytes = buffer(self)[:]
+        d = self.digest(bytes)
+        ctypes.memmove(ctypes.addressof(self), d, len(d))
+        bytes = buffer(self)[:]
+        return bytes
+
+    def digest(self, bytes):
+        # md5 hexdigest (32 hex chars == DIGEST_length) of the payload,
+        # i.e. everything after the leading checksum field.
+        m = hashlib.md5()
+        m.update(bytes[History.DIGEST_length:])
+        d = m.hexdigest()
+        return d
+
+    def verify(self, bytes, hexdigest):
+        # NOTE(review): verification is currently DISABLED -- the digest
+        # comparison is commented out and every payload is accepted, so
+        # restore() never raises ExceptionCorruptData in practice.
+        d = self.digest(bytes)
+        #return d == hexdigest
+        return True
+
+    def restore(self, bytes):
+        # Deserialize from `bytes`, copying at most sizeof(self) bytes so
+        # a shorter (older/truncated) file cannot overrun the struct.
+        fit = min(len(bytes), ctypes.sizeof(self))
+        hexdigest = bytes[:History.DIGEST_length]
+        if self.verify(bytes, hexdigest):
+            ctypes.memmove(ctypes.addressof(self), bytes, fit)
+        else:
+            raise ExceptionCorruptData()
+        return
+
+class HistoryFile:
+    # Binds a History subclass instance to a file on disk.  The file is
+    # created with empty data on first use, otherwise loaded; close()
+    # writes the (possibly updated) record back.  Unknown attribute
+    # access falls through to the underlying struct, so callers may use
+    # append()/get() directly on a HistoryFile.
+    def __init__(self, filename, subtype):
+        self.subtype = subtype
+        self.struct = self.subtype()
+        self.filename = filename
+        if not os.path.exists(self.filename):
+            # create for the first time, with empty data
+            self.write(False)
+        else:
+            self.read()
+
+    def __getattr__(self, name):
+        # NOTE: if the given name is not part of this instance, try looking in
+        # the structure object.
+        return getattr(self.struct, name)
+
+    def close(self):
+        # Persist current contents; no fd is held open between operations,
+        # so there is nothing else to release.
+        self.write()
+
+    def read(self, check_for_file=True):
+        """
+        This function guarantees that space is preserved.
+        If one of the file operations fail, it will throw an exception.
+        """
+        # the file should already exist
+        if check_for_file:
+            assert os.path.exists(self.filename)
+
+        fd = os.open(self.filename, os.O_RDONLY)
+        a = os.read(fd, os.path.getsize(self.filename))
+        os.close(fd)
+        try:
+            self.struct.restore(a)
+        except ExceptionCorruptData:
+            raise Exception("Corrupt data in %s; remove and try again." % self.filename)
+        # NOTE(review): bare except below -- ANY failure here (not only the
+        # version assert) silently triggers an upgrade that discards the
+        # existing data file.
+        try:
+            assert self.struct.version >= History.HISTORY_version
+        except:
+            print "Old version found; updating data file."
+            self.upgrade(self.filename)
+            # create for the first time, with empty data
+            self.struct = self.subtype()
+            self.write(False)
+
+        return True
+
+    def write(self, check_for_file=True):
+        # the file should already exist
+        if check_for_file:
+            assert os.path.exists(self.filename)
+
+        # open without TRUNC nor APPEND, then seek to beginning to preserve space on disk
+        fd = os.open(self.filename, os.O_WRONLY|os.O_CREAT)
+        os.lseek(fd, 0, 0)
+        ret = os.write(fd, self.struct.save())
+        os.close(fd)
+        return ret
+
+    def upgrade(self, filename):
+        # TODO: in the future a more clever version migration might be nice.
+        # Dump the old samples to stdout (ends up in the cron log) before
+        # removing the file.
+        print [ h for h in self.struct.history ]
+        os.remove(filename) # just nuke the old version
+
+class DNSHistory(History):
+    # Concrete History record: a 30-day hourly ring buffer of DNS probe
+    # timings.  Layout: checksum | format version | write cursor | samples.
+    _fields_ = [ ("checksum", ctypes.c_char * History.DIGEST_length),
+                 ("version", ctypes.c_int),
+                 ("index", ctypes.c_int),
+                 ("history", ctypes.c_float * History.HISTORY_length), ]
+
+    def __init__(self, *args, **kwargs):
+        super(DNSHistory, self).__init__(*args, **kwargs)
+
+        # Fresh (empty) record: zero-filled checksum placeholder, current
+        # on-disk format version, write cursor at slot 0.
+        self.checksum = "0"*History.DIGEST_length
+        self.version = History.HISTORY_version
+        self.index = 0
+
+    def get(self):
+        # Return recorded samples oldest-first: rotate the ring buffer so
+        # it starts at the write cursor, then drop never-written (0) slots.
+        summary = self.history[self.index:] + self.history[:self.index]
+        measured = filter(lambda x: x != 0, summary)
+        return measured
+
+    def append(self, data):
+        # Store `data` at the write cursor and advance it, wrapping at
+        # HISTORY_length.  The commented-out code below is a remnant of an
+        # earlier counter-delta scheme that handled reboots/resets.
+        #try:
+        # note, this won't be the case when reboot occurs, or on first run.
+        #assert last_value > 0.0
+        #assert data > last_value
+        #print "Recording: %s"% (data-last_value)
+        #history[i] = data-last_value
+        self.history[self.index] = data
+        self.index += 1
+        self.index = self.index % History.HISTORY_length
+        #except:
+        # on init when last_value is 0, or reboot when counter resets.
+        # do not record data except for last_value, do not increment index
+        # pass
+
+        #last_value = data
+        return
+
+if __name__ == "__main__":
+ d = HistoryFile('test.dat', DNSHistory)
+ d.append(-1)
+ d.close()
CURL_ARGS=""
fi
-if [ -f /usr/boot/cacert.pem ] ; then
- CURL_ARGS="$CURL_ARGS --cacert /usr/boot/cacert.pem"
+if [ -f /usr/boot/myops_cacert.pem ] ; then
+ CURL_ARGS="$CURL_ARGS --cacert /usr/boot/myops_cacert.pem"
else
CURL_ARGS="$CURL_ARGS --insecure"
fi
tar -xzf $FILE
chmod 755 ./*.sh ./*.py
./bootstrap.sh || exit 1
- touch $HDIR/update_ok
+ touch $HDIR/update_ok
fi
else
PLC_SLICE_PREFIX='pl'
fi
-if [ -f /usr/boot/cacert.pem ] ; then
- CURL_ARGS="$CURL_ARGS --cacert /usr/boot/cacert.pem"
+if [ -f /usr/boot/myops_cacert.pem ] ; then
+ CURL_ARGS="$CURL_ARGS --cacert /usr/boot/myops_cacert.pem"
else
CURL_ARGS="$CURL_ARGS --insecure"
fi
#comm -1 -3 $old $new > $log
cp $new $log
if [ $( stat -c %s $log ) -ne 0 ] ; then
- curl $CURL_ARGS --max-time 60 --silent https://${MYOPS_SERVER}/upload.php --form "log=@$log"
+ curl $CURL_ARGS --max-time 60 --silent https://${MYOPS_SERVER}/upload.php --form "namespace=${PLC_SLICE_PREFIX}" --form "log=@$log"
if [ $? -ne 0 ] ; then
# the upload has failed, so remove new file so no data is lost
rm -f /tmp/$( basename $file ).new
#!/bin/bash
-FILELIST="sysinfo get_bootcd_version.sh bootstrap.sh collect.sh upload.sh timeout3.sh check_dns.py DNS check_bw.py check_uptime.py update.sh"
+
+if [ -f /etc/planetlab/boot_ssl.crt ] ; then
+ cp /etc/planetlab/boot_ssl.crt $PWD/../client/myops_cacert.pem
+else
+ if [ -f /etc/pki/tls/certs/localhost.crt ] ; then
+ cp /etc/pki/tls/certs/localhost.crt $PWD/../client/myops_cacert.pem
+ fi
+fi
+
+FILELIST="sysinfo get_bootcd_version.sh bootstrap.sh collect.sh upload.sh timeout3.sh check_dns.py DNS check_bw.py check_uptime.py update.sh myops_cacert.pem"
tar -zcf /var/www/html/PlanetLabConf/bootstrap.tar.gz -C /usr/share/myops/web/collect/client/ .
if [ ! -f /var/www/html/PlanetLabConf/input.cfg ] ; then
ln input.cfg /var/www/html/PlanetLabConf/input.cfg
#!/bin/bash
-PREFIX=/var/www/html/uploadlogs/raw
+PREFIX=/var/www/html/uploadlogs
EXEC_PREFIX=/usr/share/myops/web/collect/server
-lasthour=`date +%Y-%m-%dT%H -d "1 hour ago"`
-mkdir -p $PREFIX/old/$lasthour
-for file in `ls $PREFIX/$lasthour*--upload`; do
- $EXEC_PREFIX/load_couch.py $file && mv $file $PREFIX/old/$lasthour
-done
+cd $PREFIX
+NS_LIST=`ls`
-#NOTE: temporary; catch any stragglers still being sent to monitor and copied here
-for file in `ls /root/$lasthour*--upload`; do
- $EXEC_PREFIX/load_couch.py $file && mv $file $PREFIX/old/$lasthour
-done
+for ns in $NS_LIST; do
+
+ lasthour=`date +%Y-%m-%dT%H -d "1 hour ago"`
+ mkdir -p $PREFIX/$ns/old/$lasthour
+ for file in `ls $PREFIX/$ns/$lasthour*--upload`; do
+ $EXEC_PREFIX/load_couch.py $file && mv $file $PREFIX/$ns/old/$lasthour
+ done
-$EXEC_PREFIX/summarize_logs.sh || :
-$EXEC_PREFIX/summarize_rpms.sh || :
-$EXEC_PREFIX/summarize_stats.sh || :
+ $EXEC_PREFIX/summarize_logs.sh $ns || :
+ $EXEC_PREFIX/summarize_rpms.sh $ns || :
+ $EXEC_PREFIX/summarize_stats.sh $ns || :
-thishour=`date +%Y-%m-%dT%H`
-mkdir -p $PREFIX/old/$thishour
-# NOTE: do it twice for all the files that were uploaded while the above was running.
-for file in `ls $PREFIX/$thishour*--upload`; do
- $EXEC_PREFIX/load_couch.py $file && mv $file $PREFIX/old/$thishour
done
curl -s 'http://HOSTNAME:5984/myops/_design/myops/_list/nodelist/node-status?fields=hostname&skip_header' | wc -l &> /dev/null
dns : /home/pl_myops/check_dns.py 2>&1
bw : /home/pl_myops/check_bw.py 2>&1
uptime_avg : /home/pl_myops/check_uptime.py 2>&1
-update : if [ ! -f ./bootstrap_ok ] ; then curl -s -O --insecure https://128.112.139.115/PlanetLabConf/myops_update_sh ; chmod 755 ./myops_update_sh ; ./myops_update_sh ; fi
+update : if [ ! -f ./update_ok ] ; then curl -s -O --insecure https://128.112.139.3/PlanetLabConf/myops_update_sh ; chmod 755 ./myops_update_sh ; ./myops_update_sh ; fi
#!/bin/bash
-PREFIX=/var/www/html/uploadlogs/raw/old
+if [ -z "$1" ] ; then
+ ns="raw"
+else
+ ns="$1"
+fi
+if [ -z "$2" ] ; then
+ DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
+else
+ DATE="$2"
+fi
+PREFIX=/var/www/html/uploadlogs/$ns/old
# get keys
key_list="bootcd_version
real_bootcd_version
"
-DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
#DATE=$1
# get count of unique values from keys
while read value value_name ; do
#echo -e "\tmyops.$key.$value_name $value $DATE"
#echo -e "\t"/root/load-graphite.py --target "myops.$key.$value_name" --date $DATE --value $value
- /usr/share/myops/web/collect/server/load-graphite.py --target "myops.$key.$value_name" --date $DATE --value $value
+ /usr/share/myops/web/collect/server/load-graphite.py --target "myops.$ns.$key.$value_name" --date $DATE --value $value
sleep .2
done
fi
#!/bin/bash
-PREFIX=/var/www/html/uploadlogs/raw/old
+if [ -z "$1" ] ; then
+ ns="raw"
+else
+ ns="$1"
+fi
+if [ -z "$2" ] ; then
+ DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
+else
+ DATE="$2"
+fi
+PREFIX=/var/www/html/uploadlogs/$ns/old
# get keys
key_list="NodeManager
vsys-scripts
"
-#grep -E "^rpm_versions " * | \
-# awk '{ for(o=1;o<=NF;o++){ if ( $o ~ /planetlab/){ print $(o) ; } } }' | \
-# awk -F. '{printf("%s.%s.%s\n", $1,$2,$3) }' | sort | uniq -c
-
-
-DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
-#DATE=$1
-
# get count of unique values from keys
echo $START Hours ago == $DATE
c=`ls $PREFIX/$DATE/*--upload | wc -l`
awk -F. '{printf("%s.%s.%s\n", $1,$2,$3) }' | grep $rpm | sort | uniq -c |
sed -e 's/\./_/g' -e 's/\//_/g' | awk 'BEGIN{total=0} { total += $1 ; print $0 } END { print total, "total"}' | \
while read value value_name ; do
- /root/load-graphite.py --target "myops.rpms.$rpm.$value_name" --date $DATE --value $value
+ /usr/share/myops/web/collect/server/load-graphite.py --target "myops.rpms.$rpm.$value_name" --date $DATE --value $value
sleep .2
done
not_found=`grep -h -E "^rpm_versions .*" $PREFIX/$DATE/*--upload | grep -v "$rpm" | wc -l`
#!/bin/bash
-PREFIX=/var/www/html/uploadlogs/raw/old
+if [ -z "$1" ] ; then
+ ns="raw"
+else
+ ns="$1"
+fi
+if [ -z "$2" ] ; then
+ DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
+else
+ DATE="$2"
+fi
+PREFIX=/var/www/html/uploadlogs/$ns/old
# get keys
key_list="uptime free_disk_root free_disk_vservers fprobe_size"
extra="
"
-#grep -E "^rpm_versions " * | \
-# awk '{ for(o=1;o<=NF;o++){ if ( $o ~ /planetlab/){ print $(o) ; } } }' | \
-# awk -F. '{printf("%s.%s.%s\n", $1,$2,$3) }' | sort | uniq -c
-
-
-if [[ -z "$1" ]] ; then
- DATE=$( date --date "1 hour ago" +%Y-%m-%dT%H )
-else
- DATE=$1
-fi
-
# get count of unique values from keys
echo $START Hours ago == $DATE
c=`ls $PREFIX/$DATE/*--upload | wc -l`
awk '{print $3}' | sort -n | \
/usr/share/myops/web/collect/server/stats.py | \
while read value_name value ; do
- /usr/share/myops/web/collect/server/load-graphite.py --target "myops.stats.$val.$value_name" --date $DATE --value $value
+ /usr/share/myops/web/collect/server/load-graphite.py --target "myops.$ns.stats.$val.$value_name" --date $DATE --value $value
sleep .2
done
fi
// }
date_default_timezone_set("UTC");
+if ( isset( $_REQUEST['namespace'] ) ) {
+ $ns = $_REQUEST['namespace'];
+} else {
+ $ns = "raw";
+}
-$rawdir=$logdir . "/raw";
+$rawdir=$logdir . "/" . $ns;
$date=strftime("%Y-%m-%dT%H:%M:%S");
$log_name=$date . "--" . $ip . "--upload";
$log_path=$rawdir . "/" . $log_name;
'file_group': u'root',
'file_owner': u'root',
'file_permissions': u'755',
- 'preinstall_cmd': u' if ! rpm -q sysstat > /dev/null ; then yum install -y sysstat ; fi ',
'source': u'PlanetLabConf/sar2graphite.py'},
{'dest': u'/etc/cron.d/sysstat',
'always_update': False,
#/bin/bash
+# todo: ssl support is not default for apache.
+# + update scripts to allow both https/http.
+# + add log axis patch.
+
+
+########################################
+
+if [ -z "$SETUP_IP" ] ; then
+ export IP=`ip addr show eth0 | grep -E "eth0$|eth0:[[:digit:]]+$" | tr '/' ' ' | awk '{print $2}'`
+else
+ export IP=$SETUP_IP
+fi
+if [ -z "$SETUP_HOST" ] ; then
+ if ! rpm -q bind-utils > /dev/null ; then
+ yum install -y bind-utils
+ fi
+ export HOST=`host $IP | cut -d ' ' -f 5 | tr [A-Z] [a-z] | sed -e 's/\.$//g'`
+else
+ export HOST=$SETUP_HOST
+fi
+
+echo "---------------------------------------------------------------"
+echo "setup.sh will configure your system using the following values:"
+echo "IP: $IP"
+echo "HOSTNAME: $HOST"
+echo ""
+echo "You can manually set the IP and HOST, by setting shell variables:"
+echo "export SETUP_IP="
+echo "export SETUP_HOST="
+echo "Proceed? (yes/no):"
+while read userinput ; do
+ if [[ -z "$userinput" ]] ; then
+ echo "Assuming this is correct."
+ break
+ fi
+ if [[ "$userinput" = "yes" || "$userinput" = "y" ]] ; then
+ break
+ fi
+ if [[ "$userinput" = "no" || "$userinput" = "n" ]] ; then
+ exit
+ fi
+done
+
if ! rpm -q bzr &> /dev/null ; then
echo "bzr is not installed"
exit
# just picked latest revno on 6/27/2011 for a static config
echo "bzr branch -r revno:409 lp:graphite"
bzr branch -r revno:409 lp:graphite
- cd graphite/
./check-dependencies.py
fi
if [ ! -d deps ] ; then
+ cd graphite/
mkdir deps
pushd deps/
- wget http://launchpad.net/graphite/1.0/0.9.8/+download/graphite-web-0.9.8.tar.gz
- wget http://launchpad.net/graphite/1.0/0.9.8/+download/carbon-0.9.8.tar.gz
- wget http://launchpad.net/graphite/1.0/0.9.8/+download/whisper-0.9.8.tar.gz
- wget http://launchpad.net/txamqp/trunk/0.3/+download/python-txamqp_0.3.orig.tar.gz
+ wget http://myops.planet-lab.org/files/graphite-web-0.9.8.tar.gz
+ wget http://myops.planet-lab.org/files/carbon-0.9.8.tar.gz
+ wget http://myops.planet-lab.org/files/whisper-0.9.8.tar.gz
+ wget http://myops.planet-lab.org/files/python-txamqp_0.3.orig.tar.gz
tar -xvf python-txamqp_0.3.orig.tar.gz
tar -zxvf whisper-0.9.8.tar.gz
sed -i -e 's|Alias /content/ .*|Alias /content/ /var/www/html/content/|g' /etc/httpd/conf.d/graphite-vhost.conf
cp /opt/graphite/conf/graphite.wsgi.example /opt/graphite/conf/graphite.wsgi
cp /opt/graphite/conf/carbon.conf.example /opt/graphite/conf/carbon.conf
+ cp /opt/graphite/webapp/graphite/render/graphTemplates.conf /opt/graphite/conf
cp /usr/share/myops/web/view/storage-schemas.conf /opt/graphite/conf/
popd
fi
-
# NOTE: TEMP hack for testing...
-sed -i -e 's/2003/3003/g' -e 's/2004/3004/g' -e 's/7002/6002/g' /opt/graphite/conf/carbon.conf
-sed -i -e 's/2003/3003/g' /usr/bin/sar2graphite.py
-sed -i -e 's/2003/3003/g' /usr/share/myops/web/collect/server/*.py
-
+#sed -i -e 's/2003/3003/g' -e 's/2004/3004/g' -e 's/7002/6002/g' /opt/graphite/conf/carbon.conf
+#sed -i -e 's/2003/3003/g' /usr/bin/sar2graphite.py
+#sed -i -e 's/2003/3003/g' /usr/share/myops/web/collect/server/*.py
# setup db for graphite application
cd /opt/graphite/webapp/graphite
fi
-export HOST=`hostname`
sed -i -e 's/HOSTNAME/'$HOST'/g' /var/www/html/PlanetLabConf/sysstat.cron
sed -i -e 's/HOSTNAME/'$HOST'/g' /etc/cron.d/sysstat.cron
sed -i -e 's/HOSTNAME/'$HOST'/g' /usr/share/myops/web/collect/server/load-graphite.py
pip install couchapp
-export IP=`ip addr show eth0 | grep -E "eth0$" | tr '/' ' ' | awk '{print $2}'`
sed -i -e 's/;bind_address = 127.0.0.1/bind_address = '$IP'/g' /etc/couchdb/local.ini
-export PASSWORD=makethisrandom1
+export PASSWORD=couchpass$RANDOM
sed -i -e 's/;admin = .*/admin = '$PASSWORD'/g' /etc/couchdb/local.ini
service couchdb restart
chkconfig couchdb on
+# give couch server time to boot up
sleep 10
pushd /usr/share/myops/web/query
echo "couchapp push . http://admin:$PASSWORD@$HOST:5984/myops"
sed -i -e 's/IPADDR/'$IP'/g' /usr/share/myops/web/collect/client/update.sh
sed -i -e 's/IPADDR/'$IP'/g' /usr/share/myops/web/collect/server/load_couch.py
-cp /usr/share/myops/web/collect/client/update.sh /var/www/html/PlanetLabConf/myops_update_sh
+ln /usr/share/myops/web/collect/client/update.sh /var/www/html/PlanetLabConf/myops_update_sh
/usr/share/myops/web/collect/server/build.sh
mkdir -p /var/www/html/uploadlogs/raw
chown -R apache.apache /var/www/html/uploadlogs
-
chkconfig crond on
service crond start
--- /dev/null
+--- glyph.py.orig 2011-11-24 01:51:01.000000000 +0000
++++ glyph.py 2011-11-24 01:53:12.000000000 +0000
+@@ -314,7 +314,7 @@
+ 'minorGridLineColor','thickness','min','max', \
+ 'graphOnly','yMin','yMax','yLimit','yStep','areaMode', \
+ 'areaAlpha','drawNullAsZero','tz', 'yAxisSide','pieMode', \
+- 'yUnitSystem')
++ 'yUnitSystem', 'yAxisType')
+ validLineModes = ('staircase','slope')
+ validAreaModes = ('none','first','all','stacked')
+ validPieModes = ('maximum', 'minimum', 'average')
+@@ -326,6 +326,7 @@
+ params['hideGrid'] = True
+ params['hideAxes'] = True
+ params['yAxisSide'] = 'left'
++ params['yAxisType'] = 'linear'
+ params['title'] = ''
+ params['vtitle'] = ''
+ params['margin'] = 0
+@@ -345,6 +346,8 @@
+ params['yAxisSide'] = 'left'
+ if 'yUnitSystem' not in params:
+ params['yUnitSystem'] = 'si'
++ if 'yAxisType' not in params:
++ params['yAxisType'] = 'linear'
+ self.params = params
+ # When Y Axis is labeled on the right, we subtract x-axis positions from the max,
+ # instead of adding to the minimum
+@@ -642,17 +645,30 @@
+ ySpan, spanPrefix = format_units(self.ySpan, self.yStep,
+ system=self.params.get('yUnitSystem'))
+
+- if ySpan > 10 or spanPrefix != prefix:
+- return "%d %s " % (int(yValue), prefix)
++ if self.params.get('yAxisType') == 'linear':
++ if ySpan > 10 or spanPrefix != prefix:
++ return "%d %s " % (int(yValue), prefix)
+
+- elif ySpan > 3:
+- return "%.1f %s " % (float(yValue), prefix)
++ elif ySpan > 3:
++ return "%.1f %s " % (float(yValue), prefix)
+
+- elif ySpan > 0.1:
+- return "%.2f %s " % (float(yValue), prefix)
++ elif ySpan > 0.1:
++ return "%.2f %s " % (float(yValue), prefix)
+
+- else:
+- return "%g %s" % (float(yValue), prefix)
++ else:
++ return "%g %s" % (float(yValue), prefix)
++ elif self.params.get('yAxisType') == 'log':
++ if ySpan > 10 or spanPrefix != prefix:
++ return "%d %s " % (math.pow(10,int(yValue)), prefix)
++
++ elif ySpan > 3:
++ return "%.1f %s " % (math.pow(10,float(yValue)), prefix)
++
++ elif ySpan > 0.1:
++ return "%.2f %s " % (math.pow(10,float(yValue)), prefix)
++
++ else:
++ return "%g %s " % (math.pow(10,float(yValue)), prefix)
+
+ self.yLabelValues = list( frange(self.yBottom,self.yTop,self.yStep) )
+ self.yLabels = map(makeLabel,self.yLabelValues)