from bigquery_analytics import BigQueryAnalytics
+import datetime
+import re
import os
import sys
+import time
import json
import traceback
+import urllib2
if os.path.exists("/home/smbaker/projects/vicci/plstackapi/planetstack"):
sys.path.append("/home/smbaker/projects/vicci/plstackapi/planetstack")
BLUE_LOAD=5000000
RED_LOAD=15000000
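+# Endpoints of the linear "hotness" ramp used by merge_datamodel_sites(): values at or below BLUE_LOAD map to 0.0, RED_LOAD maps to 1.0.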
+glo_cached_queries = {}
+
class PlanetStackAnalytics(BigQueryAnalytics):
def __init__(self, tableName="demoevents"):
BigQueryAnalytics.__init__(self, tableName)

	def service_to_sliceNames(self, serviceName):
		service = Service.objects.get(name=serviceName)
		try:
			slices = service.slices.all()
		except:
			# BUG in data model -- Slice.service has related name 'service' when it
			# should be 'slices'
			slices = service.service.all()

		return [slice.name for slice in slices]
- def compose_query(self, slice=None, site=None, node=None, service=None, timeField="MinuteTime", avg=[], sum=[], count=[], computed=[], groupBy=["MinuteTime"], orderBy=["MinuteTime"], tableName="demoevents"):
- tablePart = "%s.%s@-3600000--1" % ("vicci", tableName)
+ def compose_query(self, slice=None, site=None, node=None, service=None, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName="demoevents", latest=False, max_age=60*60):
+ max_age = max_age * 1000
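+		# BigQuery range decorator: [dataset.table@-N--1] limits the scan to rows from the last N milliseconds.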
+ tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, max_age)
fields = []
fieldNames = []
+ srcFieldNames = ["time"]
- if (timeField=="MinuteTime"):
- fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/60)*60 as MinuteTime")
- elif (timeField=="HourTime"):
- fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/60/60)*60*60 as HourTime")
- elif (timeField=="DayTime"):
- fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/60/60/24)*60*60*24 as DayTime")
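+		# Bucket samples by rounding each timestamp down to the nearest timeBucket seconds, exposed as "Time".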
+ fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket),str(timeBucket)))
+ #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))
for fieldName in avg:
fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
fieldNames.append("avg_%s" % fieldName.replace("%",""))
+ srcFieldNames.append(fieldName)
for fieldName in sum:
fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
fieldNames.append("sum_%s" % fieldName.replace("%",""))
+ srcFieldNames.append(fieldName)
for fieldName in count:
fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
fieldNames.append("count_%s" % fieldName.replace("%",""))
+ srcFieldNames.append(fieldName)
+
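+		# "val" fields are passed through to the SELECT list unaggregated.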
+ for fieldName in val:
+ fields.append(fieldName)
+ fieldNames.append(fieldName)
+ srcFieldNames.append(fieldName)
for fieldName in computed:
operator = "/"
computedFieldName = "computed_" + parts[0].replace("%","")+"_mult_"+parts[1].replace("%","")
fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
fieldNames.append(computedFieldName)
+ srcFieldNames.append(parts[0])
+ srcFieldNames.append(parts[1])
for fieldName in groupBy:
- if (fieldName not in ["MinuteTime", "HourTime", "DayTime"]):
+ if (fieldName not in ["Time"]):
fields.append(fieldName)
fieldNames.append(fieldName)
+ srcFieldNames.append(fieldName)
fields = ", ".join(fields)

		where = []
		if slice:
			where.append("%%slice='%s'" % slice)
		if site:
			where.append("%%site='%s'" % site)
		if node:
			where.append("%%hostname='%s'" % node)
		if service:
			sliceNames = self.service_to_sliceNames(service)
			if sliceNames:
				where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) + ")")

		if where:
			where = " WHERE " + " AND ".join(where)
		else:
			where = ""

		if groupBy:
			groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
			groupBy = " GROUP BY " + ",".join(groupBy)
		else:
			groupBySub = " GROUP BY %hostname"
			groupBy = ""

		if orderBy:
			orderBy = " ORDER BY " + ",".join(orderBy)
		else:
			orderBy = ""
+ if latest:
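+			# Self-join the table against its per-(%hostname, event) max(time) so only the newest sample of each pair survives.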
+ latestFields = ["table1.%s as %s" % (x,x) for x in srcFieldNames]
+ latestFields = ", ".join(latestFields)
+ tablePart = """(SELECT %s FROM %s AS table1
+ JOIN
+ (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
+ ON
+ table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)
+
if computed:
- subQuery = "SELECT %%hostname, %s FROM [%s]" % (fields, tablePart)
+ subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
if where:
subQuery = subQuery + where
subQuery = subQuery + groupBySub
- #subQuery = subQuery + " GROUP BY %s,%%hostname" % timeField
sumFields = []
for fieldName in fieldNames:
sumFields = ",".join(sumFields)
- query = "SELECT %s, %s FROM (%s)" % (timeField, sumFields, subQuery)
+ query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
if groupBy:
query = query + groupBy
if orderBy:
query = query + orderBy
else:
- query = "SELECT %s FROM [%s]" % (fields, tablePart)
+ query = "SELECT %s FROM %s" % (fields, tablePart)
if where:
query = query + " " + where
if groupBy:
	def get_list_from_req(self, req, name, default=[]):
		value = req.GET.get(name, None)
if not value:
return default
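+		# clients may send "@" in place of "%" to avoid URL-escaping problems; translate it back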
+ value=value.replace("@","%")
return value.split(",")
- def format_result(self, format, result, query):
+ def format_result(self, format, result, query, dataSourceUrl):
if (format == "json_dicts"):
- result = {"query": query, "rows": result}
+ result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl}
return ("application/javascript", json.dumps(result))
elif (format == "json_arrays"):
return ("text/html", new_result)
- elif (format == "json_hpcdash"):
- new_rows = {}
- for row in result:
- new_row = {"lat": float(row.get("lat", 0)),
- "long": float(row.get("long", 0)),
- "health": 0,
- "numNodes": int(row.get("numNodes",0)),
- "numHPCSlivers": int(row.get("sum_count_hostname", 0)),
- "siteUrl": row.get("url", ""),
- "hot": float(row.get("hotness", 0.0)),
- "load": int(float(row.get("max_avg_cpu", 0)))}
- new_rows[row["site"]] = new_row
- return ("application/javascript", json.dumps(new_rows))
-
- def only_largest(self, rows, fieldName):
- """ Given a fieldName, only return the set of rows that had the
- maximum value of that fieldName.
- """
- maxVal = max( [int(row[fieldName]) for row in rows] )
- new_rows = [row for row in rows if int(row[fieldName])==maxVal]
- return new_rows
-
- def merge_datamodel_sites(self, rows):
+ def merge_datamodel_sites(self, rows, slice=None):
""" For a query that included "site" in its groupby, merge in the
opencloud site information.
"""
+
+ if slice:
+ try:
+ slice = Slice.objects.get(name=slice)
+ except:
+ slice = None
+
for row in rows:
sitename = row["site"]
			try:
				model_site = Site.objects.get(name=sitename)
			except:
				# we didn't find it in the data model
				continue
+ allocated_slivers = 0
+ if model_site and slice:
+ for sliver in slice.slivers.all():
+ if sliver.node.site == model_site:
+ allocated_slivers = allocated_slivers + 1
+
row["lat"] = float(model_site.location.latitude)
row["long"] = float(model_site.location.longitude)
row["url"] = model_site.site_url
row["numNodes"] = model_site.nodes.count()
+ row["allocated_slivers"] = allocated_slivers
+
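+			# Map the site's max CPU (reported as 0-100) onto the BLUE_LOAD..RED_LOAD hotness ramp, floored at 0.0.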
+ max_cpu = row.get("max_avg_cpu", row.get("max_cpu",0))
+ cpu=float(max_cpu)/100.0
+ row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
+
+ def compose_latest_query(self, fieldNames=None, groupByFields=["%hostname", "event"]):
+ """ Compose a query that returns the 'most recent' row for each (hostname, event)
+ pair.
+ """
+
+ if not fieldNames:
+ fieldNames = ["%hostname", "%bytes_sent", "time", "event", "%site", "%elapsed", "%slice", "%cpu"]
+
+ fields = ["table1.%s AS %s" % (x,x) for x in fieldNames]
+ fields = ", ".join(fields)
+
+ tableDesc = "%s.%s" % (self.projectName, self.tableName)
+
+ groupByOn = ["table1.time = latest.maxtime"]
+ for field in groupByFields:
+ groupByOn.append("table1.%s = latest.%s" % (field, field))
+
+ groupByOn = " AND ".join(groupByOn)
+ groupByFields = ", ".join(groupByFields)
- if "max_avg_cpu" in row:
- cpu=float(row["max_avg_cpu"])/100.0
- row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
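+		# Join each row against the per-group max(time), restricting both sides to the last hour (3600000 ms) of data.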
+ base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
+ (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)
+
+ return base_query
+
+ def get_cached_query_results(self, q, wait=True):
+ global glo_cached_queries
+
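+		# In-process cache keyed by query text; entries are considered fresh for 60 seconds.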
+ if q in glo_cached_queries:
+ if (time.time() - glo_cached_queries[q]["time"]) <= 60:
+ print "using cached query"
+ return glo_cached_queries[q]["rows"]
+
+ if not wait:
+ return None
+
+ print "refreshing cached query"
+ result = self.run_query(q)
+ glo_cached_queries[q] = {"time": time.time(), "rows": result}
+
+ return result
def process_request(self, req):
print req.GET
- tqx = req.GET.get("reqId", None)
+ tqx = req.GET.get("tqx", None)
slice = req.GET.get("slice", None)
site = req.GET.get("site", None)
format = req.GET.get("format", "json_dicts")
- timeField = req.GET.get("timeField", "MinuteTime")
+		timeBucket = req.GET.get("timeBucket", "60")
avg = self.get_list_from_req(req, "avg")
sum = self.get_list_from_req(req, "sum")
count = self.get_list_from_req(req, "count")
computed = self.get_list_from_req(req, "computed")
- groupBy = self.get_list_from_req(req, "groupBy", ["MinuteTime"])
- orderBy = self.get_list_from_req(req, "orderBy", ["MinuteTime"])
+ groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
+ orderBy = self.get_list_from_req(req, "orderBy", ["Time"])
maxRows = req.GET.get("maxRows", None)
- onlyLargest = req.GET.get("onlyLargest", None)
mergeDataModelSites = req.GET.get("mergeDataModelSites", None)
- q = self.compose_query(slice, site, node, service, timeField, avg, sum, count, computed, groupBy, orderBy)
+ cached = req.GET.get("cached", None)
+
+		q = self.compose_query(slice, site, node, service, timeBucket, avg, sum, count, computed, [], groupBy, orderBy)
print q
- if (format=="raw"):
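+		# Rebuild this request's URL so Google Charts can re-issue it with format=charts; "%" is escaped so "%field" names survive a second URL parse.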
+		dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts"
+
+ if (format=="dataSourceUrl"):
+ result = {"dataSourceUrl": dataSourceUrl}
+			return ("application/javascript", json.dumps(result))
+
+ elif (format=="raw"):
result = self.run_query_raw(q)
- result["reqId"] = 0 # XXX FIXME
- return ("application/javascript", json.dumps(result))
+ result["dataSourceUrl"] = dataSourceUrl
+
+			result = json.dumps(result)
+
+ return ("application/javascript", result)
+
+ elif (format=="nodata"):
+ result = {"dataSourceUrl": dataSourceUrl, "query": q}
+			result = json.dumps(result)
+			return ("application/javascript", result)
+
+ elif (format=="charts"):
+ bq_result = self.run_query_raw(q)
+
+ # cloudscrutiny code is probably better!
+ table = {}
+ table["cols"] = self.schema_to_cols(bq_result["schema"])
+ rows = []
+ if "rows" in bq_result:
+ for row in bq_result["rows"]:
+ rowcols = []
+ for (colnum,col) in enumerate(row["f"]):
+ if (colnum==0):
+ dt = datetime.datetime.fromtimestamp(float(col["v"]))
+ rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
+ else:
+ try:
+ rowcols.append({"v": float(col["v"])})
+ except:
+ rowcols.append({"v": col["v"]})
+ rows.append({"c": rowcols})
+ table["rows"] = rows
+
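+			# the GViz tqx parameter arrives as "reqId:<n>"; strip the label characters to recover the id (assumes a numeric id)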
+ if tqx:
+ reqId = tqx.strip("reqId:")
+ else:
+ reqId = "0"
+
+			result = {"status": "ok", "reqId": reqId, "table": table, "version": "0.6"}
+
+ result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"
+
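+			# json.dumps wrapped the new Date("...") literals in quotes; unquote them so the client evaluates real JS Dates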
+ def unquote_it(x): return x.group()[1:-1].replace('\\"', '"')
+
+ p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
+ result=p.sub(unquote_it, result)
+
+ return ("application/javascript", result)
+
else:
- result = self.run_query(q)
+ if cached:
+ results = self.get_cached_query_results(self.compose_latest_query())
- if onlyLargest:
- result = self.only_largest(result, onlyLargest)
+ filter={}
+ if slice:
+ filter["slice"] = slice
+ if site:
+ filter["site"] = site
+ if node:
+ filter["hostname"] = node
- if mergeDataModelSites:
- self.merge_datamodel_sites(result)
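+				# aggregate the cached latest-sample rows in Python; grouping by a nonexistent field collapses everything into a single group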
+ result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=["doesnotexist"])
+ else:
+ result = self.run_query(q)
if maxRows:
result = result[-int(maxRows):]
- return self.format_result(format, result, q)
+ if mergeDataModelSites:
+ self.merge_datamodel_sites(result)
+ return self.format_result(format, result, q, dataSourceUrl)
def DoPlanetStackAnalytics(request):
	bq = PlanetStackAnalytics()
	result = bq.process_request(request)

	return result
def main():
bq = PlanetStackAnalytics()
- """
- q=bq.compose_query(avg=["%cpu"], count=["%hostname"], slice="HyperCache")
+ q = bq.compose_latest_query(groupByFields=["%hostname", "event", "%slice"])
+ results = bq.run_query(q)
+
+ #results = bq.postprocess_results(results,
+ # filter={"slice": "HyperCache"},
+ # groupBy=["site"],
+ # computed=["bytes_sent/elapsed"],
+ # sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],
+ # maxDeltaTime=60)
+
+ results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)
+
+ bq.dump_table(results)
+
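+	# NOTE: exits here during development -- the compose_query examples below are currently unreachable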
+ sys.exit(0)
+
+ q=bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
print q
bq.dump_table(bq.run_query(q))
- q=bq.compose_query(computed=["%bytes_sent/%elapsed"])
- print
+ q=bq.compose_query(avg=["%cpu","%bandwidth"], count=["%hostname"], slice="HyperCache")
print q
bq.dump_table(bq.run_query(q))
- q=bq.compose_query(timeField="HourTime", avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"], groupBy=["HourTime"], orderBy=["HourTime"])
+ q=bq.compose_query(computed=["%bytes_sent/%elapsed"])
print
print q
bq.dump_table(bq.run_query(q))
- """
- q=bq.compose_query(avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"], service="HPC Service", groupBy=["MinuteTime","%site"])
+ q=bq.compose_query(timeBucket=60*60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
print
print q
- result=bq.run_query(q)
- result = bq.only_largest(result, "MinuteTime")
- bq.merge_datamodel_sites(result)
- #bq.dump_table(result)
- print bq.format_result("json_hpcdash", result, q)
+ bq.dump_table(bq.run_query(q))
if __name__ == "__main__":
main()