1 from bigquery_analytics import BigQueryAnalytics
# Make the PlanetStack Django project importable: prefer a developer
# checkout when present, otherwise fall back to the system install.
# NOTE(review): the listing appears to have dropped the "else:" line
# between these two sys.path.append() calls -- confirm against VCS.
if os.path.exists("/home/smbaker/projects/vicci/plstackapi/planetstack"):
    sys.path.append("/home/smbaker/projects/vicci/plstackapi/planetstack")
sys.path.append("/opt/planetstack")

# Point Django at the PlanetStack settings before importing any models below.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "planetstack.settings")

from django.db import connection
from core.models import Slice, Sliver, ServiceClass, Reservation, Tag, Network, User, Node, Image, Deployment, Site, NetworkTemplate, NetworkSlice, Service

# Module-level cache of BigQuery results, keyed by query string.
# Entries are {"time": <epoch seconds>, "rows": <result rows>}
# (see get_cached_query_results below).
glo_cached_queries = {}
class PlanetStackAnalytics(BigQueryAnalytics):
    """BigQuery-backed analytics specialized for PlanetStack event data.

    Extends BigQueryAnalytics with helpers that compose legacy-SQL queries
    over the event table and post-process results against the Django data
    model (slices, sites, slivers, ...).
    """

    def __init__(self, tableName="demoevents"):
        # tableName: BigQuery table holding the event stream.
        BigQueryAnalytics.__init__(self, tableName)
30 def service_to_sliceNames(self, serviceName):
31 service=Service.objects.get(name=serviceName)
33 slices = service.slices.all()
35 # BUG in data model -- Slice.service has related name 'service' and
36 # it should be 'slices'
37 slices = service.service.all()
39 return [slice.name for slice in slices]
    def compose_query(self, slice=None, site=None, node=None, service=None, timeBucket="60", avg=[], sum=[], count=[], computed=[], val=[], groupBy=["Time"], orderBy=["Time"], tableName="demoevents", latest=False, max_age=60*60):
        """Compose a BigQuery (legacy SQL) query string over the events table.

        Filters (slice/site/node/service) become WHERE clauses; avg/sum/
        count/computed/val name event fields to aggregate; groupBy/orderBy
        shape the result; latest=True restricts to the most recent row per
        (hostname, event); max_age (seconds) bounds the table snapshot.

        NOTE(review): this listing has dropped many lines (initializations
        of fields/fieldNames/where/sumFields, several for/if headers, the
        '/'-vs-'*' branch for computed fields, the JOIN/ON lines of the
        'latest' template, and the final 'return query').  Restore the full
        text from version control before editing.
        NOTE(review): parameters 'slice' and 'sum' shadow builtins, and the
        list-valued defaults are shared mutable objects -- worth cleaning
        up once the full source is available.
        """
        # max_age arrives in seconds; BigQuery table decorators want ms.
        max_age = max_age * 1000
        tablePart = "[%s.%s@-%d--1]" % ("vicci", tableName, max_age)

        srcFieldNames = ["time"]

        # [listing gap] 'fields'/'fieldNames' initializations missing here.
        # The first selected field is the bucketed timestamp, aliased "Time".
        fields.append("SEC_TO_TIMESTAMP(INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s) as Time" % (str(timeBucket),str(timeBucket)))
        #fields.append("INTEGER(TIMESTAMP_TO_SEC(time)/%s)*%s as Time" % (str(timeBucket),str(timeBucket)))

        # [listing gap] missing loop header, presumably: for fieldName in avg:
        fields.append("AVG(%s) as avg_%s" % (fieldName, fieldName.replace("%","")))
        fieldNames.append("avg_%s" % fieldName.replace("%",""))
        srcFieldNames.append(fieldName)

        # [listing gap] missing loop header, presumably: for fieldName in sum:
        fields.append("SUM(%s) as sum_%s" % (fieldName, fieldName.replace("%","")))
        fieldNames.append("sum_%s" % fieldName.replace("%",""))
        srcFieldNames.append(fieldName)

        for fieldName in count:
            fields.append("COUNT(distinct %s) as count_%s" % (fieldName, fieldName.replace("%","")))
            fieldNames.append("count_%s" % fieldName.replace("%",""))
            srcFieldNames.append(fieldName)

        # [listing gap] missing loop header, presumably: for fieldName in val:
        fields.append(fieldName)
        fieldNames.append(fieldName)
        srcFieldNames.append(fieldName)

        for fieldName in computed:
            # [listing gap] missing branch header (likely: if "/" in
            # fieldName:) and the line defining 'operator'.
            parts = fieldName.split("/")
            computedFieldName = "computed_" + parts[0].replace("%","")+"_div_"+parts[1].replace("%","")
            # [listing gap] missing else-branch header.
            # NOTE(review): 'computed' is a list and has no .split(); this
            # looks like it should be fieldName.split("*") -- confirm.
            parts = computed.split("*")
            computedFieldName = "computed_" + parts[0].replace("%","")+"_mult_"+parts[1].replace("%","")
            fields.append("SUM(%s)%sSUM(%s) as %s" % (parts[0], operator, parts[1], computedFieldName))
            fieldNames.append(computedFieldName)
            srcFieldNames.append(parts[0])
            srcFieldNames.append(parts[1])

        # Non-Time groupBy fields must also appear in the select list.
        for fieldName in groupBy:
            if (fieldName not in ["Time"]):
                fields.append(fieldName)
                fieldNames.append(fieldName)
                srcFieldNames.append(fieldName)

        fields = ", ".join(fields)

        # [listing gap] 'where' initialization and the per-filter if-guards
        # are missing (presumably: where = []; if slice: ...; if site: ...).
        where.append("%%slice='%s'" % slice)
        where.append("%%site='%s'" % site)
        where.append("%%hostname='%s'" % node)
        # A service filter expands to an OR over the service's slice names.
        sliceNames = self.service_to_sliceNames(service)
        where.append("(" + " OR ".join(["%%slice='%s'" % sliceName for sliceName in sliceNames]) +")")
        where = " WHERE " + " AND ".join(where)

        # [listing gap] the if/else around groupBy construction is missing;
        # the last assignment below is presumably the no-groupBy else branch.
        groupBySub = " GROUP BY " + ",".join(groupBy + ["%hostname"])
        groupBy = " GROUP BY " + ",".join(groupBy)
        groupBySub = " GROUP BY %hostname"

        orderBy = " ORDER BY " + ",".join(orderBy)

        # [listing gap] missing guard, presumably: if latest:  -- wraps
        # tablePart in a self-join keeping only the newest row per
        # (hostname, event).
        latestFields = ["table1.%s as %s" % (x,x) for x in srcFieldNames]
        latestFields = ", ".join(latestFields)
        # NOTE(review): the JOIN/ON lines of this template were dropped from
        # the listing; the string below is incomplete as shown.
        tablePart = """(SELECT %s FROM %s AS table1
                       (SELECT %%hostname, event, max(time) as maxtime from %s GROUP BY %%hostname, event) AS latest
                       table1.%%hostname = latest.%%hostname AND table1.event = latest.event AND table1.time = latest.maxtime)""" % (latestFields, tablePart, tablePart)

        # [listing gap] missing guard -- presumably build the two-level
        # query (inner per-host aggregation, outer re-aggregation) only when
        # aggregate fields were requested.
        subQuery = "SELECT %%hostname, %s FROM %s" % (fields, tablePart)
        subQuery = subQuery + where
        subQuery = subQuery + groupBySub

        # [listing gap] 'sumFields' initialization is missing.
        for fieldName in fieldNames:
            if fieldName.startswith("avg"):
                sumFields.append("AVG(%s) as avg_%s"%(fieldName,fieldName))
                sumFields.append("MAX(%s) as max_%s"%(fieldName,fieldName))
            elif (fieldName.startswith("count")) or (fieldName.startswith("sum")) or (fieldName.startswith("computed")):
                sumFields.append("SUM(%s) as sum_%s"%(fieldName,fieldName))
            # [listing gap] missing else branch; plain fields pass through.
            sumFields.append(fieldName)

        sumFields = ",".join(sumFields)

        query = "SELECT %s, %s FROM (%s)" % ("Time", sumFields, subQuery)
        query = query + groupBy
        query = query + orderBy
        # [listing gap] missing else branch (single-level, non-aggregating):
        query = "SELECT %s FROM %s" % (fields, tablePart)
        query = query + " " + where
        query = query + groupBy
        query = query + orderBy
        # [listing gap] the trailing 'return query' was dropped.
166 def get_list_from_req(self, req, name, default=[]):
167 value = req.GET.get(name, None)
170 value=value.replace("@","%")
171 return value.split(",")
173 def format_result(self, format, result, query, dataSourceUrl):
174 if (format == "json_dicts"):
175 result = {"query": query, "rows": result, "dataSourceUrl": dataSourceUrl}
176 return ("application/javascript", json.dumps(result))
178 elif (format == "json_arrays"):
182 for key in sorted(row.keys()):
183 new_row.append(row[key])
184 new_result.append(new_row)
185 new_result = {"query": query, "rows": new_result}
186 return ("application/javascript", json.dumps(new_result))
188 elif (format == "html_table"):
192 for key in sorted(row.keys()):
193 new_row.append("<TD>%s</TD>" % str(row[key]))
194 new_rows.append("<TR>%s</TR>" % "".join(new_row))
196 new_result = "<TABLE>%s</TABLE>" % "\n".join(new_rows)
198 return ("text/html", new_result)
200 def merge_datamodel_sites(self, rows, slice=None):
201 """ For a query that included "site" in its groupby, merge in the
202 opencloud site information.
207 slice = Slice.objects.get(name=slice)
212 sitename = row["site"]
214 model_site = Site.objects.get(name=sitename)
216 # we didn't find it in the data model
219 allocated_slivers = 0
220 if model_site and slice:
221 for sliver in slice.slivers.all():
222 if sliver.node.site == model_site:
223 allocated_slivers = allocated_slivers + 1
225 row["lat"] = float(model_site.location.latitude)
226 row["long"] = float(model_site.location.longitude)
227 row["url"] = model_site.site_url
228 row["numNodes"] = model_site.nodes.count()
229 row["allocated_slivers"] = allocated_slivers
231 max_cpu = row.get("max_avg_cpu", row.get("max_cpu",0))
232 cpu=float(max_cpu)/100.0
233 row["hotness"] = max(0.0, ((cpu*RED_LOAD) - BLUE_LOAD)/(RED_LOAD-BLUE_LOAD))
235 def compose_latest_query(self, fieldNames=None, groupByFields=["%hostname", "event"]):
236 """ Compose a query that returns the 'most recent' row for each (hostname, event)
241 fieldNames = ["%hostname", "%bytes_sent", "time", "event", "%site", "%elapsed", "%slice", "%cpu"]
243 fields = ["table1.%s AS %s" % (x,x) for x in fieldNames]
244 fields = ", ".join(fields)
246 tableDesc = "%s.%s" % (self.projectName, self.tableName)
248 groupByOn = ["table1.time = latest.maxtime"]
249 for field in groupByFields:
250 groupByOn.append("table1.%s = latest.%s" % (field, field))
252 groupByOn = " AND ".join(groupByOn)
253 groupByFields = ", ".join(groupByFields)
255 base_query = "SELECT %s FROM [%s@-3600000--1] AS table1 JOIN (SELECT %s, max(time) as maxtime from [%s@-3600000--1] GROUP BY %s) AS latest ON %s" % \
256 (fields, tableDesc, groupByFields, tableDesc, groupByFields, groupByOn)
260 def get_cached_query_results(self, q, wait=True):
261 global glo_cached_queries
263 if q in glo_cached_queries:
264 if (time.time() - glo_cached_queries[q]["time"]) <= 60:
265 print "using cached query"
266 return glo_cached_queries[q]["rows"]
271 print "refreshing cached query"
272 result = self.run_query(q)
273 glo_cached_queries[q] = {"time": time.time(), "rows": result}
    def process_request(self, req):
        """Handle an analytics HTTP request: build a query from GET
        parameters, run it, and return a (content_type, body) pair.

        NOTE(review): this listing has dropped lines throughout (the
        'filter'/'table'/'rows' initializations, several if/elif headers --
        e.g. the cached-results branch -- and the column-type dispatch in
        the charts branch).  Restore the full text from version control
        before editing.
        """
        tqx = req.GET.get("tqx", None)

        # Query-shaping parameters.
        slice = req.GET.get("slice", None)
        site = req.GET.get("site", None)
        node = req.GET.get("node", None)
        service = req.GET.get("service", None)

        format = req.GET.get("format", "json_dicts")

        timeField = req.GET.get("timeBucket", "60")
        avg = self.get_list_from_req(req, "avg")
        sum = self.get_list_from_req(req, "sum")
        count = self.get_list_from_req(req, "count")
        computed = self.get_list_from_req(req, "computed")
        groupBy = self.get_list_from_req(req, "groupBy", ["Time"])
        orderBy = self.get_list_from_req(req, "orderBy", ["Time"])

        maxRows = req.GET.get("maxRows", None)
        mergeDataModelSites = req.GET.get("mergeDataModelSites", None)

        cached = req.GET.get("cached", None)

        q = self.compose_query(slice, site, node, service, timeField, avg, sum, count, computed, [], groupBy, orderBy)

        # Echo back a URL that re-issues this request with format=charts;
        # '%' is escaped because it is meaningful in the query string.
        dataSourceUrl = "http://" + req.META["SERVER_NAME"] + ":" + req.META["SERVER_PORT"] + req.META["PATH_INFO"] + "?" + req.META["QUERY_STRING"].replace("format=","origFormat=").replace("%","%25") + "&format=charts";

        if (format=="dataSourceUrl"):
            result = {"dataSourceUrl": dataSourceUrl}
            return ("application/javascript", result)

        elif (format=="raw"):
            result = self.run_query_raw(q)
            result["dataSourceUrl"] = dataSourceUrl
            result = json.dumps(result);
            return ("application/javascript", result)

        elif (format=="nodata"):
            result = {"dataSourceUrl": dataSourceUrl, "query": q}
            result = json.dumps(result);
            # NOTE(review): BUG -- {a, b} is a SET literal, not a tuple;
            # every other branch returns (content_type, body).  Should be
            # ("application/javascript", result).
            return {"application/javascript", result}

        elif (format=="charts"):
            bq_result = self.run_query_raw(q)

            # Translate the BigQuery schema/rows into a Google Visualization
            # DataTable for the JSONP response below.
            # cloudscrutiny code is probably better!
            # [listing gap] 'table' / 'rows' initializations are missing.
            table["cols"] = self.schema_to_cols(bq_result["schema"])
            if "rows" in bq_result:
                for row in bq_result["rows"]:
                    # [listing gap] 'rowcols' initialization is missing.
                    for (colnum,col) in enumerate(row["f"]):
                        # [listing gap] the if/elif/else dispatch on column
                        # type is missing; the three appends below were
                        # originally mutually exclusive alternatives.
                        dt = datetime.datetime.fromtimestamp(float(col["v"]))
                        rowcols.append({"v": 'new Date("%s")' % dt.isoformat()})
                        rowcols.append({"v": float(col["v"])})
                        rowcols.append({"v": col["v"]})
                    rows.append({"c": rowcols})

            # NOTE(review): str.strip() removes a CHARACTER SET, not a
            # prefix, and raises AttributeError if 'tqx' was not supplied.
            reqId = tqx.strip("reqId:")

            result = {"status": "okColumnChart", "reqId": reqId, "table": table, "version": "0.6"}

            result = "google.visualization.Query.setResponse(" + json.dumps(result) + ");"

            # Unquote the 'new Date(...)' strings so they appear as JS Date
            # constructor calls in the emitted JSONP, not string literals.
            def unquote_it(x): return x.group()[1:-1].replace('\\"', '"')

            p = re.compile(r'"new Date\(\\"[^"]*\\"\)"')
            result=p.sub(unquote_it, result)

            return ("application/javascript", result)

        # [listing gap] missing branch header, presumably: elif cached:
        results = self.get_cached_query_results(self.compose_latest_query())

        # [listing gap] 'filter' initialization and per-key guards missing.
        filter["slice"] = slice
        filter["site"] = site
        filter["hostname"] = node

        result = self.postprocess_results(results, filter=filter, sum=sum, count=count, avg=avg, computed=computed, maxDeltaTime=120, groupBy=["doesnotexist"])
        # [listing gap] missing else header -- the uncached path:
        result = self.run_query(q)

        # [listing gap] missing guard, presumably: if maxRows:
        result = result[-int(maxRows):]

        if mergeDataModelSites:
            self.merge_datamodel_sites(result)

        return self.format_result(format, result, q, dataSourceUrl)
def DoPlanetStackAnalytics(request):
    """Module-level entry point: run the analytics request and return the
    (content_type, body) pair produced by process_request().

    The listing dropped the trailing return, so the computed result was
    silently discarded; restored here.
    """
    bq = PlanetStackAnalytics()
    result = bq.process_request(request)

    return result
    # NOTE(review): these statements appear to be the body of a demo/test
    # driver (likely "def main():") whose header was lost from this listing;
    # restore the header from version control.  Each stanza composes a query
    # against live BigQuery data and dumps the result to stdout.
    bq = PlanetStackAnalytics()

    # Most-recent row per (hostname, event, slice).
    q = bq.compose_latest_query(groupByFields=["%hostname", "event", "%slice"])
    results = bq.run_query(q)

    #results = bq.postprocess_results(results,
    #    filter={"slice": "HyperCache"},
    #    computed=["bytes_sent/elapsed"],
    #    sum=["bytes_sent", "computed_bytes_sent_div_elapsed"], avg=["cpu"],

    results = bq.postprocess_results(results, filter={"slice": "HyperCache"}, maxi=["cpu"], count=["hostname"], computed=["bytes_sent/elapsed"], groupBy=["Time", "site"], maxDeltaTime=80)

    bq.dump_table(results)

    # Aggregate bytes/cpu per site using the 'latest' self-join.
    q=bq.compose_query(sum=["%bytes_sent"], avg=["%cpu"], latest=True, groupBy=["Time", "%site"])
    bq.dump_table(bq.run_query(q))

    # Per-slice CPU/bandwidth averages with distinct-host counts.
    q=bq.compose_query(avg=["%cpu","%bandwidth"], count=["%hostname"], slice="HyperCache")
    bq.dump_table(bq.run_query(q))

    # Computed throughput field (bytes_sent / elapsed).
    q=bq.compose_query(computed=["%bytes_sent/%elapsed"])
    bq.dump_table(bq.run_query(q))

    # Same, bucketed hourly.
    q=bq.compose_query(timeBucket=60*60, avg=["%cpu"], count=["%hostname"], computed=["%bytes_sent/%elapsed"])
    bq.dump_table(bq.run_query(q))
430 if __name__ == "__main__":