1 from config import config
7 from www.printbadnodes import cmpCategoryVal
12 from policy import get_ticket_id, print_stats, close_rt_backoff, reboot_node
13 from rt import is_host_in_rt_tickets
# Time to enforce policy
# Where to email the summary
SUMTO = "soltesz@cs.princeton.edu"
# Per-site mailing-list templates; '%s' is filled with the site loginbase
# (or slice name for SLICEMAIL).
TECHEMAIL="tech-%s@sites.planet-lab.org"
PIEMAIL="pi-%s@sites.planet-lab.org"
SLICEMAIL="%s@slices.planet-lab.org"
PLCEMAIL="support@planet-lab.org"
# Escalation thresholds, in seconds.
# NOTE(review): SPERDAY (seconds per day) is defined on a line not visible
# in this chunk.
PITHRESH = 7 * SPERDAY
SLICETHRESH = 7 * SPERDAY
# Days before attempting rins again
RINSTHRESH = 5 * SPERDAY
# Days before calling the node dead.
DEADTHRESH = 30 * SPERDAY
# Minimum number of nodes up before squeezing
45 from unified_model import *
def __init__(self, l_merge):
    # List of hostnames this merge run is restricted to; everything else
    # in findbad is skipped (see accumSickSites).
    self.merge_list = l_merge
    # the hostname to loginbase mapping
    self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
    # Previous actions taken on nodes.
    self.act_all = database.if_cached_else(1, "act_all", lambda : {})
    self.findbad = database.if_cached_else(1, "findbad", lambda : {})
    # NOTE(review): loads the "act_all" cache a second time under another
    # name; entries are deleted from cache_all as nodes are merged, so it
    # ends up holding only the case-3 leftovers (see mergeActionsAndBadDB).
    self.cache_all = database.if_cached_else(1, "act_all", lambda : {})
    # NOTE(review): the lines below appear to belong to a separate method
    # (e.g. run()) whose `def` line is not visible in this chunk — a bare
    # `return` of a value inside __init__ would be invalid.
    # read data from findbad and act_all
    self.mergeActionsAndBadDB()
    # pass node_records to RT
    return self.getRecordList()
def accumSickSites(self):
    """
    Take all nodes, from l_diagnose, look them up in the act_all database,
    and insert them into sickdb[] as:

        sickdb[loginbase][nodename] = fb_record
    """
    # look at all problems reported by findbad
    l_nodes = self.findbad['nodes'].keys()
    for nodename in l_nodes:
        if nodename not in self.merge_list:
            continue # skip this node, since it's not wanted
        # loginbase identifies the site responsible for this host
        loginbase = self.plcdb_hn2lb[nodename]
        values = self.findbad['nodes'][nodename]['values']
        # NOTE(review): the initialization of fb_record (presumably
        # `fb_record = {}`) is on a line not visible in this chunk.
        fb_record['nodename'] = nodename
        fb_record['category'] = values['category']
        # NOTE(review): this print likely sits inside an except-branch not
        # visible here; it dumps the raw findbad entry for debugging.
        print self.findbad['nodes'][nodename]
        fb_record['state'] = values['state']
        fb_record['comonstats'] = values['comonstats']
        fb_record['plcnode'] = values['plcnode']
        fb_record['kernel'] = self.getKernel(values['kernel'])
        # fresh records always start in the 'findbad' stage with no
        # message/args/info attached yet
        fb_record['stage'] = "findbad"
        fb_record['message'] = None
        fb_record['bootcd'] = values['bootcd']
        fb_record['args'] = None
        fb_record['info'] = None
        fb_record['time'] = time.time()
        fb_record['date_created'] = time.time()
        if loginbase not in self.sickdb:
            self.sickdb[loginbase] = {}
        self.sickdb[loginbase][nodename] = fb_record
    # NOTE(review): `count` is maintained on lines not visible in this chunk.
    print "Found %d nodes" % count
def getKernel(self, unamestr):
    # Extract the kernel version from a uname string.
    # NOTE(review): the body of this method is not visible in this chunk.
def mergeActionsAndBadDB(self):
    """
    - Look at the sick node_records as reported in findbad,
    - Then look at the node_records in act_all.

    There are four cases:
    1) Problem in findbad, no problem in act_all
        this ok, b/c it just means it's a new problem
    2) Problem in findbad, problem in act_all
        -Did the problem get better or worse?
        -If Same, or Worse, then continue looking for open tickets.
        -If Better, or No problem, then "back-off" penalties.
            This judgement may need to wait until 'Diagnose()'
    3) No problem in findbad, problem in act_all
        Then the node is operational again according to Findbad()
    4) No problem in findbad, no problem in act_all
        There won't be a record in either db, so there's no code.
    """
    sorted_sites = self.sickdb.keys()
    # look at all problems reported by findbad
    for loginbase in sorted_sites:
        d_fb_nodes = self.sickdb[loginbase]
        sorted_nodes = d_fb_nodes.keys()
        for nodename in sorted_nodes:
            fb_record = self.sickdb[loginbase][nodename]
            # NOTE(review): the assignment of `x` (used below, presumably
            # `x = fb_record`) is on a line not visible in this chunk.
            if loginbase not in self.mergedb:
                self.mergedb[loginbase] = {}
            # take the info either from act_all or fb-record.
            # if node not in act_all
            #     then take it from fbrecord, obviously.
            # else node in act_all
            #     if act_all == 0 length (no previous records)
            #         then take it from fbrecord.
            #     else
            #         take it from act_all.
            # We must compare findbad state with act_all state
            if nodename not in self.act_all:
                # 1) ok, b/c it's a new problem. set ticket_id to null
                self.mergedb[loginbase][nodename] = {}
                self.mergedb[loginbase][nodename].update(x)
                self.mergedb[loginbase][nodename]['ticket_id'] = ""
                self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
            # NOTE(review): an `else:` header (node already in act_all) is
            # missing from this chunk before the next `if`.
            if len(self.act_all[nodename]) == 0:
                # history exists but is empty — treat as a new problem
                self.mergedb[loginbase][nodename] = {}
                self.mergedb[loginbase][nodename].update(x)
                self.mergedb[loginbase][nodename]['ticket_id'] = ""
                self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
            # NOTE(review): `else:` missing here — carry the most recent
            # act_all record forward, then overlay current findbad values.
            y = self.act_all[nodename][0]
            y['prev_category'] = y['category']
            self.mergedb[loginbase][nodename] = {}
            self.mergedb[loginbase][nodename].update(y)
            self.mergedb[loginbase][nodename]['comonstats'] = x['comonstats']
            self.mergedb[loginbase][nodename]['category'] = x['category']
            self.mergedb[loginbase][nodename]['state'] = x['state']
            self.mergedb[loginbase][nodename]['kernel']=x['kernel']
            self.mergedb[loginbase][nodename]['bootcd']=x['bootcd']
            self.mergedb[loginbase][nodename]['plcnode']=x['plcnode']
            # refresh the RT status of whatever ticket is attached
            ticket = get_ticket_id(self.mergedb[loginbase][nodename])
            self.mergedb[loginbase][nodename]['rt'] = mailer.getTicketStatus(ticket)
            # delete the entry from cache_all to keep it out of case 3)
            del self.cache_all[nodename]
    # 3) nodes that remain in cache_all were not identified by findbad.
    #    Do we keep them or not?
    #    NOTE: i think that since the categories are performed before this
    #    step now, and by a monitor-controlled agent.
def getRecordList(self):
    """
    Flatten self.mergedb (mergedb[loginbase][nodename] = record) into a
    single list of node records, ready to be handed to the RT stage.

    Returns:
        list of merged node-record dicts.
    """
    # NOTE(review): the accumulator initialization and final return were
    # missing from the visible fragment; restored here — required for the
    # `return self.getRecordList()` caller to receive the list.
    ret_list = []
    sorted_sites = self.mergedb.keys()
    # look at all problems reported by merge
    for loginbase in sorted_sites:
        d_merge_nodes = self.mergedb[loginbase]
        for nodename in d_merge_nodes.keys():
            record = self.mergedb[loginbase][nodename]
            ret_list.append(record)
    return ret_list
def __init__(self, record_list, dbTickets, l_ticket_blacklist, target = None):
    # Time of last update of ticket DB
    self.record_list = record_list
    self.dbTickets = dbTickets
    # tickets that must never be matched against a host
    self.l_ticket_blacklist = l_ticket_blacklist
    # NOTE(review): the loop below appears to belong to a separate method
    # whose `def` line (and the initialization of self.count / ret_list)
    # is not visible in this chunk.
    for diag_node in self.record_list:
        if diag_node != None:
            host = diag_node['nodename']
            # ask RT whether an existing, non-blacklisted ticket already
            # mentions this host.
            # NOTE(review): the remaining arguments of this call are on
            # lines not visible in this chunk.
            (b_host_inticket, r_ticket) = is_host_in_rt_tickets(host, \
                                            self.l_ticket_blacklist, \
            # default: no ticket found; overwritten below when one exists
            diag_node['found_rt_ticket'] = None
            #logger.debug("RT: found tickets for %s" %host)
            diag_node['found_rt_ticket'] = r_ticket['ticket_id']
            if r_ticket is not None:
                print "Ignoring ticket %s" % r_ticket['ticket_id']
                # TODO: why do i return the ticket id for a
                #       blacklisted ticket id?
                #diag_node['found_rt_ticket'] = r_ticket['ticket_id']
            self.count = self.count + 1
            ret_list.append(diag_node)
    #print "RT processed %d nodes with noticket" % self.count
    #logger.debug("RT filtered %d noticket nodes" % self.count)
def __init__(self, record_list):
    self.record_list = record_list
    # hostname -> loginbase (site) mapping
    self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
    self.findbad = database.if_cached_else(1, "findbad", lambda : {})
    # diagnose_in: records grouped by site; diagnose_out: per-site results
    self.diagnose_in = {}
    self.diagnose_out = {}
    # NOTE(review): the lines below appear to belong to a separate run()
    # method whose `def` line is not visible in this chunk.
    self.accumSickSites()
    #logger.debug("Accumulated %d sick sites" % len(self.diagnose_in.keys()))
    # NOTE(review): the `try:` matching the except below is not visible.
        stats = self.diagnoseAll()
    except Exception, err:
        print "----------------"
        print traceback.print_exc()
    #if config.policysavedb:
    #print_stats("sites_observed", stats)
    #print_stats("sites_diagnosed", stats)
    #print_stats("nodes_diagnosed", stats)
    return self.diagnose_out
def accumSickSites(self):
    """
    Take all nodes, from l_diagnose, look them up in the diagnose_out database,
    and insert them into diagnose_in[] as:

        diagnose_in[loginbase] = [diag_node1, diag_node2, ...]
    """
    for node_record in self.record_list:
        nodename = node_record['nodename']
        # group every record under its site's loginbase
        loginbase = self.plcdb_hn2lb[nodename]
        if loginbase not in self.diagnose_in:
            self.diagnose_in[loginbase] = {}
        self.diagnose_in[loginbase][nodename] = node_record
def diagnoseAll(self):
    """Diagnose every accumulated site and return summary counters."""
    i_sites_diagnosed = 0
    i_nodes_diagnosed = 0
    # NOTE(review): i_sites_observed and l_allsites are initialized on
    # lines not visible in this chunk.
    sorted_sites = self.diagnose_in.keys()
    self.diagnose_out= {}
    for loginbase in sorted_sites:
        l_allsites += [loginbase]
        d_diag_nodes = self.diagnose_in[loginbase]
        d_act_records = self.__diagnoseSite(loginbase, d_diag_nodes)
        # store records in diagnose_out, for saving later.
        self.diagnose_out.update(d_act_records)
        # only count a site as diagnosed when at least one of its nodes
        # produced a diagnose record
        if len(d_act_records[loginbase]['nodes'].keys()) > 0:
            i_nodes_diagnosed += (len(d_act_records[loginbase]['nodes'].keys()))
            i_sites_diagnosed += 1
        i_sites_observed += 1
    return {'sites_observed': i_sites_observed,
            'sites_diagnosed': i_sites_diagnosed,
            'nodes_diagnosed': i_nodes_diagnosed,
            'allsites':l_allsites}
def __getDaysDown(self, diag_record, nodename):
    # Estimate how many days the node has been down.  Prefer comon's
    # sshstatus, then its lastcotop age, then PLC's last_contact stamp.
    if diag_record['comonstats']['sshstatus'] != "null":
        daysdown = int(diag_record['comonstats']['sshstatus']) // (60*60*24)
    elif diag_record['comonstats']['lastcotop'] != "null":
        daysdown = int(diag_record['comonstats']['lastcotop']) // (60*60*24)
    # NOTE(review): the `else:` header for the PLC fallback below is on a
    # line not visible in this chunk.
        last_contact = diag_record['plcnode']['last_contact']
        if last_contact == None:
            # the node has never been up, so give it a break
        # NOTE(review): this branch's body and the assignment of `now`
        # are not visible in this chunk.
        diff = now - last_contact
        daysdown = diff // (60*60*24)
    # NOTE(review): the `return daysdown` line is not shown here.
def __getStrDaysDown(self, diag_record, nodename):
    # Render __getDaysDown() as a human-readable phrase for emails.
    daysdown = self.__getDaysDown(diag_record, nodename)
    # NOTE(review): the condition choosing between the two returns below
    # is on a line not visible in this chunk (presumably a daysdown check
    # — TODO confirm).
    return "(%d days down)"%daysdown
    return "Unknown number of days"
def __getCDVersion(self, diag_record, nodename):
    #print "Getting kernel for: %s" % diag_record['nodename']
    # NOTE(review): despite the name, this reads the kernel string; the
    # mapping to a bootcd version string (and the return) is on lines not
    # visible in this chunk.
    cdversion = diag_record['kernel']
def __diagnoseSite(self, loginbase, d_diag_nodes):
    """
    d_diag_nodes are diagnose_in entries.
    """
    # per-site result skeleton; 'config' carries the squeeze/email flags
    # NOTE(review): the remainder of this dict literal is on lines not
    # visible in this chunk.
    d_diag_site = {loginbase : { 'config' :
    sorted_nodes = d_diag_nodes.keys()
    for nodename in sorted_nodes:
        node_record = d_diag_nodes[nodename]
        diag_record = self.__diagnoseNode(loginbase, node_record)
        if diag_record != None:
            d_diag_site[loginbase]['nodes'][nodename] = diag_record
            # NOTE: improvement means, we need to act/squeeze and email.
            #print "DIAG_RECORD", diag_record
            if 'monitor-end-record' in diag_record['stage'] or \
               'nmreset' in diag_record['stage']:
                # print "resetting loginbase!"
                d_diag_site[loginbase]['config']['squeeze'] = True
                d_diag_site[loginbase]['config']['email'] = True
            # print "NO IMPROVEMENT!!!!"
        # NOTE(review): an `else:` header is missing here in this chunk.
            pass # there is nothing to do for this node.
    # NOTE: these settings can be overridden by command line arguments,
    #       or the state of a record, i.e. if already in RT's Support Queue.
    pf = PersistFlags(loginbase, 1, db='site_persistflags')
    nodes_up = pf.nodes_up
    # NOTE(review): the condition guarding this squeeze decision is on a
    # line not visible in this chunk.
    d_diag_site[loginbase]['config']['squeeze'] = True
    max_slices = self.getMaxSlices(loginbase)
    num_nodes = pf.nodes_total #self.getNumNodes(loginbase)
    # NOTE: when max_slices == 0, this is either a new site (the old way)
    #       or an old disabled site from previous monitor (before site['enabled'])
    if nodes_up < num_nodes and max_slices != 0:
        d_diag_site[loginbase]['config']['email'] = True
    if len(d_diag_site[loginbase]['nodes'].keys()) > 0:
        print "SITE: %20s : %d nodes up, at most" % (loginbase, nodes_up)
    # NOTE(review): `return d_diag_site` is presumably on a line not shown.
def diagRecordByCategory(self, node_record):
    # Build a diagnose record (message, args, info, log) for one node
    # based on its findbad category and boot state.
    nodename = node_record['nodename']
    category = node_record['category']
    state = node_record['state']
    loginbase = self.plcdb_hn2lb[nodename]
    # NOTE(review): the initialization of `diag_record` and several
    # `else:` headers are on lines not visible in this chunk.
    if "ERROR" in category: # i.e. "DOWN"
        diag_record.update(node_record)
        daysdown = self.__getDaysDown(diag_record, nodename)
        # format = "DIAG: %20s : %-40s Down only %s days NOTHING DONE"
        # print format % (loginbase, nodename, daysdown)
        s_daysdown = self.__getStrDaysDown(diag_record, nodename)
        # attach the "node down" email template and its substitutions
        diag_record['message'] = emailTxt.mailtxt.newdown
        diag_record['args'] = {'nodename': nodename}
        diag_record['info'] = (nodename, s_daysdown, "")
        #if 'reboot_node_failed' in node_record:
        #    # there was a previous attempt to use the PCU.
        #    if node_record['reboot_node_failed'] == False:
        #        # then the last attempt apparently, succeeded.
        #        # But, the category is still 'ERROR'. Therefore, the
        #        # PCU-to-Node mapping is broken.
        #        #print "Setting message for ERROR node to PCU2NodeMapping: %s" % nodename
        #        diag_record['message'] = emailTxt.mailtxt.pcutonodemapping
        #        diag_record['email_pcu'] = True
        if diag_record['ticket_id'] == "":
            diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
                (loginbase, nodename, diag_record['info'][1:], diag_record['found_rt_ticket'])
        # NOTE(review): `else:` missing here — log with the known ticket.
            diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
                (loginbase, nodename, diag_record['info'][1:], diag_record['ticket_id'])
    elif "OLDBOOTCD" in category:
        # V2 boot cds as determined by findbad
        s_daysdown = self.__getStrDaysDown(node_record, nodename)
        s_cdversion = self.__getCDVersion(node_record, nodename)
        diag_record.update(node_record)
        #if "2.4" in diag_record['kernel'] or "v2" in diag_record['bootcd']:
        diag_record['message'] = emailTxt.mailtxt.newbootcd
        diag_record['args'] = {'nodename': nodename}
        diag_record['info'] = (nodename, s_daysdown, s_cdversion)
        if diag_record['ticket_id'] == "":
            diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
                (loginbase, nodename, diag_record['kernel'],
                 diag_record['bootcd'], diag_record['found_rt_ticket'])
        # NOTE(review): `else:` missing here.
            diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
                (loginbase, nodename, diag_record['kernel'],
                 diag_record['bootcd'], diag_record['ticket_id'])
    elif "PROD" in category:
        # Not sure what to do with these yet. Probably need to
        print "DEBG: %20s : %-40s NOTHING DONE" % (loginbase, nodename)
    elif "BOOT" in state:
        # node came back up — candidate for the 'improvement' path
        # TODO: remove penalties, if any are applied.
        last_contact = node_record['plcnode']['last_contact']
        if last_contact == None:
        # NOTE(review): this branch's body and the assignment of `now`
        # are on lines not visible in this chunk.
        time_diff = now - last_contact;
        if 'improvement' in node_record['stage']:
            # then we need to pass this on to 'action'
            diag_record.update(node_record)
            diag_record['message'] = emailTxt.mailtxt.newthankyou
            diag_record['args'] = {'nodename': nodename}
            diag_record['info'] = (nodename, node_record['prev_category'],
                                   node_record['category'])
            #if 'email_pcu' in diag_record:
            #    if diag_record['email_pcu']:
            #        # previously, the pcu failed to reboot, so send
            #        # email. Now, reset these values to try the reboot
            #        diag_record['email_pcu'] = False
            #        del diag_record['reboot_node_failed']
            if diag_record['ticket_id'] == "":
                diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
                    (loginbase, nodename, diag_record['stage'],
                     state, category, diag_record['found_rt_ticket'])
            # NOTE(review): `else:` missing here.
                diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
                    (loginbase, nodename, diag_record['stage'],
                     state, category, diag_record['ticket_id'])
        #elif time_diff >= 6*SPERHOUR:
        #    # heartbeat is older than 30 min.
        #    #print "Possible NM problem!! %s - %s = %s" % (now, last_contact, time_diff)
        #    diag_record.update(node_record)
        #    diag_record['message'] = emailTxt.mailtxt.NMReset
        #    diag_record['args'] = {'nodename': nodename}
        #    diag_record['stage'] = "nmreset"
        #    diag_record['info'] = (nodename,
        #                           node_record['prev_category'],
        #                           node_record['category'])
        #    if diag_record['ticket_id'] == "":
        #        diag_record['log'] = "NM : %20s : %-40s == %20s %20s %s %s" % \
        #            (loginbase, nodename, diag_record['stage'],
        #             state, category, diag_record['found_rt_ticket'])
        #        diag_record['log'] = "NM : %20s : %-40s == %20s" % \
        #            (loginbase, nodename, diag_record['stage'])
    # NOTE(review): the bodies of the four branches below are on lines
    # not visible in this chunk (presumably each returns None).
    elif "ALPHA" in category:
    elif "clock_drift" in category:
    elif "dns" in category:
    elif "filerw" in category:
        # NOTE(review): a final `else:` header is missing above this line.
        print "Unknown category!!!! %s" % category
def __diagnoseNode(self, loginbase, node_record):
    # Decide the next escalation stage and action for one node: compare
    # the current findbad category against the previously recorded one,
    # reconcile with any attached RT ticket, then advance the per-stage
    # state machine based on elapsed time (delta).
    # TODO: change the format of the hostname in this
    #       record to something more natural.
    nodename = node_record['nodename']
    category = node_record['category']
    prev_category = node_record['prev_category']
    state = node_record['state']
    #if 'prev_category' in node_record:
    #    prev_category = node_record['prev_category']
    #    prev_category = "ERROR"
    if node_record['prev_category'] != "NORECORD":
        # compare the relative severity of current vs. previous category
        val = cmpCategoryVal(category, prev_category)
        print "%s went from %s -> %s" % (nodename, prev_category, category)
        if prev_category == "UNKNOWN" and category == "PROD":
            # sending too many thank you notes to people that don't
            # TODO: not sure what effect this will have on the node
            if node_record['ticket_id'] == "" or node_record['ticket_id'] == None:
                print "closing record with no ticket: ", node_record['nodename']
                node_record['action'] = ['close_rt']
                node_record['message'] = None
                node_record['stage'] = 'monitor-end-record'
            # NOTE(review): `else:` missing here in this chunk.
                node_record['stage'] = 'improvement'
        #if 'monitor-end-record' in node_record['stage']:
        #    # just ignore it if it's already ended.
        #    # otherwise, the status should be worse, and we won't get
        #    print "monitor-end-record: ignoring ", node_record['nodename']
        # current category is worse than previous, carry on
        #values are equal, carry on.
        #print "why are we here?"
    # If an RT ticket is attached, let its status override the stage.
    if 'rt' in node_record and 'Status' in node_record['rt']:
        if node_record['stage'] == 'ticket_waitforever':
            if 'resolved' in node_record['rt']['Status']:
                # operator resolved the ticket — end this record
                print "ending waitforever record for: ", node_record['nodename']
                node_record['action'] = ['noop']
                node_record['message'] = None
                node_record['stage'] = 'monitor-end-record'
                print "oldlog: %s" % node_record['log'],
                print "%15s" % node_record['action']
            if 'new' in node_record['rt']['Status'] and \
               'Queue' in node_record['rt'] and \
               'Monitor' in node_record['rt']['Queue']:
                # a fresh ticket landed back in the Monitor queue — restart
                print "RESETTING stage to findbad"
                node_record['stage'] = 'findbad'
    #### COMPARE category and prev_category
    # then assign a stage based on relative priorities
    # then check category for stats.
    diag_record = self.diagRecordByCategory(node_record)
    if diag_record == None:
        #print "diag_record == None"
        # NOTE(review): this branch's body (presumably `return None`) is
        # on lines not visible in this chunk.
    # TODO: need to record time found, and maybe add a stage for acting on it...
    # NOTE: after found, if the support ticket is resolved, the block is
    #       not removed. How to remove the block on this?
    #if 'found_rt_ticket' in diag_record and \
    #    diag_record['found_rt_ticket'] is not None:
    #    if diag_record['stage'] is not 'improvement':
    #        diag_record['stage'] = 'ticket_waitforever'
    current_time = time.time()
    # take off four days, for the delay that database caused.
    # TODO: generalize delays at PLC, and prevent enforcement when there
    #       have been no emails.
    # NOTE: 7*SPERDAY exists to offset the 'bad week'
    #delta = current_time - diag_record['time'] - 7*SPERDAY
    delta = current_time - diag_record['time']
    message = diag_record['message']
    # NOTE(review): the initialization of `act_record` (presumably
    # `act_record = {}`) is on a line not visible in this chunk.
    act_record.update(diag_record)
    if 'findbad' in diag_record['stage']:
        # The node is bad, and there's no previous record of it.
        act_record['email'] = TECH
        act_record['action'] = ['noop']
        act_record['message'] = message[0]
        act_record['stage'] = 'stage_actinoneweek'
    elif 'nmreset' in diag_record['stage']:
        # node-manager reset path — admin only, no escalation
        act_record['email'] = ADMIN
        act_record['action'] = ['reset_nodemanager']
        act_record['message'] = message[0]
        act_record['stage'] = 'nmreset'
    elif 'reboot_node' in diag_record['stage']:
        act_record['email'] = TECH
        act_record['action'] = ['noop']
        act_record['message'] = message[0]
        act_record['stage'] = 'stage_actinoneweek'
    elif 'improvement' in diag_record['stage']:
        # - backoff previous squeeze actions (slice suspend, nocreate)
        # TODO: add a backoff_squeeze section... Needs to runthrough
        print "backing off of %s" % nodename
        act_record['action'] = ['close_rt']
        act_record['message'] = message[0]
        act_record['stage'] = 'monitor-end-record'
    elif 'actinoneweek' in diag_record['stage']:
        if delta >= 7 * SPERDAY:
            # escalate: copy the PIs and disable slice creation
            act_record['email'] = TECH | PI
            act_record['stage'] = 'stage_actintwoweeks'
            act_record['message'] = message[1]
            act_record['action'] = ['nocreate' ]
            act_record['time'] = current_time # reset clock for waitforever
        elif delta >= 3* SPERDAY and not 'second-mail-at-oneweek' in act_record:
            # mid-week reminder, sent at most once
            act_record['email'] = TECH
            act_record['message'] = message[0]
            act_record['action'] = ['sendmailagain-waitforoneweekaction' ]
            act_record['second-mail-at-oneweek'] = True
        # NOTE(review): `else:` missing here.
            act_record['message'] = None
            act_record['action'] = ['waitforoneweekaction' ]
            print "ignoring this record for: %s" % act_record['nodename']
            return None # don't send if there's no action
    elif 'actintwoweeks' in diag_record['stage']:
        if delta >= 7 * SPERDAY:
            # escalate: copy users and suspend the site's slices
            act_record['email'] = TECH | PI | USER
            act_record['stage'] = 'stage_waitforever'
            act_record['message'] = message[2]
            act_record['action'] = ['suspendslices']
            act_record['time'] = current_time # reset clock for waitforever
        elif delta >= 3* SPERDAY and not 'second-mail-at-twoweeks' in act_record:
            act_record['email'] = TECH | PI
            act_record['message'] = message[1]
            act_record['action'] = ['sendmailagain-waitfortwoweeksaction' ]
            act_record['second-mail-at-twoweeks'] = True
        # NOTE(review): `else:` missing here.
            act_record['message'] = None
            act_record['action'] = ['waitfortwoweeksaction']
            return None # don't send if there's no action
    elif 'ticket_waitforever' in diag_record['stage']:
        act_record['email'] = TECH
        if 'first-found' not in act_record:
            # first time we noticed the external ticket — mark the log
            act_record['first-found'] = True
            act_record['log'] += " firstfound"
            act_record['action'] = ['ticket_waitforever']
            act_record['message'] = None
            act_record['time'] = current_time
        # NOTE(review): `else:` missing here.
            if delta >= 7*SPERDAY:
                act_record['action'] = ['ticket_waitforever']
                act_record['message'] = None
                act_record['time'] = current_time # reset clock
            # NOTE(review): `else:` missing here.
                act_record['action'] = ['ticket_waitforever']
                act_record['message'] = None
    elif 'waitforever' in diag_record['stage']:
        # more than 3 days since last action
        # TODO: send only on weekdays.
        # NOTE: expects that 'time' has been reset before entering waitforever stage
        if delta >= 3*SPERDAY:
            act_record['action'] = ['email-againwaitforever']
            act_record['message'] = message[2]
            act_record['time'] = current_time # reset clock
        # NOTE(review): `else:` missing here.
            act_record['action'] = ['waitforever']
            act_record['message'] = None
            return None # don't send if there's no action
    # NOTE(review): a final `else:` header is missing above this comment.
        # There is no action to be taken, possibly b/c the stage has
        # already been performed, but diagnose picked it up again.
        # 1. stage is unknown, or
        # 2. delta is not big enough to bump it to the next stage.
        # TODO: figure out which. for now assume 2.
        print "UNKNOWN stage for %s; nothing done" % nodename
        act_record['action'] = ['unknown']
        act_record['message'] = message[0]
        # NOTE(review): the lines below appear to belong to a sibling
        # branch whose header is not visible in this chunk.
        act_record['email'] = TECH
        act_record['action'] = ['noop']
        act_record['message'] = message[0]
        act_record['stage'] = 'stage_actinoneweek'
        act_record['time'] = current_time # reset clock
    print "%s" % act_record['log'],
    print "%15s" % act_record['action']
    # NOTE(review): `return act_record` is presumably on a line not shown.
def getMaxSlices(self, loginbase):
    # Return the site's max_slices value as reported by findbad's plcsite
    # data for any one of the site's nodes.
    # if sickdb has a loginbase, then it will have at least one node.
    # NOTE(review): the initialization of site_stats (and a probable
    # `break` after the assignment) are on lines not visible here.
    for nodename in self.diagnose_in[loginbase].keys():
        if nodename in self.findbad['nodes']:
            site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
    if site_stats == None:
        raise Exception, "loginbase with no nodes in findbad"
    return site_stats['max_slices']
def getNumNodes(self, loginbase):
    # Return the site's num_nodes value as reported by findbad's plcsite
    # data for any one of the site's nodes (parallel to getMaxSlices).
    # if sickdb has a loginbase, then it will have at least one node.
    # NOTE(review): the initialization of site_stats (and a probable
    # `break` after the assignment) are on lines not visible here.
    for nodename in self.diagnose_in[loginbase].keys():
        if nodename in self.findbad['nodes']:
            site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
    if site_stats == None:
        raise Exception, "loginbase with no nodes in findbad"
    return site_stats['num_nodes']
"""
Returns number of up nodes as the total number *NOT* in act_all with a
stage other than 'steady-state' .
"""
def getUpAtSite(self, loginbase, d_diag_site):
    # TODO: THIS DOESN"T WORK!!! it misses all the 'debug' state nodes
    #       that aren't recorded yet.
    numnodes = self.getNumNodes(loginbase)
    # NOTE: assume nodes we have no record of are ok. (too conservative)
    # TODO: make the 'up' value more representative
    for nodename in d_diag_site[loginbase]['nodes'].keys():
        rec = d_diag_site[loginbase]['nodes'][nodename]
        if rec['stage'] != 'monitor-end-record':
        # NOTE(review): the bodies of this branch (and its counterpart)
        # are on lines not visible in this chunk.
            pass # the node is assumed to be up.
    # print "ERROR: %s total nodes up and down != %d" % (loginbase, numnodes)
def __init__(self, diagnose_out):
    # the hostname to loginbase mapping
    self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
    self.diagnose_db = diagnose_out
    self.act_all = database.if_cached_else(1, "act_all", lambda : {})
    # A dict of actions to specific functions. PICKLE doesn't like lambdas.
    # NOTE(review): `self.actions = {}` is presumably initialized on a
    # line not visible in this chunk.
    self.actions['suspendslices'] = lambda args: plc.suspendSlices(args['hostname'])
    self.actions['nocreate'] = lambda args: plc.removeSliceCreation(args['hostname'])
    self.actions['close_rt'] = lambda args: close_rt_backoff(args)
    self.actions['rins'] = lambda args: plc.nodeBootState(args['hostname'], "rins")
    self.actions['noop'] = lambda args: args
    self.actions['reboot_node'] = lambda args: reboot_node(args)
    self.actions['reset_nodemanager'] = lambda args: args # reset_nodemanager(args)
    # the remaining actions are bookkeeping-only no-ops
    self.actions['ticket_waitforever'] = lambda args: args
    self.actions['waitforever'] = lambda args: args
    self.actions['unknown'] = lambda args: args
    self.actions['waitforoneweekaction'] = lambda args: args
    self.actions['waitfortwoweeksaction'] = lambda args: args
    self.actions['sendmailagain-waitforoneweekaction'] = lambda args: args
    self.actions['sendmailagain-waitfortwoweeksaction'] = lambda args: args
    self.actions['email-againwaitforever'] = lambda args: args
    self.actions['email-againticket_waitforever'] = lambda args: args
# NOTE(review): the lines below appear to be the body of a run() method
# whose `def` line is not visible in this chunk.
#logger.debug("Accumulated %d sick sites" % len(self.sickdb.keys()))
# NOTE(review): the `try:` matching the except below is not visible.
    stats = self.analyseSites()
except Exception, err:
    print "----------------"
    print traceback.print_exc()
# persist progress so already-taken actions are not repeated on rerun
if config.policysavedb:
    print "Saving Databases... act_all"
    database.dbDump("act_all", self.act_all)
    database.dbDump("diagnose_out", self.diagnose_db)
#print_stats("sites_observed", stats)
#print_stats("sites_diagnosed", stats)
#print_stats("nodes_diagnosed", stats)
print_stats("sites_emailed", stats)
#print_stats("nodes_actedon", stats)
print string.join(stats['allsites'], ",")
if config.policysavedb:
    print "Saving Databases... act_all"
    #database.dbDump("policy.eventlog", self.eventlog)
    # TODO: remove 'diagnose_out',
    #       or at least the entries that were acted on.
    database.dbDump("act_all", self.act_all)
    database.dbDump("diagnose_out", self.diagnose_db)
def accumSites(self):
    """
    Take all nodes, from l_action, look them up in the diagnose_db database,
    and insert them into sickdb[] as:

    This way only the given l_action nodes will be acted on regardless
    of how many from diagnose_db are available.

        sickdb[loginbase][nodename] = diag_record
    """
    # NOTE(review): despite the docstring, the visible body simply aliases
    # the whole diagnose_db — no per-node filtering is shown in this chunk.
    self.sickdb = self.diagnose_db
def __emailSite(self, loginbase, roles, message, args):
    """
    loginbase is the unique site abbreviation, prepended to slice names.
    roles contains TECH, PI, USER roles, and derive email aliases.
    record contains {'message': [<subj>,<body>], 'args': {...}}
    """
    # NOTE(review): several branch bodies and `else:`/`try:` headers in
    # this method are missing from this chunk; indentation is best-effort.
    args.update({'loginbase':loginbase})
    # choose recipients; debug modes redirect mail to the operator
    if not config.mail and not config.debug and config.bcc:
    if config.mail and config.debug:
        contacts += [config.email]
    contacts += [TECHEMAIL % loginbase]
    contacts += [PIEMAIL % loginbase]
    slices = plc.slices(loginbase)
    contacts += [SLICEMAIL % slice]
    print "SLIC: %20s : %d slices" % (loginbase, len(slices))
    print "SLIC: %20s : 0 slices" % loginbase
    # fill the subject/body templates with the per-site args
    subject = message[0] % args
    body = message[1] % args
    if 'ticket_id' in args:
        # thread the reply onto the existing RT ticket
        subj = "Re: [PL #%s] %s" % (args['ticket_id'], subject)
    # NOTE(review): `else:` missing here.
        subj = "Re: [PL noticket] %s" % subject
    mailer.email(subj, body, contacts)
    ticket_id = args['ticket_id']
    ticket_id = mailer.emailViaRT(subject, body, contacts, args['ticket_id'])
    # NOTE(review): the `try:` matching this except is not visible here.
    except Exception, err:
        print "exception on message:"
        print traceback.print_exc()
    # NOTE(review): `return ticket_id` is presumably on a line not shown.
def _format_diaginfo(self, diag_node):
    # Render one node's 'info' tuple as a single line for the email body.
    info = diag_node['info']
    if diag_node['stage'] == 'monitor-end-record':
        # improvement: report the category transition
        hlist = " %s went from '%s' to '%s'\n" % (info[0], info[1], info[2])
    # NOTE(review): `else:` missing here in this chunk.
        hlist = " %s %s - %s\n" % (info[0], info[2], info[1]) #(node,ver,daysdn)
    # NOTE(review): `return hlist` is presumably on a line not shown.
def get_email_args(self, act_recordlist, loginbase=None):
    # Collect template substitutions (hostname list, boot-cd URLs, pcu_id,
    # ticket_id) from a list of act_records for one issue.
    # NOTE(review): `email_args = {}` is presumably initialized on a line
    # not visible in this chunk.
    email_args['hostname_list'] = ""
    email_args['url_list'] = ""
    for act_record in act_recordlist:
        email_args['hostname_list'] += act_record['msg_format']
        email_args['hostname'] = act_record['nodename']
        # NOTE(review): the %s placeholders below are appended without a
        # `%` substitution — they reach the email template verbatim
        # (possibly substituted later; TODO confirm).
        email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/iso/%s.iso\n"
        email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/usb/%s.usb\n"
        email_args['url_list'] += "\n"
        if 'plcnode' in act_record and \
           'pcu_ids' in act_record['plcnode'] and \
           len(act_record['plcnode']['pcu_ids']) > 0:
            print "setting 'pcu_id' for email_args %s"%email_args['hostname']
            email_args['pcu_id'] = act_record['plcnode']['pcu_ids'][0]
        # NOTE(review): `else:` missing here — default when no PCU exists.
            email_args['pcu_id'] = "-1"
        if 'ticket_id' in act_record:
            if act_record['ticket_id'] == 0 or act_record['ticket_id'] == '0':
                # interactive fallback: ask the operator for the ticket id
                print "Enter the ticket_id for %s @ %s" % (loginbase, act_record['nodename'])
                line = sys.stdin.readline()
                # NOTE(review): try/except around int() only partially
                # visible in this chunk.
                ticket_id = int(line)
                print "could not get ticket_id from stdin..."
            # NOTE(review): `else:` missing here.
                ticket_id = act_record['ticket_id']
            email_args['ticket_id'] = ticket_id
    # NOTE(review): `return email_args` is presumably on a line not shown.
def get_unique_issues(self, act_recordlist):
    """
    Group act_records by their primary action key (action[0]) so that a
    site receives at most one email per distinct problem type.

    Args:
        act_recordlist: list of act_record dicts, each with a non-empty
            'action' list.
    Returns:
        dict mapping action key -> list of act_records, preserving input
        order within each group.
    """
    # NOTE: only send one email per site, per problem...
    # NOTE(review): the accumulator initialization was missing from the
    # visible fragment; restored here.
    unique_issues = {}
    for act_record in act_recordlist:
        act_key = act_record['action'][0]
        if act_key not in unique_issues:
            unique_issues[act_key] = []
        unique_issues[act_key] += [act_record]
    return unique_issues
def __actOnSite(self, loginbase, site_record):
    # Act on every diagnosed node of one site: email the site once per
    # unique issue, attach/refresh RT tickets, then (optionally) squeeze.
    # NOTE(review): initialization of act_recordlist and the two counters
    # returned below is on lines not visible in this chunk.
    for nodename in site_record['nodes'].keys():
        diag_record = site_record['nodes'][nodename]
        act_record = self.__actOnNode(diag_record)
        #print "nodename: %s %s" % (nodename, act_record)
        if act_record is not None:
            act_recordlist += [act_record]
    # one email per distinct action key, not per node
    unique_issues = self.get_unique_issues(act_recordlist)
    for issue in unique_issues.keys():
        print "\tworking on issue: %s" % issue
        issue_record_list = unique_issues[issue]
        email_args = self.get_email_args(issue_record_list, loginbase)
        #for act_record in issue_record_list:
        #    # if there's a pcu record and email config is set
        #    if 'email_pcu' in act_record:
        #        if act_record['message'] != None and act_record['email_pcu'] and site_record['config']['email']:
        #            # and 'reboot_node' in act_record['stage']:
        #            email_args['hostname'] = act_record['nodename']
        #            ticket_id = self.__emailSite(loginbase,
        #                                         act_record['email'],
        #                                         emailTxt.mailtxt.pcudown[0],
        #            if ticket_id == 0:
        #                print "got a ticket_id == 0!!!! %s" % act_record['nodename']
        #                email_args['ticket_id'] = ticket_id
        # the first record stands in for the whole issue group
        act_record = issue_record_list[0]
        # send message before squeezing
        print "\t\tconfig.email: %s and %s" % (act_record['message'] != None,
                                               site_record['config']['email'])
        if act_record['message'] != None and site_record['config']['email']:
            ticket_id = self.__emailSite(loginbase, act_record['email'],
                                         act_record['message'], email_args)
            # NOTE(review): the `if ticket_id == 0:` guard for the print
            # below is on a line not visible in this chunk.
            print "ticket_id == 0 for %s %s" % (loginbase, act_record['nodename'])
            # Add ticket_id to ALL nodenames
            for act_record in issue_record_list:
                nodename = act_record['nodename']
                # update node record with RT ticket_id
                if nodename in self.act_all:
                    self.act_all[nodename][0]['ticket_id'] = "%s" % ticket_id
                    # if the ticket was previously resolved, reset it to new.
                    if 'rt' in act_record and \
                       'Status' in act_record['rt'] and \
                       act_record['rt']['Status'] == 'resolved':
                        mailer.setTicketStatus(ticket_id, "new")
                    status = mailer.getTicketStatus(ticket_id)
                    self.act_all[nodename][0]['rt'] = status
            if config.mail: i_nodes_emailed += 1
        print "\t\tconfig.squeeze: %s and %s" % (config.squeeze,
                                                 site_record['config']['squeeze'])
        if config.squeeze and site_record['config']['squeeze']:
            # run every configured action for this issue's record
            for act_key in act_record['action']:
                self.actions[act_key](email_args)
            i_nodes_actedon += 1
    if config.policysavedb:
        #print "Saving Databases... act_all, diagnose_out"
        #database.dbDump("act_all", self.act_all)
        # remove site record from diagnose_out, it's in act_all as done.
        del self.diagnose_db[loginbase]
        #database.dbDump("diagnose_out", self.diagnose_db)
    # NOTE(review): the time.sleep(1) implied by this print is on a line
    # not visible in this chunk.
    print "sleeping for 1 sec"
    #print "Hit enter to continue..."
    #line = sys.stdin.readline()
    return (i_nodes_actedon, i_nodes_emailed)
1107 def __actOnNode(self, diag_record):
1108 nodename = diag_record['nodename']
1109 message = diag_record['message']
1112 act_record.update(diag_record)
1113 act_record['nodename'] = nodename
1114 act_record['msg_format'] = self._format_diaginfo(diag_record)
1115 print "act_record['stage'] == %s " % act_record['stage']
1117 # avoid end records, and nmreset records
1118 # reboot_node_failed, is set below, so don't reboot repeatedly.
1120 #if 'monitor-end-record' not in act_record['stage'] and \
1121 # 'nmreset' not in act_record['stage'] and \
1122 # 'reboot_node_failed' not in act_record:
1124 # if "DOWN" in act_record['log'] and \
1125 # 'pcu_ids' in act_record['plcnode'] and \
1126 # len(act_record['plcnode']['pcu_ids']) > 0:
1128 # print "%s" % act_record['log'],
1129 # print "%15s" % (['reboot_node'],)
1130 # # Set node to re-install
1131 # plc.nodeBootState(act_record['nodename'], "rins")
1133 # ret = reboot_node({'hostname': act_record['nodename']})
1134 # except Exception, exc:
1135 # print "exception on reboot_node:"
1137 # print traceback.print_exc()
1140 # if ret: # and ( 'reboot_node_failed' not in act_record or act_record['reboot_node_failed'] == False):
1141 # # Reboot Succeeded
1142 # print "reboot succeeded for %s" % act_record['nodename']
1144 # act_record2.update(act_record)
1145 # act_record2['action'] = ['reboot_node']
1146 # act_record2['stage'] = "reboot_node"
1147 # act_record2['reboot_node_failed'] = False
1148 # act_record2['email_pcu'] = False
1150 # if nodename not in self.act_all:
1151 # self.act_all[nodename] = []
1152 # print "inserting 'reboot_node' record into act_all"
1153 # self.act_all[nodename].insert(0,act_record2)
1155 # # return None to avoid further action
1156 # print "Taking no further action"
1159 # print "reboot failed for %s" % act_record['nodename']
1160 # # set email_pcu to also send pcu notice for this record.
1161 # act_record['reboot_node_failed'] = True
1162 # act_record['email_pcu'] = True
1164 # print "%s" % act_record['log'],
1165 # print "%15s" % act_record['action']
1167 if act_record['stage'] is not 'monitor-end-record' and \
1168 act_record['stage'] is not 'nmreset':
1169 if nodename not in self.act_all:
1170 self.act_all[nodename] = []
1172 self.act_all[nodename].insert(0,act_record)
1174 print "Not recording %s in act_all" % nodename
1178 def analyseSites(self):
1179 i_sites_observed = 0
1180 i_sites_diagnosed = 0
1181 i_nodes_diagnosed = 0
1186 sorted_sites = self.sickdb.keys()
1188 for loginbase in sorted_sites:
1189 site_record = self.sickdb[loginbase]
1190 print "sites: %s" % loginbase
1192 i_nodes_diagnosed += len(site_record.keys())
1193 i_sites_diagnosed += 1
1195 (na,ne) = self.__actOnSite(loginbase, site_record)
1197 i_sites_observed += 1
1198 i_nodes_actedon += na
1199 i_sites_emailed += ne
1201 l_allsites += [loginbase]
1203 return {'sites_observed': i_sites_observed,
1204 'sites_diagnosed': i_sites_diagnosed,
1205 'nodes_diagnosed': i_nodes_diagnosed,
1206 'sites_emailed': i_sites_emailed,
1207 'nodes_actedon': i_nodes_actedon,
1208 'allsites':l_allsites}
1210 def print_stats(self, key, stats):
1211 print "%20s : %d" % (key, stats[key])