5 from www.printbadnodes import cmpCategoryVal
10 from policy import get_ticket_id, print_stats, close_rt_backoff, reboot_node
11 from rt import is_host_in_rt_tickets
# --- Module-level policy constants: notification addresses and escalation
# --- thresholds. NOTE(review): this is a partial listing; SPERDAY (seconds
# --- per day, presumably 24*60*60) and some thresholds (e.g. a TECH
# --- threshold) are defined on elided lines — confirm against the full file.
14 # Time to enforce policy
17 # Where to email the summary
18 SUMTO = "soltesz@cs.princeton.edu"
# Per-site alias templates; '%s' is filled with the site loginbase (or slice
# name for SLICEMAIL) elsewhere in this module (see __emailSite).
19 TECHEMAIL="tech-%s@sites.planet-lab.org"
20 PIEMAIL="pi-%s@sites.planet-lab.org"
21 SLICEMAIL="%s@slices.planet-lab.org"
22 PLCEMAIL="support@planet-lab.org"
# Escalation thresholds, expressed in seconds (days * SPERDAY).
28 PITHRESH = 7 * SPERDAY
29 SLICETHRESH = 7 * SPERDAY
30 # Days before attempting rins again
31 RINSTHRESH = 5 * SPERDAY
33 # Days before calling the node dead.
34 DEADTHRESH = 30 * SPERDAY
35 # Minimum number of nodes up before squeezing
43 from unified_model import *
# Constructor for the merge stage (the enclosing class def is on elided
# lines). Loads the cached findbad/act_all databases and records the list
# of hostnames this run is restricted to.
46 def __init__(self, l_merge):
# l_merge: hostnames to include; accumSickSites skips nodes not in it.
47 self.merge_list = l_merge
49 # the hostname to loginbase mapping
50 self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
52 # Previous actions taken on nodes.
53 self.act_all = database.if_cached_else(1, "act_all", lambda : {})
54 self.findbad = database.if_cached_else(1, "findbad", lambda : {})
# NOTE(review): cache_all is loaded from the same "act_all" cache (not a
# separate "cache_all" DB). It is consumed as a working copy in
# mergeActionsAndBadDB (entries are deleted as they are matched) — looks
# intentional, but confirm it is not a copy-paste slip.
56 self.cache_all = database.if_cached_else(1, "act_all", lambda : {})
# Driver fragment (its def line is elided in this listing): merge
# findbad + act_all state, then hand the flattened record list onward.
63 # read data from findbad and act_all
64 self.mergeActionsAndBadDB()
65 # pass node_records to RT
66 return self.getRecordList()
# Build self.sickdb[loginbase][nodename] = fb_record from the cached
# findbad results, restricted to hosts in self.merge_list.
68 def accumSickSites(self):
70 Take all nodes, from l_diagnose, look them up in the act_all database,
71 and insert them into sickdb[] as:
73 sickdb[loginbase][nodename] = fb_record
75 # look at all problems reported by findbad
76 l_nodes = self.findbad['nodes'].keys()
78 for nodename in l_nodes:
79 if nodename not in self.merge_list:
80 continue # skip this node, since it's not wanted
83 loginbase = self.plcdb_hn2lb[nodename]
84 values = self.findbad['nodes'][nodename]['values']
# NOTE(review): fb_record's initialization (presumably fb_record = {}) and
# the 'count' counter's initialization are on elided lines.
87 fb_record['nodename'] = nodename
89 fb_record['category'] = values['category']
# NOTE(review): this print sits between key assignments; in the full file
# it is likely inside an except/debug branch (elided lines 90-92) — confirm.
93 print self.findbad['nodes'][nodename]
96 fb_record['state'] = values['state']
97 fb_record['comonstats'] = values['comonstats']
98 fb_record['plcnode'] = values['plcnode']
99 fb_record['kernel'] = self.getKernel(values['kernel'])
# Fresh findbad records start at stage "findbad" with no pending message.
100 fb_record['stage'] = "findbad"
101 fb_record['message'] = None
102 fb_record['bootcd'] = values['bootcd']
103 fb_record['args'] = None
104 fb_record['info'] = None
# 'time' is the escalation clock that __diagnoseNode measures delta against.
105 fb_record['time'] = time.time()
106 fb_record['date_created'] = time.time()
108 if loginbase not in self.sickdb:
109 self.sickdb[loginbase] = {}
111 self.sickdb[loginbase][nodename] = fb_record
113 print "Found %d nodes" % count
115 def getKernel(self, unamestr):
# Merge fresh findbad records (sickdb) with prior action history (act_all)
# into self.mergedb[loginbase][nodename].
122 def mergeActionsAndBadDB(self):
124 - Look at the sick node_records as reported in findbad,
125 - Then look at the node_records in act_all.
127 There are four cases:
128 1) Problem in findbad, no problem in act_all
129 this ok, b/c it just means it's a new problem
130 2) Problem in findbad, problem in act_all
131 -Did the problem get better or worse?
132 -If Same, or Worse, then continue looking for open tickets.
133 -If Better, or No problem, then "back-off" penalties.
134 This judgement may need to wait until 'Diagnose()'
136 3) No problem in findbad, problem in act_all
137 The the node is operational again according to Findbad()
139 4) No problem in findbad, no problem in act_all
140 There won't be a record in either db, so there's no code.
143 sorted_sites = self.sickdb.keys()
145 # look at all problems reported by findbad
146 for loginbase in sorted_sites:
147 d_fb_nodes = self.sickdb[loginbase]
148 sorted_nodes = d_fb_nodes.keys()
150 for nodename in sorted_nodes:
151 fb_record = self.sickdb[loginbase][nodename]
# NOTE(review): 'x' used below is assigned on an elided line — presumably
# x = fb_record (or a copy). Verify before relying on it.
153 if loginbase not in self.mergedb:
154 self.mergedb[loginbase] = {}
156 # take the info either from act_all or fb-record.
157 # if node not in act_all
158 # then take it from fbrecord, obviously.
159 # else node in act_all
160 # if act_all == 0 length (no previous records)
161 # then take it from fbrecord.
163 # take it from act_all.
166 # We must compare findbad state with act_all state
167 if nodename not in self.act_all:
168 # 1) ok, b/c it's a new problem. set ticket_id to null
169 self.mergedb[loginbase][nodename] = {}
170 self.mergedb[loginbase][nodename].update(x)
171 self.mergedb[loginbase][nodename]['ticket_id'] = ""
172 self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
# Node known to act_all but with an empty history list: treat as new.
# (The enclosing else: for the branch above is on an elided line.)
174 if len(self.act_all[nodename]) == 0:
175 self.mergedb[loginbase][nodename] = {}
176 self.mergedb[loginbase][nodename].update(x)
177 self.mergedb[loginbase][nodename]['ticket_id'] = ""
178 self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
# Case 2): start from the most recent act_all record (index 0), then
# overlay the fresh findbad measurements from x.
180 y = self.act_all[nodename][0]
181 y['prev_category'] = y['category']
183 self.mergedb[loginbase][nodename] = {}
184 self.mergedb[loginbase][nodename].update(y)
185 self.mergedb[loginbase][nodename]['comonstats'] = x['comonstats']
186 self.mergedb[loginbase][nodename]['category'] = x['category']
187 self.mergedb[loginbase][nodename]['state'] = x['state']
188 self.mergedb[loginbase][nodename]['kernel']=x['kernel']
189 self.mergedb[loginbase][nodename]['bootcd']=x['bootcd']
190 self.mergedb[loginbase][nodename]['plcnode']=x['plcnode']
# Refresh the RT ticket status for the merged record.
191 ticket = get_ticket_id(self.mergedb[loginbase][nodename])
192 self.mergedb[loginbase][nodename]['rt'] = mailer.getTicketStatus(ticket)
194 # delete the entry from cache_all to keep it out of case 3)
195 del self.cache_all[nodename]
197 # 3) nodes that remin in cache_all were not identified by findbad.
198 # Do we keep them or not?
199 # NOTE: i think that since the categories are performed before this
200 # step now, and by a monitor-controlled agent.
# Flatten mergedb into a single list of node records for the next stage.
# NOTE(review): ret_list's initialization and the final return are on
# elided lines (presumably ret_list = [] ... return ret_list).
204 def getRecordList(self):
205 sorted_sites = self.mergedb.keys()
209 # look at all problems reported by merge
210 for loginbase in sorted_sites:
211 d_merge_nodes = self.mergedb[loginbase]
212 for nodename in d_merge_nodes.keys():
213 record = self.mergedb[loginbase][nodename]
214 ret_list.append(record)
# RT-stage constructor (enclosing class def elided): holds the merged
# record list, the ticket DB snapshot, and the ticket blacklist.
219 def __init__(self, record_list, dbTickets, l_ticket_blacklist, target = None):
220 # Time of last update of ticket DB
221 self.record_list = record_list
222 self.dbTickets = dbTickets
224 self.l_ticket_blacklist = l_ticket_blacklist
# Scan fragment (its def line is elided): annotate every record with
# 'found_rt_ticket' based on open RT tickets for the host.
230 for diag_node in self.record_list:
231 if diag_node != None:
232 host = diag_node['nodename']
233 (b_host_inticket, r_ticket) = is_host_in_rt_tickets(host, \
234 self.l_ticket_blacklist, \
# Default: no ticket found (overwritten below when one exists).
236 diag_node['found_rt_ticket'] = None
238 #logger.debug("RT: found tickets for %s" %host)
239 diag_node['found_rt_ticket'] = r_ticket['ticket_id']
# Blacklisted-ticket branch: report but deliberately do not attach the id.
242 if r_ticket is not None:
243 print "Ignoring ticket %s" % r_ticket['ticket_id']
244 # TODO: why do i return the ticket id for a
245 # blacklisted ticket id?
246 #diag_node['found_rt_ticket'] = r_ticket['ticket_id']
247 self.count = self.count + 1
249 ret_list.append(diag_node)
251 #print "RT processed %d nodes with noticket" % self.count
252 #logger.debug("RT filtered %d noticket nodes" % self.count)
# Diagnose-stage constructor (enclosing class def elided): caches the
# hostname->loginbase map and findbad DB, and prepares in/out dicts.
256 def __init__(self, record_list):
257 self.record_list = record_list
258 self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
259 self.findbad = database.if_cached_else(1, "findbad", lambda : {})
261 self.diagnose_in = {}
262 self.diagnose_out = {}
# Run fragment (its def line is elided): group records by site, diagnose
# them all (guarded by a try: on an elided line), return diagnose_out.
265 self.accumSickSites()
267 #logger.debug("Accumulated %d sick sites" % len(self.diagnose_in.keys()))
270 stats = self.diagnoseAll()
271 except Exception, err:
272 print "----------------"
274 print traceback.print_exc()
276 #if config.policysavedb:
279 #print_stats("sites_observed", stats)
280 #print_stats("sites_diagnosed", stats)
281 #print_stats("nodes_diagnosed", stats)
283 return self.diagnose_out
# Group the incoming node records by site:
# diagnose_in[loginbase][nodename] = node_record.
285 def accumSickSites(self):
287 Take all nodes, from l_diagnose, look them up in the diagnose_out database,
288 and insert them into diagnose_in[] as:
290 diagnose_in[loginbase] = [diag_node1, diag_node2, ...]
292 for node_record in self.record_list:
294 nodename = node_record['nodename']
295 loginbase = self.plcdb_hn2lb[nodename]
297 if loginbase not in self.diagnose_in:
298 self.diagnose_in[loginbase] = {}
300 self.diagnose_in[loginbase][nodename] = node_record
# Diagnose every accumulated site; fills self.diagnose_out and returns
# summary counters for print_stats.
# NOTE(review): i_sites_observed and l_allsites are initialized on elided
# lines (presumably = 0 and = []).
304 def diagnoseAll(self):
306 i_sites_diagnosed = 0
307 i_nodes_diagnosed = 0
312 sorted_sites = self.diagnose_in.keys()
314 self.diagnose_out= {}
315 for loginbase in sorted_sites:
316 l_allsites += [loginbase]
318 d_diag_nodes = self.diagnose_in[loginbase]
319 d_act_records = self.__diagnoseSite(loginbase, d_diag_nodes)
320 # store records in diagnose_out, for saving later.
321 self.diagnose_out.update(d_act_records)
# Only sites with at least one diagnosed node count as "diagnosed".
323 if len(d_act_records[loginbase]['nodes'].keys()) > 0:
324 i_nodes_diagnosed += (len(d_act_records[loginbase]['nodes'].keys()))
325 i_sites_diagnosed += 1
326 i_sites_observed += 1
328 return {'sites_observed': i_sites_observed,
329 'sites_diagnosed': i_sites_diagnosed,
330 'nodes_diagnosed': i_nodes_diagnosed,
331 'allsites':l_allsites}
# Estimate how many whole days a node has been down: prefer CoMon's
# sshstatus, then lastcotop, then fall back to PLC's last_contact
# timestamp. NOTE(review): the else:/fallback structure, 'now', and the
# return statement are on elided lines — the visible assignments suggest
# daysdown is returned at the end; confirm against the full file.
335 def __getDaysDown(self, diag_record, nodename):
337 if diag_record['comonstats']['sshstatus'] != "null":
338 daysdown = int(diag_record['comonstats']['sshstatus']) // (60*60*24)
339 elif diag_record['comonstats']['lastcotop'] != "null":
340 daysdown = int(diag_record['comonstats']['lastcotop']) // (60*60*24)
343 last_contact = diag_record['plcnode']['last_contact']
344 if last_contact == None:
345 # the node has never been up, so give it a break
348 diff = now - last_contact
349 daysdown = diff // (60*60*24)
# Human-readable form of __getDaysDown for email/log text. The guard
# choosing between the two returns (presumably daysdown > 0 or similar)
# is on an elided line.
352 def __getStrDaysDown(self, diag_record, nodename):
353 daysdown = self.__getDaysDown(diag_record, nodename)
355 return "(%d days down)"%daysdown
357 return "Unknown number of days"
# Report the node's boot-CD "version" — taken from the kernel string
# recorded by findbad. The return statement is on an elided line.
359 def __getCDVersion(self, diag_record, nodename):
361 #print "Getting kernel for: %s" % diag_record['nodename']
362 cdversion = diag_record['kernel']
# Diagnose all nodes of one site and decide the site-level 'email' and
# 'squeeze' flags. Returns d_diag_site (the dict literal opened at line
# 369 continues on elided lines, including the 'nodes' sub-dict).
365 def __diagnoseSite(self, loginbase, d_diag_nodes):
367 d_diag_nodes are diagnose_in entries.
369 d_diag_site = {loginbase : { 'config' :
376 sorted_nodes = d_diag_nodes.keys()
378 for nodename in sorted_nodes:
379 node_record = d_diag_nodes[nodename]
380 diag_record = self.__diagnoseNode(loginbase, node_record)
# __diagnoseNode returns None when there is nothing actionable.
382 if diag_record != None:
383 d_diag_site[loginbase]['nodes'][nodename] = diag_record
385 # NOTE: improvement means, we need to act/squeeze and email.
386 #print "DIAG_RECORD", diag_record
387 if 'monitor-end-record' in diag_record['stage'] or \
388 'nmreset' in diag_record['stage']:
389 # print "resetting loginbase!"
390 d_diag_site[loginbase]['config']['squeeze'] = True
391 d_diag_site[loginbase]['config']['email'] = True
393 # print "NO IMPROVEMENT!!!!"
395 pass # there is nothing to do for this node.
397 # NOTE: these settings can be overridden by command line arguments,
398 # or the state of a record, i.e. if already in RT's Support Queue.
399 pf = PersistFlags(loginbase, 1, db='site_persistflags')
400 nodes_up = pf.nodes_up
# NOTE(review): the condition guarding this squeeze (elided line 401,
# likely comparing nodes_up to a minimum threshold) is not visible.
402 d_diag_site[loginbase]['config']['squeeze'] = True
404 max_slices = self.getMaxSlices(loginbase)
405 num_nodes = pf.nodes_total #self.getNumNodes(loginbase)
406 # NOTE: when max_slices == 0, this is either a new site (the old way)
407 # or an old disabled site from previous monitor (before site['enabled'])
408 if nodes_up < num_nodes and max_slices != 0:
409 d_diag_site[loginbase]['config']['email'] = True
411 if len(d_diag_site[loginbase]['nodes'].keys()) > 0:
412 print "SITE: %20s : %d nodes up, at most" % (loginbase, nodes_up)
# Build a diag_record (message template, args, info tuple, log line) from
# a node record based on its findbad category/state: ERROR (down),
# OLDBOOTCD, PROD/debug, BOOT (possibly an improvement), plus several
# placeholder categories. NOTE(review): diag_record's initialization
# (presumably diag_record = {}) and the else: lines between the paired
# 'ticket_id == ""' log branches are elided throughout.
416 def diagRecordByCategory(self, node_record):
417 nodename = node_record['nodename']
418 category = node_record['category']
419 state = node_record['state']
420 loginbase = self.plcdb_hn2lb[nodename]
423 if "ERROR" in category: # i.e. "DOWN"
425 diag_record.update(node_record)
426 daysdown = self.__getDaysDown(diag_record, nodename)
428 # format = "DIAG: %20s : %-40s Down only %s days NOTHING DONE"
429 # print format % (loginbase, nodename, daysdown)
432 s_daysdown = self.__getStrDaysDown(diag_record, nodename)
433 diag_record['message'] = emailTxt.mailtxt.newdown
434 diag_record['args'] = {'nodename': nodename}
435 diag_record['info'] = (nodename, s_daysdown, "")
437 #if 'reboot_node_failed' in node_record:
438 # # there was a previous attempt to use the PCU.
439 # if node_record['reboot_node_failed'] == False:
440 # # then the last attempt apparently, succeeded.
441 # # But, the category is still 'ERROR'. Therefore, the
442 # # PCU-to-Node mapping is broken.
443 # #print "Setting message for ERROR node to PCU2NodeMapping: %s" % nodename
444 # diag_record['message'] = emailTxt.mailtxt.pcutonodemapping
445 # diag_record['email_pcu'] = True
# No ticket yet: log with the RT ticket found by the RT stage; otherwise
# (elided else:) log with the recorded ticket_id.
447 if diag_record['ticket_id'] == "":
448 diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
449 (loginbase, nodename, diag_record['info'][1:], diag_record['found_rt_ticket'])
451 diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
452 (loginbase, nodename, diag_record['info'][1:], diag_record['ticket_id'])
454 elif "OLDBOOTCD" in category:
455 # V2 boot cds as determined by findbad
456 s_daysdown = self.__getStrDaysDown(node_record, nodename)
457 s_cdversion = self.__getCDVersion(node_record, nodename)
459 diag_record.update(node_record)
460 #if "2.4" in diag_record['kernel'] or "v2" in diag_record['bootcd']:
461 diag_record['message'] = emailTxt.mailtxt.newbootcd
462 diag_record['args'] = {'nodename': nodename}
463 diag_record['info'] = (nodename, s_daysdown, s_cdversion)
464 if diag_record['ticket_id'] == "":
465 diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
466 (loginbase, nodename, diag_record['kernel'],
467 diag_record['bootcd'], diag_record['found_rt_ticket'])
469 diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
470 (loginbase, nodename, diag_record['kernel'],
471 diag_record['bootcd'], diag_record['ticket_id'])
# Production/debug nodes: nothing actionable yet, just log.
473 elif "PROD" in category:
475 # Not sure what to do with these yet. Probably need to
477 print "DEBG: %20s : %-40s NOTHING DONE" % (loginbase, nodename)
479 elif "BOOT" in state:
481 # TODO: remove penalties, if any are applied.
483 last_contact = node_record['plcnode']['last_contact']
484 if last_contact == None:
487 time_diff = now - last_contact;
# A node that was bad and is now booted: prepare the thank-you email.
489 if 'improvement' in node_record['stage']:
490 # then we need to pass this on to 'action'
492 diag_record.update(node_record)
493 diag_record['message'] = emailTxt.mailtxt.newthankyou
494 diag_record['args'] = {'nodename': nodename}
495 diag_record['info'] = (nodename, node_record['prev_category'],
496 node_record['category'])
497 #if 'email_pcu' in diag_record:
498 # if diag_record['email_pcu']:
499 # # previously, the pcu failed to reboot, so send
500 # # email. Now, reset these values to try the reboot
502 # diag_record['email_pcu'] = False
503 # del diag_record['reboot_node_failed']
505 if diag_record['ticket_id'] == "":
506 diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
507 (loginbase, nodename, diag_record['stage'],
508 state, category, diag_record['found_rt_ticket'])
510 diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
511 (loginbase, nodename, diag_record['stage'],
512 state, category, diag_record['ticket_id'])
# Disabled node-manager-reset path, kept for reference.
514 #elif time_diff >= 6*SPERHOUR:
515 # # heartbeat is older than 30 min.
517 # #print "Possible NM problem!! %s - %s = %s" % (now, last_contact, time_diff)
519 # diag_record.update(node_record)
520 # diag_record['message'] = emailTxt.mailtxt.NMReset
521 # diag_record['args'] = {'nodename': nodename}
522 # diag_record['stage'] = "nmreset"
523 # diag_record['info'] = (nodename,
524 # node_record['prev_category'],
525 # node_record['category'])
526 # if diag_record['ticket_id'] == "":
527 # diag_record['log'] = "NM : %20s : %-40s == %20s %20s %s %s" % \
528 # (loginbase, nodename, diag_record['stage'],
529 # state, category, diag_record['found_rt_ticket'])
531 # diag_record['log'] = "NM : %20s : %-40s == %20s" % \
532 # (loginbase, nodename, diag_record['stage'])
# Placeholder categories — their bodies are on elided lines (likely pass
# or a return of None); the final branch flags an unknown category.
540 elif "ALPHA" in category:
542 elif "clock_drift" in category:
544 elif "dns" in category:
546 elif "filerw" in category:
549 print "Unknown category!!!! %s" % category
# Core per-node state machine. Compares current vs previous category,
# consults RT ticket status, builds a diag_record via diagRecordByCategory,
# and advances the record through the escalation stages:
#   findbad -> stage_actinoneweek -> stage_actintwoweeks ->
#   stage_waitforever / ticket_waitforever -> monitor-end-record
# Returns an act_record for the Action stage, or None when there is
# nothing to do yet. NOTE(review): many else:/elif lines, act_record's
# initialization, and the 'val' comparison branches are elided.
554 def __diagnoseNode(self, loginbase, node_record):
555 # TODO: change the format of the hostname in this
556 # record to something more natural.
557 nodename = node_record['nodename']
558 category = node_record['category']
559 prev_category = node_record['prev_category']
560 state = node_record['state']
561 #if 'prev_category' in node_record:
562 # prev_category = node_record['prev_category']
564 # prev_category = "ERROR"
# Only compare when we have real history (NORECORD = new node).
565 if node_record['prev_category'] != "NORECORD":
567 val = cmpCategoryVal(category, prev_category)
568 print "%s went from %s -> %s" % (nodename, prev_category, category)
# Improvement branch (the surrounding comparison on 'val' is elided).
569 if prev_category == "UNKNOWN" and category == "PROD":
570 # sending too many thank you notes to people that don't
572 # TODO: not sure what effect this will have on the node
# No ticket was ever opened for this improvement: close it out silently.
578 if node_record['ticket_id'] == "" or node_record['ticket_id'] == None:
579 print "closing record with no ticket: ", node_record['nodename']
580 node_record['action'] = ['close_rt']
581 node_record['message'] = None
582 node_record['stage'] = 'monitor-end-record'
# Otherwise mark as an improvement so Action can back off penalties.
585 node_record['stage'] = 'improvement'
587 #if 'monitor-end-record' in node_record['stage']:
588 # # just ignore it if it's already ended.
589 # # otherwise, the status should be worse, and we won't get
591 # print "monitor-end-record: ignoring ", node_record['nodename']
596 # current category is worse than previous, carry on
599 #values are equal, carry on.
600 #print "why are we here?"
# React to RT ticket status before advancing stages.
603 if 'rt' in node_record and 'Status' in node_record['rt']:
604 if node_record['stage'] == 'ticket_waitforever':
# Ticket resolved: end the waitforever record.
605 if 'resolved' in node_record['rt']['Status']:
606 print "ending waitforever record for: ", node_record['nodename']
607 node_record['action'] = ['noop']
608 node_record['message'] = None
609 node_record['stage'] = 'monitor-end-record'
610 print "oldlog: %s" % node_record['log'],
611 print "%15s" % node_record['action']
# Ticket reopened as 'new' in the Monitor queue: restart the cycle.
613 if 'new' in node_record['rt']['Status'] and \
614 'Queue' in node_record['rt'] and \
615 'Monitor' in node_record['rt']['Queue']:
617 print "RESETTING stage to findbad"
618 node_record['stage'] = 'findbad'
620 #### COMPARE category and prev_category
622 # then assign a stage based on relative priorities
624 # then check category for stats.
625 diag_record = self.diagRecordByCategory(node_record)
626 if diag_record == None:
627 #print "diag_record == None"
631 # TODO: need to record time found, and maybe add a stage for acting on it...
632 # NOTE: after found, if the support ticket is resolved, the block is
633 # not removed. How to remove the block on this?
635 #if 'found_rt_ticket' in diag_record and \
636 # diag_record['found_rt_ticket'] is not None:
637 # if diag_record['stage'] is not 'improvement':
638 # diag_record['stage'] = 'ticket_waitforever'
# delta = seconds since the record's escalation clock was last reset.
640 current_time = time.time()
641 # take off four days, for the delay that database caused.
642 # TODO: generalize delays at PLC, and prevent enforcement when there
643 # have been no emails.
644 # NOTE: 7*SPERDAY exists to offset the 'bad week'
645 #delta = current_time - diag_record['time'] - 7*SPERDAY
646 delta = current_time - diag_record['time']
648 message = diag_record['message']
# NOTE(review): act_record's initialization (presumably {}) is elided.
650 act_record.update(diag_record)
# --- stage dispatch: each branch sets email roles, action list, message
# --- index (0/1/2 = first/second/final notice) and next stage.
653 if 'findbad' in diag_record['stage']:
654 # The node is bad, and there's no previous record of it.
655 act_record['email'] = TECH
656 act_record['action'] = ['noop']
657 act_record['message'] = message[0]
658 act_record['stage'] = 'stage_actinoneweek'
660 elif 'nmreset' in diag_record['stage']:
661 act_record['email'] = ADMIN
662 act_record['action'] = ['reset_nodemanager']
663 act_record['message'] = message[0]
664 act_record['stage'] = 'nmreset'
667 elif 'reboot_node' in diag_record['stage']:
668 act_record['email'] = TECH
669 act_record['action'] = ['noop']
670 act_record['message'] = message[0]
671 act_record['stage'] = 'stage_actinoneweek'
673 elif 'improvement' in diag_record['stage']:
674 # - backoff previous squeeze actions (slice suspend, nocreate)
675 # TODO: add a backoff_squeeze section... Needs to runthrough
676 print "backing off of %s" % nodename
677 act_record['action'] = ['close_rt']
678 act_record['message'] = message[0]
679 act_record['stage'] = 'monitor-end-record'
# One week elapsed: escalate to PIs and stop slice creation; at three
# days send the tech a reminder once; otherwise wait (return None).
681 elif 'actinoneweek' in diag_record['stage']:
682 if delta >= 7 * SPERDAY:
683 act_record['email'] = TECH | PI
684 act_record['stage'] = 'stage_actintwoweeks'
685 act_record['message'] = message[1]
686 act_record['action'] = ['nocreate' ]
687 act_record['time'] = current_time # reset clock for waitforever
688 elif delta >= 3* SPERDAY and not 'second-mail-at-oneweek' in act_record:
689 act_record['email'] = TECH
690 act_record['message'] = message[0]
691 act_record['action'] = ['sendmailagain-waitforoneweekaction' ]
692 act_record['second-mail-at-oneweek'] = True
694 act_record['message'] = None
695 act_record['action'] = ['waitforoneweekaction' ]
696 print "ignoring this record for: %s" % act_record['nodename']
697 return None # don't send if there's no action
# Two-week stage: escalate to users and suspend slices after 7 more days.
699 elif 'actintwoweeks' in diag_record['stage']:
700 if delta >= 7 * SPERDAY:
701 act_record['email'] = TECH | PI | USER
702 act_record['stage'] = 'stage_waitforever'
703 act_record['message'] = message[2]
704 act_record['action'] = ['suspendslices']
705 act_record['time'] = current_time # reset clock for waitforever
706 elif delta >= 3* SPERDAY and not 'second-mail-at-twoweeks' in act_record:
707 act_record['email'] = TECH | PI
708 act_record['message'] = message[1]
709 act_record['action'] = ['sendmailagain-waitfortwoweeksaction' ]
710 act_record['second-mail-at-twoweeks'] = True
712 act_record['message'] = None
713 act_record['action'] = ['waitfortwoweeksaction']
714 return None # don't send if there's no action
# A support ticket exists: just wait on it (clock reset on first sight
# and every 7 days; the intervening else: lines are elided).
716 elif 'ticket_waitforever' in diag_record['stage']:
717 act_record['email'] = TECH
718 if 'first-found' not in act_record:
719 act_record['first-found'] = True
720 act_record['log'] += " firstfound"
721 act_record['action'] = ['ticket_waitforever']
722 act_record['message'] = None
723 act_record['time'] = current_time
725 if delta >= 7*SPERDAY:
726 act_record['action'] = ['ticket_waitforever']
727 act_record['message'] = None
728 act_record['time'] = current_time # reset clock
730 act_record['action'] = ['ticket_waitforever']
731 act_record['message'] = None
734 elif 'waitforever' in diag_record['stage']:
735 # more than 3 days since last action
736 # TODO: send only on weekdays.
737 # NOTE: expects that 'time' has been reset before entering waitforever stage
738 if delta >= 3*SPERDAY:
739 act_record['action'] = ['email-againwaitforever']
740 act_record['message'] = message[2]
741 act_record['time'] = current_time # reset clock
743 act_record['action'] = ['waitforever']
744 act_record['message'] = None
745 return None # don't send if there's no action
748 # There is no action to be taken, possibly b/c the stage has
749 # already been performed, but diagnose picked it up again.
751 # 1. stage is unknown, or
752 # 2. delta is not big enough to bump it to the next stage.
753 # TODO: figure out which. for now assume 2.
754 print "UNKNOWN stage for %s; nothing done" % nodename
755 act_record['action'] = ['unknown']
756 act_record['message'] = message[0]
758 act_record['email'] = TECH
759 act_record['action'] = ['noop']
760 act_record['message'] = message[0]
761 act_record['stage'] = 'stage_actinoneweek'
762 act_record['time'] = current_time # reset clock
# Final trace of the decision (the return of act_record is elided).
767 print "%s" % act_record['log'],
768 print "%15s" % act_record['action']
# Look up the site's max_slices from the findbad 'plcsite' values of any
# of its diagnosed nodes. NOTE(review): site_stats' initialization to None
# and the loop's break (elided lines) are assumed from the structure.
771 def getMaxSlices(self, loginbase):
772 # if sickdb has a loginbase, then it will have at least one node.
775 for nodename in self.diagnose_in[loginbase].keys():
776 if nodename in self.findbad['nodes']:
777 site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
# Raised when no diagnosed node for the site appears in findbad.
780 if site_stats == None:
781 raise Exception, "loginbase with no nodes in findbad"
783 return site_stats['max_slices']
# Same lookup pattern as getMaxSlices, returning the site's num_nodes
# from any diagnosed node's 'plcsite' values.
785 def getNumNodes(self, loginbase):
786 # if sickdb has a loginbase, then it will have at least one node.
789 for nodename in self.diagnose_in[loginbase].keys():
790 if nodename in self.findbad['nodes']:
791 site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
794 if site_stats == None:
795 raise Exception, "loginbase with no nodes in findbad"
797 return site_stats['num_nodes']
# (Docstring text for the method below; its quote delimiters are on
# elided lines.)
800 Returns number of up nodes as the total number *NOT* in act_all with a
801 stage other than 'steady-state' .
# Estimate up-node count for a site: start from the total and discount
# nodes whose diagnosis says they are not ended. The counter arithmetic
# and return are on elided lines.
803 def getUpAtSite(self, loginbase, d_diag_site):
804 # TODO: THIS DOESN"T WORK!!! it misses all the 'debug' state nodes
805 # that aren't recorded yet.
807 numnodes = self.getNumNodes(loginbase)
808 # NOTE: assume nodes we have no record of are ok. (too conservative)
809 # TODO: make the 'up' value more representative
811 for nodename in d_diag_site[loginbase]['nodes'].keys():
813 rec = d_diag_site[loginbase]['nodes'][nodename]
814 if rec['stage'] != 'monitor-end-record':
817 pass # the node is assumed to be up.
820 # print "ERROR: %s total nodes up and down != %d" % (loginbase, numnodes)
# Action-stage constructor (enclosing class def elided): takes the
# Diagnose output and builds the action-name -> callable dispatch table.
826 def __init__(self, diagnose_out):
827 # the hostname to loginbase mapping
828 self.plcdb_hn2lb = database.dbLoad("plcdb_hn2lb")
831 self.diagnose_db = diagnose_out
833 self.act_all = database.if_cached_else(1, "act_all", lambda : {})
835 # A dict of actions to specific functions. PICKLE doesnt' like lambdas.
# NOTE(review): self.actions' initialization ({}) is on an elided line.
# Real side-effecting actions:
837 self.actions['suspendslices'] = lambda args: plc.suspendSlices(args['hostname'])
838 self.actions['nocreate'] = lambda args: plc.removeSliceCreation(args['hostname'])
839 self.actions['close_rt'] = lambda args: close_rt_backoff(args)
840 self.actions['rins'] = lambda args: plc.nodeBootState(args['hostname'], "rins")
841 self.actions['noop'] = lambda args: args
842 self.actions['reboot_node'] = lambda args: reboot_node(args)
843 self.actions['reset_nodemanager'] = lambda args: args # reset_nodemanager(args)
# Bookkeeping-only stages: identity no-ops so dispatch never KeyErrors.
845 self.actions['ticket_waitforever'] = lambda args: args
846 self.actions['waitforever'] = lambda args: args
847 self.actions['unknown'] = lambda args: args
848 self.actions['waitforoneweekaction'] = lambda args: args
849 self.actions['waitfortwoweeksaction'] = lambda args: args
850 self.actions['sendmailagain-waitforoneweekaction'] = lambda args: args
851 self.actions['sendmailagain-waitfortwoweeksaction'] = lambda args: args
852 self.actions['email-againwaitforever'] = lambda args: args
853 self.actions['email-againticket_waitforever'] = lambda args: args
# Action-stage driver fragment (its def and try: lines are elided):
# run analyseSites, then persist act_all/diagnose_out. The saves appear
# twice — in the exception path (lines 868-871) and the normal path
# (lines 881-887); looks deliberate so state survives a crash — confirm.
859 #logger.debug("Accumulated %d sick sites" % len(self.sickdb.keys()))
862 stats = self.analyseSites()
863 except Exception, err:
864 print "----------------"
866 print traceback.print_exc()
868 if config.policysavedb:
869 print "Saving Databases... act_all"
870 database.dbDump("act_all", self.act_all)
871 database.dbDump("diagnose_out", self.diagnose_db)
874 #print_stats("sites_observed", stats)
875 #print_stats("sites_diagnosed", stats)
876 #print_stats("nodes_diagnosed", stats)
877 print_stats("sites_emailed", stats)
878 #print_stats("nodes_actedon", stats)
879 print string.join(stats['allsites'], ",")
881 if config.policysavedb:
882 print "Saving Databases... act_all"
883 #database.dbDump("policy.eventlog", self.eventlog)
884 # TODO: remove 'diagnose_out',
885 # or at least the entries that were acted on.
886 database.dbDump("act_all", self.act_all)
887 database.dbDump("diagnose_out", self.diagnose_db)
# Point sickdb at the Diagnose output. Despite the docstring's mention of
# filtering by l_action, the visible implementation is a plain alias
# (no copy) of diagnose_db.
889 def accumSites(self):
891 Take all nodes, from l_action, look them up in the diagnose_db database,
892 and insert them into sickdb[] as:
894 This way only the given l_action nodes will be acted on regardless
895 of how many from diagnose_db are available.
897 sickdb[loginbase][nodename] = diag_record
899 self.sickdb = self.diagnose_db
# Compose and send a site notification. Builds the contact list from the
# 'roles' bitmask (TECH/PI/USER alias templates), formats subject/body
# from the message template and args, then sends either directly or via
# RT. NOTE(review): the contacts initialization, the role checks guarding
# lines 918-927, the else: lines, the try:, and the final return of
# ticket_id are all elided.
901 def __emailSite(self, loginbase, roles, message, args):
903 loginbase is the unique site abbreviation, prepended to slice names.
904 roles contains TECH, PI, USER roles, and derive email aliases.
905 record contains {'message': [<subj>,<body>], 'args': {...}}
908 args.update({'loginbase':loginbase})
# Debug/dry-run routing of recipients.
910 if not config.mail and not config.debug and config.bcc:
912 if config.mail and config.debug:
918 contacts += [config.email]
920 contacts += [TECHEMAIL % loginbase]
922 contacts += [PIEMAIL % loginbase]
# USER role: one alias per slice at the site.
924 slices = plc.slices(loginbase)
927 contacts += [SLICEMAIL % slice]
928 print "SLIC: %20s : %d slices" % (loginbase, len(slices))
930 print "SLIC: %20s : 0 slices" % loginbase
933 subject = message[0] % args
934 body = message[1] % args
# Direct-mail path (reply threading on the existing ticket id) vs the
# RT path below which creates/updates the ticket itself.
937 if 'ticket_id' in args:
938 subj = "Re: [PL #%s] %s" % (args['ticket_id'], subject)
940 subj = "Re: [PL noticket] %s" % subject
941 mailer.email(subj, body, contacts)
942 ticket_id = args['ticket_id']
944 ticket_id = mailer.emailViaRT(subject, body, contacts, args['ticket_id'])
945 except Exception, err:
946 print "exception on message:"
948 print traceback.print_exc()
# Render one node's info tuple as a one-line summary for the email body:
# end-records report the category transition, others report
# (node, version, days-down). The return of hlist is on an elided line.
954 def _format_diaginfo(self, diag_node):
955 info = diag_node['info']
956 if diag_node['stage'] == 'monitor-end-record':
957 hlist = " %s went from '%s' to '%s'\n" % (info[0], info[1], info[2])
959 hlist = " %s %s - %s\n" % (info[0], info[2], info[1]) #(node,ver,daysdn)
# Collect template substitution args (hostname list, boot-CD URLs, pcu_id,
# ticket_id) for one issue's records. NOTE(review): email_args' and
# ticket_id's initialization, the else: lines, and the try: around the
# stdin read are elided; the final return is also elided.
963 def get_email_args(self, act_recordlist, loginbase=None):
966 email_args['hostname_list'] = ""
967 email_args['url_list'] = ""
969 for act_record in act_recordlist:
970 email_args['hostname_list'] += act_record['msg_format']
971 email_args['hostname'] = act_record['nodename']
# NOTE(review): the '%s' in these URLs is never substituted on the
# visible lines — either the substitution happens on an elided line or
# the emails contain a literal '%s'. Verify.
972 email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/iso/%s.iso\n"
973 email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/usb/%s.usb\n"
974 email_args['url_list'] += "\n"
975 if 'plcnode' in act_record and \
976 'pcu_ids' in act_record['plcnode'] and \
977 len(act_record['plcnode']['pcu_ids']) > 0:
978 print "setting 'pcu_id' for email_args %s"%email_args['hostname']
979 email_args['pcu_id'] = act_record['plcnode']['pcu_ids'][0]
981 email_args['pcu_id'] = "-1"
983 if 'ticket_id' in act_record:
# Interactive fallback: a zero ticket id means a previous send failed,
# so prompt the operator for the real id.
984 if act_record['ticket_id'] == 0 or act_record['ticket_id'] == '0':
985 print "Enter the ticket_id for %s @ %s" % (loginbase, act_record['nodename'])
987 line = sys.stdin.readline()
989 ticket_id = int(line)
991 print "could not get ticket_id from stdin..."
994 ticket_id = act_record['ticket_id']
996 email_args['ticket_id'] = ticket_id
# Group act records by their primary action key so only one email is sent
# per site per problem type. unique_issues' initialization ({}) is on an
# elided line.
1000 def get_unique_issues(self, act_recordlist):
1001 # NOTE: only send one email per site, per problem...
1003 for act_record in act_recordlist:
# The first action in the list identifies the issue type.
1004 act_key = act_record['action'][0]
1005 if act_key not in unique_issues:
1006 unique_issues[act_key] = []
1008 unique_issues[act_key] += [act_record]
1010 return unique_issues
# Apply the diagnosed actions for one site: act on each node, group by
# issue, email once per issue, record the RT ticket id on every node of
# the issue, then run the squeeze actions. Returns (acted_on, emailed)
# counters. NOTE(review): act_recordlist's and the counters'
# initialization, plus several guard lines, are elided.
1013 def __actOnSite(self, loginbase, site_record):
1019 for nodename in site_record['nodes'].keys():
1020 diag_record = site_record['nodes'][nodename]
1021 act_record = self.__actOnNode(diag_record)
1022 #print "nodename: %s %s" % (nodename, act_record)
1023 if act_record is not None:
1024 act_recordlist += [act_record]
1026 unique_issues = self.get_unique_issues(act_recordlist)
1028 for issue in unique_issues.keys():
1029 print "\tworking on issue: %s" % issue
1030 issue_record_list = unique_issues[issue]
1031 email_args = self.get_email_args(issue_record_list, loginbase)
# Disabled PCU-failure email path, kept for reference.
1034 #for act_record in issue_record_list:
1035 # # if there's a pcu record and email config is set
1036 # if 'email_pcu' in act_record:
1037 # if act_record['message'] != None and act_record['email_pcu'] and site_record['config']['email']:
1038 # # and 'reboot_node' in act_record['stage']:
1040 # email_args['hostname'] = act_record['nodename']
1041 # ticket_id = self.__emailSite(loginbase,
1042 # act_record['email'],
1043 # emailTxt.mailtxt.pcudown[0],
1045 # if ticket_id == 0:
1047 # print "got a ticket_id == 0!!!! %s" % act_record['nodename']
1050 # email_args['ticket_id'] = ticket_id
# One representative record per issue drives the email/squeeze choice.
1053 act_record = issue_record_list[0]
1054 # send message before squeezing
1055 print "\t\tconfig.email: %s and %s" % (act_record['message'] != None,
1056 site_record['config']['email'])
1057 if act_record['message'] != None and site_record['config']['email']:
1058 ticket_id = self.__emailSite(loginbase, act_record['email'],
1059 act_record['message'], email_args)
# A zero ticket id indicates the RT send failed.
1063 print "ticket_id == 0 for %s %s" % (loginbase, act_record['nodename'])
1068 # Add ticket_id to ALL nodenames
1069 for act_record in issue_record_list:
1070 nodename = act_record['nodename']
1071 # update node record with RT ticket_id
1072 if nodename in self.act_all:
1073 self.act_all[nodename][0]['ticket_id'] = "%s" % ticket_id
1074 # if the ticket was previously resolved, reset it to new.
1075 if 'rt' in act_record and \
1076 'Status' in act_record['rt'] and \
1077 act_record['rt']['Status'] == 'resolved':
1078 mailer.setTicketStatus(ticket_id, "new")
1079 status = mailer.getTicketStatus(ticket_id)
1080 self.act_all[nodename][0]['rt'] = status
1081 if config.mail: i_nodes_emailed += 1
1083 print "\t\tconfig.squeeze: %s and %s" % (config.squeeze,
1084 site_record['config']['squeeze'])
# Squeeze: dispatch every action of the representative record through
# the actions table built in __init__.
1085 if config.squeeze and site_record['config']['squeeze']:
1086 for act_key in act_record['action']:
1087 self.actions[act_key](email_args)
1088 i_nodes_actedon += 1
1090 if config.policysavedb:
1091 #print "Saving Databases... act_all, diagnose_out"
1092 #database.dbDump("act_all", self.act_all)
1093 # remove site record from diagnose_out, it's in act_all as done.
1094 del self.diagnose_db[loginbase]
1095 #database.dbDump("diagnose_out", self.diagnose_db)
1097 print "sleeping for 1 sec"
1099 #print "Hit enter to continue..."
1101 #line = sys.stdin.readline()
1103 return (i_nodes_actedon, i_nodes_emailed)
    def __actOnNode(self, diag_record):
        """Turn one diagnose record into an action record and log it.

        The diagnose record is copied into act_record, annotated with a
        formatted message, and -- unless its stage marks an end-of-monitoring
        or nmreset record -- pushed onto the front of the act_all history for
        this node.

        NOTE(review): the creation of act_record (presumably an empty dict,
        orig. lines 1108-1109) and the final return statement (orig. lines
        1173-1175) are elided from this chunk -- confirm against the full
        file; callers in __actOnSite treat a None return as "no action".
        """
        nodename = diag_record['nodename']
        message = diag_record['message']

        act_record.update(diag_record)
        act_record['nodename'] = nodename
        # human-readable rendering of the diagnosis, used in the email body
        act_record['msg_format'] = self._format_diaginfo(diag_record)
        print "act_record['stage'] == %s " % act_record['stage']

        # avoid end records, and nmreset records
        # reboot_node_failed, is set below, so don't reboot repeatedly.

        # (disabled) automatic PCU reboot / reinstall path
        #if 'monitor-end-record' not in act_record['stage'] and \
        #   'nmreset' not in act_record['stage'] and \
        #   'reboot_node_failed' not in act_record:
        #	if "DOWN" in act_record['log'] and \
        #	   'pcu_ids' in act_record['plcnode'] and \
        #	   len(act_record['plcnode']['pcu_ids']) > 0:
        #		print "%s" % act_record['log'],
        #		print "%15s" % (['reboot_node'],)
        #		# Set node to re-install
        #		plc.nodeBootState(act_record['nodename'], "rins")
        #		ret = reboot_node({'hostname': act_record['nodename']})
        #		except Exception, exc:
        #			print "exception on reboot_node:"
        #			print traceback.print_exc()
        #		if ret: # and ( 'reboot_node_failed' not in act_record or act_record['reboot_node_failed'] == False):
        #			# Reboot Succeeded
        #			print "reboot succeeded for %s" % act_record['nodename']
        #			act_record2.update(act_record)
        #			act_record2['action'] = ['reboot_node']
        #			act_record2['stage'] = "reboot_node"
        #			act_record2['reboot_node_failed'] = False
        #			act_record2['email_pcu'] = False
        #			if nodename not in self.act_all:
        #				self.act_all[nodename] = []
        #			print "inserting 'reboot_node' record into act_all"
        #			self.act_all[nodename].insert(0,act_record2)
        #			# return None to avoid further action
        #			print "Taking no further action"
        #			print "reboot failed for %s" % act_record['nodename']
        #			# set email_pcu to also send pcu notice for this record.
        #			act_record['reboot_node_failed'] = True
        #			act_record['email_pcu'] = True
        #			print "%s" % act_record['log'],
        #			print "%15s" % act_record['action']

        # NOTE(review): `is not` on string literals compares object
        # identity, not equality -- this only works by CPython string
        # interning accident and should be `!=` in both comparisons.
        if act_record['stage'] is not 'monitor-end-record' and \
           act_record['stage'] is not 'nmreset':
            if nodename not in self.act_all:
                self.act_all[nodename] = []

            self.act_all[nodename].insert(0,act_record)

        # NOTE(review): belongs under an `else:` branch whose line is
        # elided from this chunk (orig. line 1171).
        print "Not recording %s in act_all" % nodename
    def analyseSites(self):
        """Walk every site in sickdb, act on it, and return summary counters.

        For each loginbase in self.sickdb the per-site handler __actOnSite()
        is invoked; its (acted-on, emailed) counts are accumulated and a
        summary dict is returned for reporting.

        Returns a dict with keys 'sites_observed', 'sites_diagnosed',
        'nodes_diagnosed', 'sites_emailed', 'nodes_actedon' and 'allsites'
        (list of processed login bases).

        NOTE(review): initializations of i_nodes_actedon, i_sites_emailed
        and l_allsites are elided from this chunk (orig. lines 1180-1183).
        """
        i_sites_observed = 0
        i_sites_diagnosed = 0
        i_nodes_diagnosed = 0

        sorted_sites = self.sickdb.keys()

        for loginbase in sorted_sites:
            site_record = self.sickdb[loginbase]
            print "sites: %s" % loginbase

            # NOTE(review): this counts the top-level keys of site_record;
            # __actOnSite reads nodes from site_record['nodes'], so if that
            # structure holds here this count is wrong -- confirm intended
            # record shape.
            i_nodes_diagnosed += len(site_record.keys())
            i_sites_diagnosed += 1

            (na,ne) = self.__actOnSite(loginbase, site_record)

            i_sites_observed += 1
            i_nodes_actedon += na
            # NOTE(review): __actOnSite returns a *nodes*-emailed count, yet
            # it is accumulated here as sites_emailed and reported under
            # 'sites_emailed' below -- naming mismatch worth confirming.
            i_sites_emailed += ne

            l_allsites += [loginbase]

        return {'sites_observed': i_sites_observed,
                'sites_diagnosed': i_sites_diagnosed,
                'nodes_diagnosed': i_nodes_diagnosed,
                'sites_emailed': i_sites_emailed,
                'nodes_actedon': i_nodes_actedon,
                'allsites':l_allsites}
1208 def print_stats(self, key, stats):
1209 print "%20s : %d" % (key, stats[key])