1 from config import config
7 from www.printbadnodes import cmpCategoryVal
12 from policy import get_ticket_id, print_stats, close_rt_backoff, reboot_node
13 from rt import is_host_in_rt_tickets
16 # Time to enforce policy
19 # Where to email the summary
# NOTE(review): listing gap here — SPERDAY (presumably seconds-per-day, 60*60*24)
# and the TECH/PI/USER/ADMIN role flags are defined on lines missing from this view.
20 SUMTO = "soltesz@cs.princeton.edu"
# Per-site mail aliases; each takes a loginbase via the %s placeholder.
21 TECHEMAIL="tech-%s@sites.planet-lab.org"
22 PIEMAIL="pi-%s@sites.planet-lab.org"
23 SLICEMAIL="%s@slices.planet-lab.org"
24 PLCEMAIL="support@planet-lab.org"
# Escalation thresholds, expressed in seconds (N days * SPERDAY).
30 PITHRESH = 7 * SPERDAY
31 SLICETHRESH = 7 * SPERDAY
32 # Days before attempting rins again
33 RINSTHRESH = 5 * SPERDAY
35 # Days before calling the node dead.
36 DEADTHRESH = 30 * SPERDAY
37 # Minimum number of nodes up before squeezing
# Constructor for the merge stage: loads the cached findbad/act_all databases
# and remembers which hostnames (l_merge) this run should process.
# (The enclosing class header is on a line missing from this listing.)
46 def __init__(self, l_merge):
47 self.merge_list = l_merge
49 # the hostname to loginbase mapping
50 self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
52 # Previous actions taken on nodes.
53 self.act_all = soltesz.if_cached_else(1, "act_all", lambda : {})
54 self.findbad = soltesz.if_cached_else(1, "findbad", lambda : {})
# NOTE(review): cache_all loads the SAME "act_all" cache as act_all above —
# presumably a working copy that gets entries deleted during the merge; confirm intent.
56 self.cache_all = soltesz.if_cached_else(1, "act_all", lambda : {})
# NOTE(review): lines 57-62 are missing from this listing; the statements below
# (ending in a `return`) belong to a separate method (likely run()) whose
# `def` line is in that gap — they are not part of __init__.
63 # read data from findbad and act_all
64 self.mergeActionsAndBadDB()
65 # pass node_records to RT
66 return self.getRecordList()
# Build sickdb from the findbad results, restricted to self.merge_list.
68 def accumSickSites(self):
70 Take all nodes, from l_diagnose, look them up in the act_all database,
71 and insert them into sickdb[] as:
73 sickdb[loginbase][nodename] = fb_record
75 # look at all problems reported by findbad
76 l_nodes = self.findbad['nodes'].keys()
78 for nodename in l_nodes:
79 if nodename not in self.merge_list:
80 continue # skip this node, since it's not wanted
83 loginbase = self.plcdb_hn2lb[nodename]
84 values = self.findbad['nodes'][nodename]['values']
# NOTE(review): the fb_record initialization (e.g. `fb_record = {}`) sits in the
# listing gap before line 87; lines 90-92 likely hold a try/except around the
# 'category' lookup, with the print below as the failure diagnostic.
87 fb_record['nodename'] = nodename
89 fb_record['category'] = values['category']
93 print self.findbad['nodes'][nodename]
# Copy the findbad observation into a fresh record and stamp it as stage "findbad".
96 fb_record['state'] = values['state']
97 fb_record['comonstats'] = values['comonstats']
98 fb_record['plcnode'] = values['plcnode']
99 fb_record['kernel'] = self.getKernel(values['kernel'])
100 fb_record['stage'] = "findbad"
101 fb_record['message'] = None
102 fb_record['bootcd'] = values['bootcd']
103 fb_record['args'] = None
104 fb_record['info'] = None
105 fb_record['time'] = time.time()
106 fb_record['date_created'] = time.time()
108 if loginbase not in self.sickdb:
109 self.sickdb[loginbase] = {}
111 self.sickdb[loginbase][nodename] = fb_record
# NOTE(review): `count` is not visibly initialized/incremented in this view —
# presumably maintained on the missing lines.
113 print "Found %d nodes" % count
# Normalize a `uname` string into a kernel-version value; body (lines 116-120)
# is missing from this listing.
115 def getKernel(self, unamestr):
# Merge the current findbad observations (sickdb) with the action history
# (act_all) into mergedb, attaching previous category, ticket id and RT status.
122 def mergeActionsAndBadDB(self):
124 - Look at the sick node_records as reported in findbad,
125 - Then look at the node_records in act_all.
127 There are four cases:
128 1) Problem in findbad, no problem in act_all
129 this ok, b/c it just means it's a new problem
130 2) Problem in findbad, problem in act_all
131 -Did the problem get better or worse?
132 -If Same, or Worse, then continue looking for open tickets.
133 -If Better, or No problem, then "back-off" penalties.
134 This judgement may need to wait until 'Diagnose()'
136 3) No problem in findbad, problem in act_all
137 Then the node is operational again according to Findbad()
139 4) No problem in findbad, no problem in act_all
140 There won't be a record in either db, so there's no code.
143 sorted_sites = self.sickdb.keys()
145 # look at all problems reported by findbad
146 for loginbase in sorted_sites:
147 d_fb_nodes = self.sickdb[loginbase]
148 sorted_nodes = d_fb_nodes.keys()
150 for nodename in sorted_nodes:
151 fb_record = self.sickdb[loginbase][nodename]
153 if loginbase not in self.mergedb:
154 self.mergedb[loginbase] = {}
156 # take the info either from act_all or fb-record.
157 # if node not in act_all
158 # then take it from fbrecord, obviously.
159 # else node in act_all
160 # if act_all == 0 length (no previous records)
161 # then take it from fbrecord.
163 # take it from act_all.
# NOTE(review): `x` below is presumably bound to fb_record on a missing line
# (around 164-165) — confirm; the `else:` lines between the three branches
# (before 174 and 180) are also in listing gaps.
166 # We must compare findbad state with act_all state
167 if nodename not in self.act_all:
168 # 1) ok, b/c it's a new problem. set ticket_id to null
169 self.mergedb[loginbase][nodename] = {}
170 self.mergedb[loginbase][nodename].update(x)
171 self.mergedb[loginbase][nodename]['ticket_id'] = ""
172 self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
174 if len(self.act_all[nodename]) == 0:
175 self.mergedb[loginbase][nodename] = {}
176 self.mergedb[loginbase][nodename].update(x)
177 self.mergedb[loginbase][nodename]['ticket_id'] = ""
178 self.mergedb[loginbase][nodename]['prev_category'] = "NORECORD"
# Case 2): a previous action record exists — start from it (y), then overlay
# the fresh findbad observations (x) and refresh the RT ticket status.
180 y = self.act_all[nodename][0]
181 y['prev_category'] = y['category']
183 self.mergedb[loginbase][nodename] = {}
184 self.mergedb[loginbase][nodename].update(y)
185 self.mergedb[loginbase][nodename]['comonstats'] = x['comonstats']
186 self.mergedb[loginbase][nodename]['category'] = x['category']
187 self.mergedb[loginbase][nodename]['state'] = x['state']
188 self.mergedb[loginbase][nodename]['kernel']=x['kernel']
189 self.mergedb[loginbase][nodename]['bootcd']=x['bootcd']
190 self.mergedb[loginbase][nodename]['plcnode']=x['plcnode']
191 ticket = get_ticket_id(self.mergedb[loginbase][nodename])
192 self.mergedb[loginbase][nodename]['rt'] = mailer.getTicketStatus(ticket)
194 # delete the entry from cache_all to keep it out of case 3)
195 del self.cache_all[nodename]
197 # 3) nodes that remain in cache_all were not identified by findbad.
198 # Do we keep them or not?
199 # NOTE: i think that since the categories are performed before this
200 # step now, and by a monitor-controlled agent.
# Flatten mergedb (site -> node -> record) into a single list of records.
# NOTE(review): `ret_list` initialization (lines 206-208) and the final
# `return ret_list` (~line 216) are in listing gaps.
204 def getRecordList(self):
205 sorted_sites = self.mergedb.keys()
209 # look at all problems reported by merge
210 for loginbase in sorted_sites:
211 d_merge_nodes = self.mergedb[loginbase]
212 for nodename in d_merge_nodes.keys():
213 record = self.mergedb[loginbase][nodename]
214 ret_list.append(record)
# Constructor for the RT-lookup stage: remembers the node records, the ticket
# cache and the ticket blacklist. (Enclosing class header not in this view.)
219 def __init__(self, record_list, dbTickets, l_ticket_blacklist, target = None):
220 # Time of last update of ticket DB
221 self.record_list = record_list
222 self.dbTickets = dbTickets
224 self.l_ticket_blacklist = l_ticket_blacklist
# NOTE(review): lines 225-229 are missing — the loop below almost certainly
# belongs to a separate method (its `def`, `ret_list` init, and `self.count`
# init are in that gap), not to __init__.
230 for diag_node in self.record_list:
231 if diag_node != None:
232 host = diag_node['nodename']
# Ask RT whether this host already appears in an open (non-blacklisted) ticket.
233 (b_host_inticket, r_ticket) = is_host_in_rt_tickets(host, \
234 self.l_ticket_blacklist, \
236 diag_node['found_rt_ticket'] = None
238 #logger.debug("RT: found tickets for %s" %host)
239 diag_node['found_rt_ticket'] = r_ticket['ticket_id']
242 if r_ticket is not None:
243 print "Ignoring ticket %s" % r_ticket['ticket_id']
244 # TODO: why do i return the ticket id for a
245 # blacklisted ticket id?
246 #diag_node['found_rt_ticket'] = r_ticket['ticket_id']
247 self.count = self.count + 1
249 ret_list.append(diag_node)
251 #print "RT processed %d nodes with noticket" % self.count
252 #logger.debug("RT filtered %d noticket nodes" % self.count)
# Constructor for the Diagnose stage: keeps the record list and loads the
# hostname->loginbase map plus the cached findbad database.
256 def __init__(self, record_list):
257 self.record_list = record_list
258 self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
259 self.findbad = soltesz.if_cached_else(1, "findbad", lambda : {})
261 self.diagnose_in = {}
262 self.diagnose_out = {}
# NOTE(review): lines 263-264 are missing and likely contain the `def` of the
# run method; everything from 265 down (ending in `return self.diagnose_out`)
# belongs to that method, not __init__. The `try:` matching the Python 2
# `except Exception, err:` at 271 is also in a gap (line ~269).
265 self.accumSickSites()
267 #logger.debug("Accumulated %d sick sites" % len(self.diagnose_in.keys()))
270 stats = self.diagnoseAll()
271 except Exception, err:
272 print "----------------"
274 print traceback.print_exc()
276 #if config.policysavedb:
279 #print_stats("sites_observed", stats)
280 #print_stats("sites_diagnosed", stats)
281 #print_stats("nodes_diagnosed", stats)
283 return self.diagnose_out
# Group the incoming node records by their site loginbase into diagnose_in.
285 def accumSickSites(self):
287 Take all nodes, from l_diagnose, look them up in the diagnose_out database,
288 and insert them into diagnose_in[] as:
290 diagnose_in[loginbase] = [diag_node1, diag_node2, ...]
# NOTE(review): the docstring above says a list, but the code stores a dict
# keyed by nodename — the code is what callers (diagnoseAll) actually consume.
292 for node_record in self.record_list:
294 nodename = node_record['nodename']
295 loginbase = self.plcdb_hn2lb[nodename]
297 if loginbase not in self.diagnose_in:
298 self.diagnose_in[loginbase] = {}
300 self.diagnose_in[loginbase][nodename] = node_record
# Diagnose every accumulated site, filling diagnose_out, and return counters
# for reporting: sites observed/diagnosed, nodes diagnosed, and all loginbases.
304 def diagnoseAll(self):
306 i_sites_diagnosed = 0
307 i_nodes_diagnosed = 0
# NOTE(review): `i_sites_observed` and `l_allsites` are initialized on lines
# (305, 308-311) missing from this listing.
312 sorted_sites = self.diagnose_in.keys()
314 self.diagnose_out= {}
315 for loginbase in sorted_sites:
316 l_allsites += [loginbase]
318 d_diag_nodes = self.diagnose_in[loginbase]
319 d_act_records = self.__diagnoseSite(loginbase, d_diag_nodes)
320 # store records in diagnose_out, for saving later.
321 self.diagnose_out.update(d_act_records)
# Only count a site as "diagnosed" if at least one of its nodes produced a record.
323 if len(d_act_records[loginbase]['nodes'].keys()) > 0:
324 i_nodes_diagnosed += (len(d_act_records[loginbase]['nodes'].keys()))
325 i_sites_diagnosed += 1
326 i_sites_observed += 1
328 return {'sites_observed': i_sites_observed,
329 'sites_diagnosed': i_sites_diagnosed,
330 'nodes_diagnosed': i_nodes_diagnosed,
331 'allsites':l_allsites}
# Estimate how many days a node has been down: prefer CoMon's sshstatus, then
# lastcotop, else fall back to PLC's last_contact timestamp.
335 def __getDaysDown(self, diag_record, nodename):
# NOTE(review): the default initialization of `daysdown` (line 336, likely -1),
# the `else:` before the last_contact fallback (341-342), the `now` binding,
# and the final `return daysdown` are all in listing gaps.
337 if diag_record['comonstats']['sshstatus'] != "null":
338 daysdown = int(diag_record['comonstats']['sshstatus']) // (60*60*24)
339 elif diag_record['comonstats']['lastcotop'] != "null":
340 daysdown = int(diag_record['comonstats']['lastcotop']) // (60*60*24)
343 last_contact = diag_record['plcnode']['last_contact']
344 if last_contact == None:
345 # the node has never been up, so give it a break
348 diff = now - last_contact
349 daysdown = diff // (60*60*24)
# Human-readable form of __getDaysDown for email/log text.
# NOTE(review): the if/else guard lines (354 and 356) separating the two
# returns are missing from this listing — presumably "if daysdown > 0: ... else: ...".
352 def __getStrDaysDown(self, diag_record, nodename):
353 daysdown = self.__getDaysDown(diag_record, nodename)
355 return "(%d days down)"%daysdown
357 return "Unknown number of days"
# Report the node's boot-CD "version".
# NOTE(review): despite the name, the visible code reads the 'kernel' field,
# not 'bootcd'; the `return cdversion` line is in a listing gap.
359 def __getCDVersion(self, diag_record, nodename):
361 #print "Getting kernel for: %s" % diag_record['nodename']
362 cdversion = diag_record['kernel']
# Diagnose every node of one site and decide the site-level squeeze/email flags.
365 def __diagnoseSite(self, loginbase, d_diag_nodes):
367 d_diag_nodes are diagnose_in entries.
# NOTE(review): the continuation of this dict literal (lines 370-375, with the
# 'squeeze'/'email' defaults and the 'nodes' sub-dict) is in a listing gap.
369 d_diag_site = {loginbase : { 'config' :
376 sorted_nodes = d_diag_nodes.keys()
378 for nodename in sorted_nodes:
379 node_record = d_diag_nodes[nodename]
380 diag_record = self.__diagnoseNode(loginbase, node_record)
382 if diag_record != None:
383 d_diag_site[loginbase]['nodes'][nodename] = diag_record
385 # NOTE: improvement means, we need to act/squeeze and email.
386 #print "DIAG_RECORD", diag_record
387 if 'monitor-end-record' in diag_record['stage'] or \
388 'nmreset' in diag_record['stage']:
389 # print "resetting loginbase!"
390 d_diag_site[loginbase]['config']['squeeze'] = True
391 d_diag_site[loginbase]['config']['email'] = True
393 # print "NO IMPROVEMENT!!!!"
395 pass # there is nothing to do for this node.
397 # NOTE: these settings can be overridden by command line arguments,
398 # or the state of a record, i.e. if already in RT's Support Queue.
# NOTE(review): the condition guarding the squeeze below (line 400, presumably
# comparing nodes_up to a MINUP-style threshold) is in a listing gap, as is the
# final `return d_diag_site`.
399 nodes_up = self.getUpAtSite(loginbase, d_diag_site)
401 d_diag_site[loginbase]['config']['squeeze'] = True
403 max_slices = self.getMaxSlices(loginbase)
404 num_nodes = self.getNumNodes(loginbase)
405 # NOTE: when max_slices == 0, this is either a new site (the old way)
406 # or an old disabled site from previous monitor (before site['enabled'])
407 if nodes_up < num_nodes and max_slices != 0:
408 d_diag_site[loginbase]['config']['email'] = True
410 if len(d_diag_site[loginbase]['nodes'].keys()) > 0:
411 print "SITE: %20s : %d nodes up, at most" % (loginbase, nodes_up)
# Turn a merged node record into a diagnosis record (message template, email
# args, log line) according to its findbad category/state:
#   ERROR -> down-node mail (or PCU-to-node-mapping mail after a PCU attempt),
#   OLDBOOTCD -> new-bootcd mail, PROD -> nothing, BOOT -> thank-you on
#   improvement; other categories fall through to "Unknown category".
# NOTE(review): `diag_record` initialization lines (e.g. 423, 457, 490), the
# `else:` keywords before the second 'log' assignments (449, 467, 508), the
# "down long enough?" guard around 429-430, and the `return diag_record`
# epilogue are all in listing gaps.
415 def diagRecordByCategory(self, node_record):
416 nodename = node_record['nodename']
417 category = node_record['category']
418 state = node_record['state']
419 loginbase = self.plcdb_hn2lb[nodename]
422 if "ERROR" in category: # i.e. "DOWN"
424 diag_record.update(node_record)
425 daysdown = self.__getDaysDown(diag_record, nodename)
427 # format = "DIAG: %20s : %-40s Down only %s days NOTHING DONE"
428 # print format % (loginbase, nodename, daysdown)
431 s_daysdown = self.__getStrDaysDown(diag_record, nodename)
432 diag_record['message'] = emailTxt.mailtxt.newdown
433 diag_record['args'] = {'nodename': nodename}
434 diag_record['info'] = (nodename, s_daysdown, "")
436 if 'reboot_node_failed' in node_record:
437 # there was a previous attempt to use the PCU.
438 if node_record['reboot_node_failed'] == False:
439 # then the last attempt apparently, succeeded.
440 # But, the category is still 'ERROR'. Therefore, the
441 # PCU-to-Node mapping is broken.
442 #print "Setting message for ERROR node to PCU2NodeMapping: %s" % nodename
443 diag_record['message'] = emailTxt.mailtxt.pcutonodemapping
444 diag_record['email_pcu'] = True
446 if diag_record['ticket_id'] == "":
447 diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
448 (loginbase, nodename, diag_record['info'][1:], diag_record['found_rt_ticket'])
450 diag_record['log'] = "DOWN: %20s : %-40s == %20s %s" % \
451 (loginbase, nodename, diag_record['info'][1:], diag_record['ticket_id'])
453 elif "OLDBOOTCD" in category:
454 # V2 boot cds as determined by findbad
455 s_daysdown = self.__getStrDaysDown(node_record, nodename)
456 s_cdversion = self.__getCDVersion(node_record, nodename)
458 diag_record.update(node_record)
459 #if "2.4" in diag_record['kernel'] or "v2" in diag_record['bootcd']:
460 diag_record['message'] = emailTxt.mailtxt.newbootcd
461 diag_record['args'] = {'nodename': nodename}
462 diag_record['info'] = (nodename, s_daysdown, s_cdversion)
463 if diag_record['ticket_id'] == "":
464 diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
465 (loginbase, nodename, diag_record['kernel'],
466 diag_record['bootcd'], diag_record['found_rt_ticket'])
468 diag_record['log'] = "BTCD: %20s : %-40s == %20s %20s %s" % \
469 (loginbase, nodename, diag_record['kernel'],
470 diag_record['bootcd'], diag_record['ticket_id'])
472 elif "PROD" in category:
474 # Not sure what to do with these yet. Probably need to
476 print "DEBG: %20s : %-40s NOTHING DONE" % (loginbase, nodename)
478 elif "BOOT" in state:
480 # TODO: remove penalties, if any are applied.
482 last_contact = node_record['plcnode']['last_contact']
483 if last_contact == None:
486 time_diff = now - last_contact;
488 if 'improvement' in node_record['stage']:
489 # then we need to pass this on to 'action'
491 diag_record.update(node_record)
492 diag_record['message'] = emailTxt.mailtxt.newthankyou
493 diag_record['args'] = {'nodename': nodename}
494 diag_record['info'] = (nodename, node_record['prev_category'],
495 node_record['category'])
496 if 'email_pcu' in diag_record:
497 if diag_record['email_pcu']:
498 # previously, the pcu failed to reboot, so send
499 # email. Now, reset these values to try the reboot
501 diag_record['email_pcu'] = False
502 del diag_record['reboot_node_failed']
504 if diag_record['ticket_id'] == "":
505 diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
506 (loginbase, nodename, diag_record['stage'],
507 state, category, diag_record['found_rt_ticket'])
509 diag_record['log'] = "IMPR: %20s : %-40s == %20s %20s %s %s" % \
510 (loginbase, nodename, diag_record['stage'],
511 state, category, diag_record['ticket_id'])
513 #elif time_diff >= 6*SPERHOUR:
514 # # heartbeat is older than 30 min.
516 # #print "Possible NM problem!! %s - %s = %s" % (now, last_contact, time_diff)
518 # diag_record.update(node_record)
519 # diag_record['message'] = emailTxt.mailtxt.NMReset
520 # diag_record['args'] = {'nodename': nodename}
521 # diag_record['stage'] = "nmreset"
522 # diag_record['info'] = (nodename,
523 # node_record['prev_category'],
524 # node_record['category'])
525 # if diag_record['ticket_id'] == "":
526 # diag_record['log'] = "NM : %20s : %-40s == %20s %20s %s %s" % \
527 # (loginbase, nodename, diag_record['stage'],
528 # state, category, diag_record['found_rt_ticket'])
530 # diag_record['log'] = "NM : %20s : %-40s == %20s" % \
531 # (loginbase, nodename, diag_record['stage'])
# NOTE(review): the bodies of the ALPHA/clock_drift/dns/filerw branches
# (lines 540, 542, 544, 546) are in listing gaps — likely `pass` placeholders.
539 elif "ALPHA" in category:
541 elif "clock_drift" in category:
543 elif "dns" in category:
545 elif "filerw" in category:
548 print "Unknown category!!!! %s" % category
# Core per-node state machine: compare the new category with the previous one,
# consult RT ticket status, build the diagnosis via diagRecordByCategory(), and
# advance the escalation stage (findbad -> actinoneweek -> actintwoweeks ->
# waitforever), choosing recipients (TECH/PI/USER), message index and actions.
# Returns the action record, or None when nothing should be sent this round.
# NOTE(review): many structural lines are in listing gaps — `else:` keywords
# (e.g. before 180-equivalent branches here at 583, 692, 710, 728, 741, 756),
# the `elif`s comparing cmpCategoryVal results (~591-597), the `return None`
# after 626, the act_record initialization (~648), and the final
# `return act_record` — so the visible indentation/branch pairing below is
# partially reconstructed by the reader, not guaranteed by this listing.
553 def __diagnoseNode(self, loginbase, node_record):
554 # TODO: change the format of the hostname in this
555 # record to something more natural.
556 nodename = node_record['nodename']
557 category = node_record['category']
558 prev_category = node_record['prev_category']
559 state = node_record['state']
560 #if 'prev_category' in node_record:
561 # prev_category = node_record['prev_category']
563 # prev_category = "ERROR"
564 if node_record['prev_category'] != "NORECORD":
566 val = cmpCategoryVal(category, prev_category)
567 print "%s went from %s -> %s" % (nodename, prev_category, category)
568 if prev_category == "UNKNOWN" and category == "PROD":
569 # sending too many thank you notes to people that don't
571 # TODO: not sure what effect this will have on the node
# A node with no open ticket that recovered is closed immediately; otherwise
# it is marked 'improvement' so the action stage can back off penalties.
577 if node_record['ticket_id'] == "" or node_record['ticket_id'] == None:
578 print "closing record with no ticket: ", node_record['nodename']
579 node_record['action'] = ['close_rt']
580 node_record['message'] = None
581 node_record['stage'] = 'monitor-end-record'
584 node_record['stage'] = 'improvement'
586 #if 'monitor-end-record' in node_record['stage']:
587 # # just ignore it if it's already ended.
588 # # otherwise, the status should be worse, and we won't get
590 # print "monitor-end-record: ignoring ", node_record['nodename']
595 # current category is worse than previous, carry on
598 #values are equal, carry on.
599 #print "why are we here?"
# RT feedback: a resolved waitforever ticket ends the record; a 'new' ticket
# in the Monitor queue restarts the record at stage 'findbad'.
602 if 'rt' in node_record and 'Status' in node_record['rt']:
603 if node_record['stage'] == 'ticket_waitforever':
604 if 'resolved' in node_record['rt']['Status']:
605 print "ending waitforever record for: ", node_record['nodename']
606 node_record['action'] = ['noop']
607 node_record['message'] = None
608 node_record['stage'] = 'monitor-end-record'
609 print "oldlog: %s" % node_record['log'],
610 print "%15s" % node_record['action']
612 if 'new' in node_record['rt']['Status'] and \
613 'Queue' in node_record['rt'] and \
614 'Monitor' in node_record['rt']['Queue']:
616 print "RESETTING stage to findbad"
617 node_record['stage'] = 'findbad'
619 #### COMPARE category and prev_category
621 # then assign a stage based on relative priorities
623 # then check category for stats.
624 diag_record = self.diagRecordByCategory(node_record)
625 if diag_record == None:
626 #print "diag_record == None"
630 # TODO: need to record time found, and maybe add a stage for acting on it...
631 # NOTE: after found, if the support ticket is resolved, the block is
632 # not removed. How to remove the block on this?
634 #if 'found_rt_ticket' in diag_record and \
635 # diag_record['found_rt_ticket'] is not None:
636 # if diag_record['stage'] is not 'improvement':
637 # diag_record['stage'] = 'ticket_waitforever'
639 current_time = time.time()
640 # take off four days, for the delay that database caused.
641 # TODO: generalize delays at PLC, and prevent enforcement when there
642 # have been no emails.
643 # NOTE: 7*SPERDAY exists to offset the 'bad week'
644 #delta = current_time - diag_record['time'] - 7*SPERDAY
645 delta = current_time - diag_record['time']
647 message = diag_record['message']
649 act_record.update(diag_record)
# Stage machine: message[0]/[1]/[2] are the escalating mail templates
# (tech, tech+PI, tech+PI+users).
652 if 'findbad' in diag_record['stage']:
653 # The node is bad, and there's no previous record of it.
654 act_record['email'] = TECH
655 act_record['action'] = ['noop']
656 act_record['message'] = message[0]
657 act_record['stage'] = 'stage_actinoneweek'
659 elif 'nmreset' in diag_record['stage']:
660 act_record['email'] = ADMIN
661 act_record['action'] = ['reset_nodemanager']
662 act_record['message'] = message[0]
663 act_record['stage'] = 'nmreset'
666 elif 'reboot_node' in diag_record['stage']:
667 act_record['email'] = TECH
668 act_record['action'] = ['noop']
669 act_record['message'] = message[0]
670 act_record['stage'] = 'stage_actinoneweek'
672 elif 'improvement' in diag_record['stage']:
673 # - backoff previous squeeze actions (slice suspend, nocreate)
674 # TODO: add a backoff_squeeze section... Needs to runthrough
675 print "backing off of %s" % nodename
676 act_record['action'] = ['close_rt']
677 act_record['message'] = message[0]
678 act_record['stage'] = 'monitor-end-record'
680 elif 'actinoneweek' in diag_record['stage']:
681 if delta >= 7 * SPERDAY:
682 act_record['email'] = TECH | PI
683 act_record['stage'] = 'stage_actintwoweeks'
684 act_record['message'] = message[1]
685 act_record['action'] = ['nocreate' ]
686 act_record['time'] = current_time # reset clock for waitforever
687 elif delta >= 3* SPERDAY and not 'second-mail-at-oneweek' in act_record:
688 act_record['email'] = TECH
689 act_record['message'] = message[0]
690 act_record['action'] = ['sendmailagain-waitforoneweekaction' ]
691 act_record['second-mail-at-oneweek'] = True
693 act_record['message'] = None
694 act_record['action'] = ['waitforoneweekaction' ]
695 print "ignoring this record for: %s" % act_record['nodename']
696 return None # don't send if there's no action
698 elif 'actintwoweeks' in diag_record['stage']:
699 if delta >= 7 * SPERDAY:
700 act_record['email'] = TECH | PI | USER
701 act_record['stage'] = 'stage_waitforever'
702 act_record['message'] = message[2]
703 act_record['action'] = ['suspendslices']
704 act_record['time'] = current_time # reset clock for waitforever
705 elif delta >= 3* SPERDAY and not 'second-mail-at-twoweeks' in act_record:
706 act_record['email'] = TECH | PI
707 act_record['message'] = message[1]
708 act_record['action'] = ['sendmailagain-waitfortwoweeksaction' ]
709 act_record['second-mail-at-twoweeks'] = True
711 act_record['message'] = None
712 act_record['action'] = ['waitfortwoweeksaction']
713 return None # don't send if there's no action
715 elif 'ticket_waitforever' in diag_record['stage']:
716 act_record['email'] = TECH
717 if 'first-found' not in act_record:
718 act_record['first-found'] = True
719 act_record['log'] += " firstfound"
720 act_record['action'] = ['ticket_waitforever']
721 act_record['message'] = None
722 act_record['time'] = current_time
724 if delta >= 7*SPERDAY:
725 act_record['action'] = ['ticket_waitforever']
726 act_record['message'] = None
727 act_record['time'] = current_time # reset clock
729 act_record['action'] = ['ticket_waitforever']
730 act_record['message'] = None
733 elif 'waitforever' in diag_record['stage']:
734 # more than 3 days since last action
735 # TODO: send only on weekdays.
736 # NOTE: expects that 'time' has been reset before entering waitforever stage
737 if delta >= 3*SPERDAY:
738 act_record['action'] = ['email-againwaitforever']
739 act_record['message'] = message[2]
740 act_record['time'] = current_time # reset clock
742 act_record['action'] = ['waitforever']
743 act_record['message'] = None
744 return None # don't send if there's no action
747 # There is no action to be taken, possibly b/c the stage has
748 # already been performed, but diagnose picked it up again.
750 # 1. stage is unknown, or
751 # 2. delta is not big enough to bump it to the next stage.
752 # TODO: figure out which. for now assume 2.
753 print "UNKNOWN stage for %s; nothing done" % nodename
754 act_record['action'] = ['unknown']
755 act_record['message'] = message[0]
757 act_record['email'] = TECH
758 act_record['action'] = ['noop']
759 act_record['message'] = message[0]
760 act_record['stage'] = 'stage_actinoneweek'
761 act_record['time'] = current_time # reset clock
766 print "%s" % act_record['log'],
767 print "%15s" % act_record['action']
# Look up the site's max_slices via the plcsite stats of any of its nodes
# present in findbad; raises if no node yields site stats.
# NOTE(review): `site_stats = None` init (~772-773) and the `break` after a
# successful lookup (~777-778) are in listing gaps.
770 def getMaxSlices(self, loginbase):
771 # if sickdb has a loginbase, then it will have at least one node.
774 for nodename in self.diagnose_in[loginbase].keys():
775 if nodename in self.findbad['nodes']:
776 site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
779 if site_stats == None:
780 raise Exception, "loginbase with no nodes in findbad"
782 return site_stats['max_slices']
# Same lookup pattern as getMaxSlices, but returns the site's num_nodes.
# NOTE(review): `site_stats = None` init and the post-hit `break` are in
# listing gaps, mirroring getMaxSlices.
784 def getNumNodes(self, loginbase):
785 # if sickdb has a loginbase, then it will have at least one node.
788 for nodename in self.diagnose_in[loginbase].keys():
789 if nodename in self.findbad['nodes']:
790 site_stats = self.findbad['nodes'][nodename]['values']['plcsite']
793 if site_stats == None:
794 raise Exception, "loginbase with no nodes in findbad"
796 return site_stats['num_nodes']
# (Docstring fragment for getUpAtSite; its enclosing quotes are in a listing gap.)
799 Returns number of up nodes as the total number *NOT* in act_all with a
800 stage other than 'steady-state' .
802 def getUpAtSite(self, loginbase, d_diag_site):
803 # TODO: THIS DOESN"T WORK!!! it misses all the 'debug' state nodes
804 # that aren't recorded yet.
806 numnodes = self.getNumNodes(loginbase)
807 # NOTE: assume nodes we have no record of are ok. (too conservative)
808 # TODO: make the 'up' value more representative
# NOTE(review): the down-counter init (~809), its decrement inside the branch
# (~814), the `else:` (~815), and the final return of the up count are all in
# listing gaps.
810 for nodename in d_diag_site[loginbase]['nodes'].keys():
812 rec = d_diag_site[loginbase]['nodes'][nodename]
813 if rec['stage'] != 'monitor-end-record':
816 pass # the node is assumed to be up.
819 # print "ERROR: %s total nodes up and down != %d" % (loginbase, numnodes)
# Constructor for the Action stage: takes the Diagnose output and builds the
# dispatch table mapping action keys to callables. Keys not listed here but
# produced by __diagnoseNode (e.g. 'close_rt') must match this table, since
# __actOnSite invokes self.actions[act_key] directly.
825 def __init__(self, diagnose_out):
826 # the hostname to loginbase mapping
827 self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
830 self.diagnose_db = diagnose_out
832 self.act_all = soltesz.if_cached_else(1, "act_all", lambda : {})
834 # A dict of actions to specific functions. PICKLE doesn't like lambdas.
# NOTE(review): `self.actions = {}` (line 835) is in a listing gap.
836 self.actions['suspendslices'] = lambda args: plc.suspendSlices(args['hostname'])
837 self.actions['nocreate'] = lambda args: plc.removeSliceCreation(args['hostname'])
838 self.actions['close_rt'] = lambda args: close_rt_backoff(args)
839 self.actions['rins'] = lambda args: plc.nodeBootState(args['hostname'], "rins")
840 self.actions['noop'] = lambda args: args
841 self.actions['reboot_node'] = lambda args: reboot_node(args)
842 self.actions['reset_nodemanager'] = lambda args: args # reset_nodemanager(args)
# The remaining keys are bookkeeping stages with no side effect: identity no-ops.
844 self.actions['ticket_waitforever'] = lambda args: args
845 self.actions['waitforever'] = lambda args: args
846 self.actions['unknown'] = lambda args: args
847 self.actions['waitforoneweekaction'] = lambda args: args
848 self.actions['waitfortwoweeksaction'] = lambda args: args
849 self.actions['sendmailagain-waitforoneweekaction'] = lambda args: args
850 self.actions['sendmailagain-waitfortwoweeksaction'] = lambda args: args
851 self.actions['email-againwaitforever'] = lambda args: args
852 self.actions['email-againticket_waitforever'] = lambda args: args
# Fragment of the Action stage's run method: its `def` line and the `try:`
# matching the Python 2 `except Exception, err:` below are in the listing gap
# at lines 853-860. Runs analyseSites(), prints stats, and persists act_all.
858 #logger.debug("Accumulated %d sick sites" % len(self.sickdb.keys()))
861 stats = self.analyseSites()
862 except Exception, err:
863 print "----------------"
865 print traceback.print_exc()
# Persist act_all even on failure so partial progress survives the crash.
867 if config.policysavedb:
868 print "Saving Databases... act_all"
869 soltesz.dbDump("act_all", self.act_all)
872 #print_stats("sites_observed", stats)
873 #print_stats("sites_diagnosed", stats)
874 #print_stats("nodes_diagnosed", stats)
875 print_stats("sites_emailed", stats)
876 #print_stats("nodes_actedon", stats)
877 print string.join(stats['allsites'], ",")
# Success path: dump act_all again after the full run completes.
879 if config.policysavedb:
880 print "Saving Databases... act_all"
881 #soltesz.dbDump("policy.eventlog", self.eventlog)
882 # TODO: remove 'diagnose_out',
883 # or at least the entries that were acted on.
884 soltesz.dbDump("act_all", self.act_all)
# Adopt the Diagnose output wholesale as this stage's sickdb (no filtering in
# the visible code, despite the docstring's mention of l_action).
886 def accumSites(self):
888 Take all nodes, from l_action, look them up in the diagnose_db database,
889 and insert them into sickdb[] as:
891 This way only the given l_action nodes will be acted on regardless
892 of how many from diagnose_db are available.
894 sickdb[loginbase][nodename] = diag_record
896 self.sickdb = self.diagnose_db
# Send one escalation email for a site, choosing recipients from the role
# bitmask (TECH/PI/USER aliases derived from loginbase) and threading the
# message into an existing RT ticket when args carries a ticket_id.
# Returns the ticket_id (visible assignments; the `return` itself and several
# role/ticket guard lines sit in listing gaps).
898 def __emailSite(self, loginbase, roles, message, args):
900 loginbase is the unique site abbreviation, prepended to slice names.
901 roles contains TECH, PI, USER roles, and derive email aliases.
902 record contains {'message': [<subj>,<body>], 'args': {...}}
905 args.update({'loginbase':loginbase})
# NOTE(review): the `contacts` initialization and the role-bitmask tests
# guarding each `contacts +=` below (lines 906-920, e.g. `if TECH & roles:`)
# are in listing gaps.
907 if not config.mail and not config.debug and config.bcc:
909 if config.mail and config.debug:
915 contacts += [config.email]
917 contacts += [TECHEMAIL % loginbase]
919 contacts += [PIEMAIL % loginbase]
921 slices = plc.slices(loginbase)
924 contacts += [SLICEMAIL % slice]
925 print "SLIC: %20s : %d slices" % (loginbase, len(slices))
927 print "SLIC: %20s : 0 slices" % loginbase
# message is a (subject_template, body_template) pair; both are %-formatted
# with the accumulated args.
930 subject = message[0] % args
931 body = message[1] % args
# With an existing ticket: reply via plain mail with the RT subject tag.
# Otherwise (the `try:` around 941 is in a gap): open/update a ticket via RT.
934 if 'ticket_id' in args:
935 subj = "Re: [PL #%s] %s" % (args['ticket_id'], subject)
937 subj = "Re: [PL noticket] %s" % subject
938 mailer.email(subj, body, contacts)
939 ticket_id = args['ticket_id']
941 ticket_id = mailer.emailViaRT(subject, body, contacts, args['ticket_id'])
942 except Exception, err:
943 print "exception on message:"
945 print traceback.print_exc()
# Render one node's diag info tuple as a line for the email hostname list:
# end-records get a "went from X to Y" form, others "(node, ver, daysdown)".
# NOTE(review): the `else:` (line 955) and the `return hlist` epilogue are in
# listing gaps.
951 def _format_diaginfo(self, diag_node):
952 info = diag_node['info']
953 if diag_node['stage'] == 'monitor-end-record':
954 hlist = " %s went from '%s' to '%s'\n" % (info[0], info[1], info[2])
956 hlist = " %s %s - %s\n" % (info[0], info[2], info[1]) #(node,ver,daysdn)
# Aggregate the per-node records of one issue into the substitution dict used
# by the email templates (hostname list, bootcd URLs, pcu_id, ticket_id).
# NOTE(review): `email_args = {}` (~961-962), the `else:` lines before 978/991,
# the try/except around the stdin read (983/985/987), and the final
# `return email_args` are all in listing gaps.
960 def get_email_args(self, act_recordlist, loginbase=None):
963 email_args['hostname_list'] = ""
964 email_args['url_list'] = ""
966 for act_record in act_recordlist:
967 email_args['hostname_list'] += act_record['msg_format']
968 email_args['hostname'] = act_record['nodename']
# NOTE(review): the %s in these URLs is never substituted with the nodename in
# the visible code — looks like a missing `% act_record['nodename']`; confirm
# against the mail templates before relying on url_list.
969 email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/iso/%s.iso\n"
970 email_args['url_list'] += "\thttp://boot2.planet-lab.org/premade-bootcd-alpha/usb/%s.usb\n"
971 email_args['url_list'] += "\n"
972 if 'plcnode' in act_record and \
973 'pcu_ids' in act_record['plcnode'] and \
974 len(act_record['plcnode']['pcu_ids']) > 0:
975 print "setting 'pcu_id' for email_args %s"%email_args['hostname']
976 email_args['pcu_id'] = act_record['plcnode']['pcu_ids'][0]
978 email_args['pcu_id'] = "-1"
# A ticket_id of 0/'0' means "unknown": prompt the operator interactively.
980 if 'ticket_id' in act_record:
981 if act_record['ticket_id'] == 0 or act_record['ticket_id'] == '0':
982 print "Enter the ticket_id for %s @ %s" % (loginbase, act_record['nodename'])
984 line = sys.stdin.readline()
986 ticket_id = int(line)
988 print "could not get ticket_id from stdin..."
991 ticket_id = act_record['ticket_id']
993 email_args['ticket_id'] = ticket_id
# Group action records by their primary action key so only one email is sent
# per site per distinct problem.
# NOTE(review): `unique_issues = {}` (line 999) is in a listing gap.
997 def get_unique_issues(self, act_recordlist):
998 # NOTE: only send one email per site, per problem...
1000 for act_record in act_recordlist:
1001 act_key = act_record['action'][0]
1002 if act_key not in unique_issues:
1003 unique_issues[act_key] = []
1005 unique_issues[act_key] += [act_record]
1007 return unique_issues
# Act on one site: diagnose each node record into an act_record, group the
# records by issue, email the site (via RT where possible), propagate the
# resulting ticket_id into act_all, optionally squeeze (suspend/nocreate),
# and persist act_all. Returns (nodes_acted_on, nodes_emailed).
# NOTE(review): several lines are in listing gaps — the counters/act_recordlist
# init (~1011-1015), the `else:` and email_args continuation around the PCU
# mail (1041-1043), the ticket_id==0 guards (1045-1046, 1057-1059), and the
# sleep call (~1094) hinted at by the "sleeping for 1 sec" print.
1010 def __actOnSite(self, loginbase, site_record):
1016 for nodename in site_record['nodes'].keys():
1017 diag_record = site_record['nodes'][nodename]
1018 act_record = self.__actOnNode(diag_record)
1019 #print "nodename: %s %s" % (nodename, act_record)
1020 if act_record is not None:
1021 act_recordlist += [act_record]
1023 unique_issues = self.get_unique_issues(act_recordlist)
1025 for issue in unique_issues.keys():
1026 print "\tworking on issue: %s" % issue
1027 issue_record_list = unique_issues[issue]
1028 email_args = self.get_email_args(issue_record_list, loginbase)
# First pass: PCU-failure notifications are mailed per node, not per issue.
1031 for act_record in issue_record_list:
1032 # if there's a pcu record and email config is set
1033 if 'email_pcu' in act_record:
1034 if act_record['message'] != None and act_record['email_pcu'] and site_record['config']['email']:
1035 # and 'reboot_node' in act_record['stage']:
1037 email_args['hostname'] = act_record['nodename']
1038 ticket_id = self.__emailSite(loginbase,
1039 act_record['email'],
1040 emailTxt.mailtxt.pcudown[0],
1044 print "got a ticket_id == 0!!!! %s" % act_record['nodename']
1047 email_args['ticket_id'] = ticket_id
# One representative record per issue drives the site-level mail.
1050 act_record = issue_record_list[0]
1051 # send message before squeezing
1052 print "\t\tconfig.email: %s and %s" % (act_record['message'] != None,
1053 site_record['config']['email'])
1054 if act_record['message'] != None and site_record['config']['email']:
1055 ticket_id = self.__emailSite(loginbase, act_record['email'],
1056 act_record['message'], email_args)
1060 print "ticket_id == 0 for %s %s" % (loginbase, act_record['nodename'])
1064 # Add ticket_id to ALL nodenames
1065 for act_record in issue_record_list:
1066 nodename = act_record['nodename']
1067 # update node record with RT ticket_id
1068 if nodename in self.act_all:
1069 self.act_all[nodename][0]['ticket_id'] = "%s" % ticket_id
1070 # if the ticket was previously resolved, reset it to new.
1071 if 'rt' in act_record and \
1072 'Status' in act_record['rt'] and \
1073 act_record['rt']['Status'] == 'resolved':
1074 mailer.setTicketStatus(ticket_id, "new")
1075 status = mailer.getTicketStatus(ticket_id)
1076 self.act_all[nodename][0]['rt'] = status
1077 if config.mail: i_nodes_emailed += 1
1079 print "\t\tconfig.squeeze: %s and %s" % (config.squeeze,
1080 site_record['config']['squeeze'])
# Squeeze only when BOTH the global flag and the site-level flag agree.
1081 if config.squeeze and site_record['config']['squeeze']:
1082 for act_key in act_record['action']:
1083 self.actions[act_key](email_args)
1084 i_nodes_actedon += 1
1086 if config.policysavedb:
1087 print "Saving Databases... act_all, diagnose_out"
1088 soltesz.dbDump("act_all", self.act_all)
1089 # remove site record from diagnose_out, it's in act_all as done.
1090 del self.diagnose_db[loginbase]
1091 #soltesz.dbDump("diagnose_out", self.diagnose_db)
1093 print "sleeping for 1 sec"
1095 #print "Hit enter to continue..."
1097 #line = sys.stdin.readline()
1099 return (i_nodes_actedon, i_nodes_emailed)
1101 def __actOnNode(self, diag_record):
# Build an act record from one node's diag record. If the node is DOWN
# and has a PCU, attempt an automatic reboot first: on success a
# synthetic 'reboot_node' record is pushed into act_all and (per the
# elided return) no further action is taken; on failure the record is
# marked so a PCU-down email goes out. Otherwise the act record is
# prepended to act_all[nodename] unless it is an end/nmreset record.
# NOTE(review): this excerpt elides several lines, including the
# 'act_record = {}' / 'act_record2 = {}' initializations, the 'try:'
# opener before line 1127, and the return statements.
1102 nodename = diag_record['nodename']
1103 message = diag_record['message']
1106 act_record.update(diag_record)
1107 act_record['nodename'] = nodename
1108 act_record['msg_format'] = self._format_diaginfo(diag_record)
1109 print "act_record['stage'] == %s " % act_record['stage']
1111 # avoid end records, and nmreset records
1112 # reboot_node_failed, is set below, so don't reboot repeatedly.
1114 if 'monitor-end-record' not in act_record['stage'] and \
1115 'nmreset' not in act_record['stage'] and \
1116 'reboot_node_failed' not in act_record:
# Only auto-reboot when the log says DOWN and PLC knows a PCU for it.
1118 if "DOWN" in act_record['log'] and \
1119 'pcu_ids' in act_record['plcnode'] and \
1120 len(act_record['plcnode']['pcu_ids']) > 0:
1122 print "%s" % act_record['log'],
1123 print "%15s" % (['reboot_node'],)
1124 # Set node to re-install
1125 plc.nodeBootState(act_record['nodename'], "rins")
# (elided 'try:' wraps this call — see note above)
1127 ret = reboot_node({'hostname': act_record['nodename']})
1128 except Exception, exc:
1129 print "exception on reboot_node:"
1131 print traceback.print_exc()
1134 if ret: # and ( 'reboot_node_failed' not in act_record or act_record['reboot_node_failed'] == False):
# Reboot worked: record a synthetic 'reboot_node' action so the
# history shows the automatic recovery, and skip emailing the PCU.
1136 print "reboot succeeded for %s" % act_record['nodename']
1138 act_record2.update(act_record)
1139 act_record2['action'] = ['reboot_node']
1140 act_record2['stage'] = "reboot_node"
1141 act_record2['reboot_node_failed'] = False
1142 act_record2['email_pcu'] = False
1144 if nodename not in self.act_all:
1145 self.act_all[nodename] = []
1146 print "inserting 'reboot_node' record into act_all"
1147 self.act_all[nodename].insert(0,act_record2)
1149 # return None to avoid further action
1150 print "Taking no further action"
# Reboot failed: mark the record so __actOnSite sends a PCU notice,
# and so the guard at line 1116 prevents repeated reboot attempts.
1153 print "reboot failed for %s" % act_record['nodename']
1154 # set email_pcu to also send pcu notice for this record.
1155 act_record['reboot_node_failed'] = True
1156 act_record['email_pcu'] = True
1158 print "%s" % act_record['log'],
1159 print "%15s" % act_record['action']
# NOTE(review): 'is not' compares object identity, not string equality;
# this only works because CPython interns these literals. Should be
# '!=' (or 'not in', matching line 1114) — confirm and fix upstream.
1161 if act_record['stage'] is not 'monitor-end-record' and \
1162 act_record['stage'] is not 'nmreset':
1163 if nodename not in self.act_all:
1164 self.act_all[nodename] = []
# Newest record first: position 0 is the current state everywhere else.
1166 self.act_all[nodename].insert(0,act_record)
1168 print "Not recording %s in act_all" % nodename
1172 def analyseSites(self):
# Walk every sick site, delegate to __actOnSite, and accumulate
# counters. Returns a stats dict consumed by print_stats.
# NOTE(review): this excerpt elides the initialization of
# i_nodes_actedon, i_sites_emailed and l_allsites (original lines
# 1176-1179) — they must exist in the full file.
1173 i_sites_observed = 0
1174 i_sites_diagnosed = 0
1175 i_nodes_diagnosed = 0
# NOTE(review): .keys() is not sorted despite the name 'sorted_sites';
# a sort call may have been elided, or the name is stale — confirm.
1180 sorted_sites = self.sickdb.keys()
1182 for loginbase in sorted_sites:
1183 site_record = self.sickdb[loginbase]
1184 print "sites: %s" % loginbase
# NOTE(review): counts site_record's top-level keys; __actOnSite
# reads site_record['nodes'], so len(site_record['nodes']) may be
# what was intended here — confirm against the full file.
1186 i_nodes_diagnosed += len(site_record.keys())
1187 i_sites_diagnosed += 1
1189 (na,ne) = self.__actOnSite(loginbase, site_record)
1191 i_sites_observed += 1
1192 i_nodes_actedon += na
# NOTE(review): 'ne' is __actOnSite's count of emailed *nodes*, yet it
# feeds 'i_sites_emailed' / the 'sites_emailed' stat below — the stat
# is mislabeled or the wrong counter is accumulated; confirm intent.
1193 i_sites_emailed += ne
1195 l_allsites += [loginbase]
1197 return {'sites_observed': i_sites_observed,
1198 'sites_diagnosed': i_sites_diagnosed,
1199 'nodes_diagnosed': i_nodes_diagnosed,
1200 'sites_emailed': i_sites_emailed,
1201 'nodes_actedon': i_nodes_actedon,
1202 'allsites':l_allsites}
def print_stats(self, key, stats):
    """Print one summary statistic as '<key, right-aligned to 20> : <count>'.

    key   -- name of the statistic (a key of the dict returned by analyseSites)
    stats -- dict mapping statistic names to integer counts
    """
    # Parenthesized print: identical output under Python 2 (the parens wrap
    # a single expression) and also valid under Python 3, unlike the bare
    # Python-2-only 'print x' statement this replaces.
    print("%20s : %d" % (key, stats[key]))