import logging
import string
import time
from threading import Thread

import reboot
import soltesz
from config import config
-config = config()
-
-DAT="./monitor.dat"
-
-logger = logging.getLogger("monitor")
-
-# Time to enforce policy
-POLSLEEP = 7200
-
-# Where to email the summary
-SUMTO = "soltesz@cs.princeton.edu"
-TECHEMAIL="tech-%s@sites.planet-lab.org"
-PIEMAIL="pi-%s@sites.planet-lab.org"
-SLICEMAIL="%s@slices.planet-lab.org"
-PLCEMAIL="support@planet-lab.org"
-
-#Thresholds (DAYS)
-SPERDAY = 86400
-PITHRESH = 7 * SPERDAY
-SLICETHRESH = 7 * SPERDAY
-# Days before attempting rins again
-RINSTHRESH = 5 * SPERDAY
-
-# Days before calling the node dead.
-DEADTHRESH = 30 * SPERDAY
-# Minimum number of nodes up before squeezing
-MINUP = 2
-
-TECH=1
-PI=2
-USER=4
-
-# IF:
-# no SSH, down.
-# bad disk, down
-# DNS, kinda down (sick)
-# clock, kinda down (sick)
-# Full disk, going to be down
-
-# Actions:
-# Email
-# suspend slice creation
-# kill slices
-
-class PLC: pass
-
-class Policy(Thread):
- def __init__(self, comonthread, sickNoTicket, emailed):
- self.comon = comonthread
-
- # the hostname to loginbase mapping
- self.plcdb_hn2lb = soltesz.dbLoad("plcdb_hn2lb")
-
- # Actions taken on nodes.
- self.cache_all = soltesz.if_cached_else(1, "act_all", lambda : {})
- self.act_all= soltesz.if_cached_else(1, "act_all", lambda : {})
-
- # A dict of actions to specific functions. PICKLE doesnt' like lambdas.
- self.actions = {}
- self.actions['suspendslices'] = lambda hn: plc.suspendSlices(hn)
- self.actions['nocreate'] = lambda hn: plc.removeSliceCreation(hn);
- self.actions['rins'] = lambda hn: plc.nodeBootState(hn, "rins")
- self.actions['noop'] = lambda hn: hn
-
- self.bootcds = soltesz.dbLoad("bootcds")
- self.emailed = emailed # host - > (time of email, type of email)
-
- # all sick nodes w/o tickets
- # from thread
- self.sickNoTicket = sickNoTicket
-
-
- # sick nodes with no tickets
- # sickdb{loginbase: [{hostname1: [buckets]}, {...}]}
- self.sickdb = {}
- Thread.__init__(self)
-
- def mergePreviousActions(self):
- """
- look at the sick node_records as reported by comon, and then look at the
- node_records in act_all. There are four cases:
- 1) problem in comon but not in act_all
- this ok, b/c it just means it's a new problem
- 2) problem in comon and in act_all
- we need to figure out the mis-match. Did the problem get better
- or worse? Reset the stage clock to 'initial', if it's better,
- continue if it's gotten worse. Hard to make this judgement here, though.
- 3) no problem in comon, problem in act_all
- this may mean that the node is operational again, or that monitor
- knows how to define a problem that comon does not. For now, if
- comon does not report a problem, monitor obeys. Ultimately,
- however, we want to catch problems that comon can't see.
- 4) no problem in comon, no problem in act_all
- there won't be a record in either db, so there's no code.
-
- TODO: this is where back-offs will be acknowledged. If the nodes get
- better, it should be possible to 're-enable' the site, or slice, etc.
- """
- sorted_sites = self.sickdb.keys()
- sorted_sites.sort()
- # look at all problems reported by comon
- for loginbase in sorted_sites:
- rec_nodedict = self.sickdb[loginbase]
- sorted_nodes = rec_nodedict.keys()
- sorted_nodes.sort()
- #for rec_node in rec_nodelist:
- for nodename in sorted_nodes:
- rec_node = rec_nodedict[nodename]
- hn = nodename
- x = self.sickdb[loginbase][hn]
- if hn in self.act_all:
- y = self.act_all[hn][0]
- if x['bucket'][0] != y['bucket'][0]:
- # 2a) mismatch, need a policy for how to resolve
- print "COMON and MONITOR have a mismatch: %s vs %s" % \
- (x['bucket'], y['bucket'])
- else:
- # 2b) ok, b/c they agree that there's still a problem..
- pass
-
- # for now, overwrite the comon entry for the one in act_all
- self.sickdb[loginbase][hn] = y
- # delete the entry from cache_all to keep it out of case 3)
- del self.cache_all[hn]
- else:
- # 1) ok, b/c it's a new problem.
- pass
-
- # 3) nodes that remin in cache_all were not identified by comon as
- # down. Do we keep them or not?
- for hn in self.cache_all.keys():
- y = self.act_all[hn][0]
- if 'monitor' in y['bucket']:
- loginbase = self.plcdb_hn2lb[hn]
- if loginbase not in self.sickdb:
- self.sickdb[loginbase] = {}
- self.sickdb[loginbase][hn] = y
- else:
- del self.cache_all[hn]
-
- print "len of cache_all: %d" % len(self.cache_all.keys())
-
- return
-
- def accumSickSites(self):
- """
- Take all sick nodes, find their sites, and put in
- sickdb[loginbase] = [diag_node1, diag_node2, ...]
- """
- while 1:
- diag_node = self.sickNoTicket.get(block = True)
- if diag_node == "None":
- break
-
- #for bucket in self.comon.comon_buckets.keys():
- # if (hostname in getattr(self.comon, bucket)):
- # buckets_per_node.append(bucket)
-
- #########################################################
- # TODO: this will break with more than one comon bucket!!
- nodename = diag_node['nodename']
- loginbase = self.plcdb_hn2lb[nodename] # plc.siteId(node)
-
- if loginbase not in self.sickdb:
- self.sickdb[loginbase] = {}
- #self.sickdb[loginbase][nodename] = []
- #else:
- #if nodename not in self.sickdb[loginbase]:
- # self.sickdb[loginbase][nodename] = []
-
- #self.sickdb[loginbase][nodename].append(diag_node)
- self.sickdb[loginbase][nodename] = diag_node
- # TODO: this will break with more than one comon bucket!!
- #########################################################
-
-
- def __actOnDebug(self, node):
- """
- If in debug, set the node to rins, reboot via PCU/POD
- """
- daysdown = self.comon.codata[node]['sshstatus'] // (60*60*24)
- logger.info("POLICY: Node %s in dbg. down for %s" %(node,daysdown))
- plc.nodeBootState(node, "rins")
- # TODO: only reboot if BootCD > 3.0
- # if bootcd[node] > 3.0:
- # if NODE_KEY in planet.cnf:
- # plc.nodeBootState(node, "rins")
- # reboot.reboot(node)
- # else:
- # email to update planet.cnf file
-
- # If it has a PCU
- reboot.reboot(node)
- # else:
- # email upgrade bootcd message, and treat as down.
- # Log it
- self.actionlogdb[node] = ['rins', daysdown, time.time()]
-
- def __emailSite(self, loginbase, roles, message, args):
- """
- loginbase is the unique site abbreviation, prepended to slice names.
- roles contains TECH, PI, USER roles, and derive email aliases.
- record contains {'message': [<subj>,<body>], 'args': {...}}
- """
- args.update({'loginbase':loginbase})
- # build targets
- contacts = []
- if TECH & roles:
- contacts += [TECHEMAIL % loginbase]
- elif PI & roles:
- contacts += [PIEMAIL % loginbase]
- elif USER & roles:
- slices = plc.slices(loginbase)
- if len(slices) >= 1:
- for slice in slices:
- contacts += [SLICEMAIL % slice]
- else:
- print "Received no slices for site: %s" % loginbase