3 # This script is used to manipulate the operational state of nodes in
4 # different node groups. These are basically set operations on nodes via the
7 # Takes the nodegroup (ng) name as an argument, then:
9 # * get a list of nodes in the given nodegroup.
10 # * set some or all in the set to rins.
12 # * do something else to them all.
# Authenticated handle to the PLC API, used by every query/update below.
16 api = plc.getAuthAPI()
21 from optparse import OptionParser
24 from nodecommon import *
25 from nodequery import verify,query_to_dict,node_select
27 from unified_model import *
31 import parser as parsermodule
34 import bootman # debug nodes
35 import reboot # down nodes without pcu
36 import mailmonitor # down nodes with pcu
37 from emailTxt import mailtxt
# Constructor of the (not fully visible) Reboot base class: receives the
# node's findbad record. Later code reads self.fbnode, so presumably the
# missing body stores fbnode there — confirm against the full file.
42 def __init__(self, fbnode):
# Compose and send a "PCU down" notice for `host` to the site's
# technical-contact alias.
# NOTE(review): lines are missing from this chunk (e.g. the
# initialization of `args` and any error handling around plc.getpcu) —
# consult the full file before changing anything here.
45 def _send_pcunotice(self, host):
47 args['hostname'] = host
49 args['pcu_id'] = plc.getpcu(host)['pcu_id']
# Subject/body come from the mailtxt.pcudown_one template pair; the
# db='pcu_persistmessages' argument presumably persists/deduplicates
# the message per host — TODO confirm PersistMessage semantics.
53 m = PersistMessage(host, mailtxt.pcudown_one[0] % args,
54 mailtxt.pcudown_one[1] % args, True, db='pcu_persistmessages')
# Route to the TECHEMAIL alias derived from the node's site loginbase.
56 loginbase = plc.siteId(host)
57 m.send([const.TECHEMAIL % loginbase])
# --- PCU reboot attempt logic (the enclosing `def` line is not visible
# in this chunk). Escalation: (1) plain PCU reboot, (2) set boot_state
# to 'rins' and PCU reboot again, (3) notify the tech contact once.
# Attempt/failure state is remembered across runs via PersistFlags with
# a 2-day expiry. NOTE(review): several lines (try/except/else headers,
# return statements) are missing here — do not edit without the full file.
60 # TODO: It should be possible to diagnose the various conditions of
61 # the PCU here, and send different messages as appropriate.
62 print "'%s'" % self.fbnode['pcu']
# Only attempt the PCU path when findbad flagged the PCU as present/ok.
63 if self.fbnode['pcu'] == "PCU" or "PCUOK" in self.fbnode['pcu']:
64 self.action = "reboot.reboot('%s')" % host
66 pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags')
67 #pflags.resetRecentFlag('pcutried')
# First escalation step: plain PCU reboot, at most once per flag window.
68 if not pflags.getRecentFlag('pcutried'):
70 print "CALLING REBOOT!!!"
71 ret = reboot.reboot(host)
73 pflags.setRecentFlag('pcutried')
# (failure path of the — missing — try block above)
78 print traceback.print_exc(); print e
80 # NOTE: this failure could be an implementation issue on
81 # our end. So, extra notices are confusing...
82 # self._send_pcunotice(host)
84 pflags.setRecentFlag('pcufailed')
# Second escalation step: force a reinstall ('rins') then PCU reboot.
88 elif not pflags.getRecentFlag('pcu_rins_tried'):
90 # set node to 'rins' boot state.
91 print "CALLING REBOOT +++ RINS"
92 plc.nodeBootState(host, 'rins')
93 ret = reboot.reboot(host)
95 pflags.setRecentFlag('pcu_rins_tried')
# (failure path of the — missing — try block above)
100 print traceback.print_exc(); print e
102 # NOTE: this failure could be an implementation issue on
103 # our end. So, extra notices are confusing...
104 # self._send_pcunotice(host)
106 pflags.setRecentFlag('pcufailed')
# Last step: both PCU attempts were made recently; email the tech
# contact at most once per flag window.
110 # we've tried the pcu recently, but it didn't work,
111 # so did we send a message about it recently?
112 if not pflags.getRecentFlag('pcumessagesent'):
114 self._send_pcunotice(host)
116 pflags.setRecentFlag('pcumessagesent')
119 # This will result in mail() being called next, to try to
120 # engage the technical contact to take care of it also.
121 print "RETURNING FALSE"
# Last-resort strategy: close out the node's current record (once per
# ~4-week PersistFlags window) and hand the host to mailmonitor, which
# engages the technical contact by email.
# NOTE(review): the try/except header lines around the mailmonitor call
# are missing from this chunk.
129 def mail(self, host):
131 # Reset every 4 weeks or so
132 pflags = PersistFlags(host, 27*60*60*24, db='mail_persistflags')
# End the node's open record only once per window, so repeated runs
# don't keep re-closing it.
133 if not pflags.getRecentFlag('endrecord'):
134 node_end_record(host)
135 pflags.setRecentFlag('endrecord')
138 # Then in either case, run mailmonitor.reboot()
139 self.action = "mailmonitor.reboot('%s')" % host
141 return mailmonitor.reboot(host)
# (failure path of the — missing — try block above)
143 print traceback.print_exc(); print e
class RebootDebug(Reboot):
    """Strategy for nodes observed in the 'dbg ' boot state: drive the
    node through bootman with no explicit target state."""

    def direct(self, host):
        # Record what we are about to do before doing it, so the action
        # string is available for logging even if bootman fails mid-way.
        cmd = "bootman.reboot('%s', config, None)" % host
        self.action = cmd
        return bootman.reboot(host, config, None)
class RebootBoot(Reboot):
    """Strategy for nodes observed in the 'boot' state: ask bootman to
    take the node through an explicit 'reboot'."""

    def direct(self, host):
        # Stash the human-readable action first, then perform it.
        action_desc = "bootman.reboot('%s', config, 'reboot')" % host
        self.action = action_desc
        return bootman.reboot(host, config, 'reboot')
# Strategy for nodes observed 'down' (and, per the main loop, without a
# usable PCU path): direct() cannot succeed, so it reports failure and —
# presumably — lets the caller escalate to the PCU/mail strategies.
# NOTE(review): one interior line (likely a self.action assignment) is
# missing from this chunk.
158 class RebootDown(Reboot):
160 def direct(self, host):
162 return False # this always fails, since the node will be down.
# Flip `host`'s boot_state to 'rins' (reinstall) via the PLC API and
# return a Log entry describing the change for the reboot log.
# NOTE(review): the try/except header lines are missing from this chunk
# (line "FAILED TO UPDATE..." below is the failure branch), and `i` is
# the per-host counter from the main loop, referenced as a global —
# confirm against the full file.
164 def set_node_to_rins(host, fb):
# Snapshot the node's pre-change state so the Log records what was
# observed when the user requested the reinstall.
166 node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
167 record = {'observation' : node[0],
168 'model' : 'USER_REQUEST',
169 'action' : 'api.UpdateNode(%s, {"boot_state" : "rins"})' % host,
170 'time' : time.time()}
171 l = Log(host, record)
173 ret = api.UpdateNode(host, {'boot_state' : 'rins'})
175 # it's nice to see the current status rather than the previous status on the console
176 node = api.GetNodes(host)[0]
178 print "%-2d" % (i-1), nodegroup_display(node, fb)
# (failure branch of the — missing — try block above)
181 print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
# Load the persistent reboot action log; the bare LogRoll() line below
# is presumably the fallback when no cached log exists (the surrounding
# try/except lines are missing from this chunk).
186 rebootlog = database.dbLoad("rebootlog")
188 rebootlog = LogRoll()
# Command-line interface. NOTE(review): set_defaults() is truncated here
# — its remaining keyword defaults (and some options) are on lines not
# shown in this chunk.
190 parser = parsermodule.getParser(['nodesets'])
191 parser.set_defaults( timewait=0,
202 parser.add_option("", "--stopselect", dest="stopselect", metavar="",
203 help="The select string that must evaluate to true for the node to be considered 'done'")
204 parser.add_option("", "--findbad", dest="findbad", action="store_true",
205 help="Re-run findbad on the nodes we're going to check before acting.")
206 parser.add_option("", "--force", dest="force", action="store_true",
207 help="Force action regardless of previous actions/logs.")
208 parser.add_option("", "--rins", dest="rins", action="store_true",
209 help="Set the boot_state to 'rins' for all nodes.")
210 parser.add_option("", "--reboot", dest="reboot", action="store_true",
211 help="Actively try to reboot the nodes, keeping a log of actions.")
213 parser.add_option("", "--verbose", dest="verbose", action="store_true",
214 help="Extra debug output messages.")
# --nosetup: skip the setup phase before acting on the node set.
# Fix: user-facing help text had a typo ("orginary" -> "ordinary").
parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
                  help="Do not perform the ordinary setup phase.")
217 parser.add_option("", "--skip", dest="skip",
218 help="Number of machines to skip on the input queue.")
219 parser.add_option("", "--timewait", dest="timewait",
220 help="Minutes to wait between iterations of 10 nodes.")
# Layer the common defaults onto the parser, then parse argv into the
# global `config` used throughout the rest of the script.
222 parser = parsermodule.getParser(['defaults'], parser)
223 config = parsermodule.parse_args(parser)
# Build `hostnames` from one of several selectors. NOTE(review): the
# guarding `if config.nodegroup:` / `elif config.site:` lines are
# missing from this chunk — each indented group below belongs to one of
# those missing conditionals; confirm against the full file.
225 # COLLECT nodegroups, nodes and node lists
227 ng = api.GetNodeGroups({'name' : config.nodegroup})
228 nodelist = api.GetNodes(ng[0]['node_ids'])
229 hostnames = [ n['hostname'] for n in nodelist ]
232 site = api.GetSites(config.site)
233 l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
234 hostnames = [ n['hostname'] for n in l_nodes ]
# Explicit node(s) override the nodegroup/site selection.
236 if config.node or config.nodelist:
237 if config.node: hostnames = [ config.node ]
238 else: hostnames = util.file.getListFromFile(config.nodelist)
# Cached findbad observations: per-host state used by all strategies.
240 fb = database.dbLoad("findbad")
242 if config.nodeselect:
243 hostnames = node_select(config.nodeselect, fb['nodes'].keys(), fb)
# Optional --findbad refresh before acting (the enclosing conditional
# and the assignment of `file` are on lines missing from this chunk).
246 # rerun findbad with the nodes in the given nodes.
248 util.file.setFileFromList(file, hostnames)
249 os.system("./findbad.py --cachenodes --increment --nodelist %s" % file)
250 # TODO: shouldn't we reload the node list now?
# Blacklisted hosts are never acted on (empty list if no cache).
252 l_blacklist = database.if_cached_else(1, "l_blacklist", lambda : [])
# Main per-host loop: pick a reboot strategy from the node's observed
# state, attempt it, and log the outcome. NOTE(review): many structural
# lines (try/except headers, the increment of counter `i`, the calls
# that produce the *_SUCCESS records, `continue` statements) are missing
# from this chunk — treat the flow annotations below as reconstructions
# to verify against the full file.
256 #print "hosts: %s" % hostnames
257 for host in hostnames:
259 #if 'echo' in host or 'hptest-1' in host: continue
263 node = api.GetNodes(host)[0]
# (failure path of the — missing — try around GetNodes)
265 print traceback.print_exc();
266 print "FAILED GETNODES for host: %s" % host
269 print "%-2d" % i, nodegroup_display(node, fb)
# Honor --skip (skip the first N hosts) and the blacklist.
271 if i-1 <= int(config.skip): continue
272 if host in l_blacklist:
273 print "%s is blacklisted. Skipping." % host
# --stopselect: if the host already matches the "done" predicate (and
# is not stuck in dbg), thank via mailmonitor and skip it.
276 if config.stopselect:
277 dict_query = query_to_dict(config.stopselect)
278 fbnode = fb['nodes'][host]['values']
279 observed_state = get_current_state(fbnode)
281 if verify(dict_query, fbnode) and observed_state != "dbg ":
282 # evaluates to true, therefore skip.
283 print "%s evaluates true for %s ; skipping..." % ( config.stopselect, host )
285 # todo: clean up act_all record here.
286 # todo: send thank you, etc.
287 mailmonitor.reboot(host)
289 print traceback.print_exc(); print e
293 #print "%s failed to match %s: -%s-" % ( host, dict_query, observed_state )
# Rate-limit: unless --force, skip hosts rebooted within the last 2h.
296 if not config.force and rebootlog.find(host, {'action' : ".*reboot"}, 60*60*2):
297 print "recently rebooted %s. skipping... " % host
# Choose the strategy object from the observed boot state; 'boot' and
# 'down' nodes are first pushed to 'rins' (reinstall) and that change
# is added to the reboot log.
302 fbnode = fb['nodes'][host]['values']
303 observed_state = get_current_state(fbnode)
305 if observed_state == "dbg ":
306 o = RebootDebug(fbnode)
308 elif observed_state == "boot" :
310 l = set_node_to_rins(host, fb)
311 if l: rebootlog.add(l)
313 o = RebootBoot(fbnode)
315 elif observed_state == "down":
317 l = set_node_to_rins(host, fb)
318 if l: rebootlog.add(l)
320 o = RebootDown(fbnode)
# Outcome records — presumably one per escalation stage
# (direct -> pcu -> mail), with the branching lines missing here.
324 record = {'observation' : "DIRECT_SUCCESS: %s" % observed_state,
327 'time' : time.time()}
329 record = {'observation' : "PCU_SUCCESS: %s" % observed_state,
332 'time' : time.time()}
334 record = {'observation' : "MAIL_SUCCESS: %s" % observed_state,
337 'time' : time.time()}
339 record = {'observation' : "REBOOT_FAILED: %s" % observed_state,
340 'action' : "log failure",
342 'time' : time.time()}
344 print "ALL METHODS OF RESTARTING %s FAILED" % host
346 args['hostname'] = host
347 #m = PersistMessage(host, "ALL METHODS FAILED for %(hostname)s" % args,
348 # "CANNOT CONTACT", False, db='suspect_persistmessages')
350 #m.send(['monitor-list@lists.planet-lab.org'])
# Whatever record was built above is committed to the reboot log.
352 l = Log(host, record)
# Ctrl-C aborts the run; any other exception is logged and the loop
# moves on to the next host.
355 except KeyboardInterrupt:
356 print "Killed by interrupt"
359 print traceback.print_exc();
360 print "Continuing..."
# Persist the action log between batches, then wait --timewait minutes
# (the loop counter `ti` is initialized/incremented on lines missing
# from this chunk), and persist once more before exiting.
364 print "Saving rebootlog"
365 database.dbDump("rebootlog", rebootlog)
366 wait_time = int(config.timewait)
367 print "Sleeping %d minutes" % wait_time
369 print "Minutes slept: ",
371 while ti < wait_time:
379 print "Saving rebootlog"
380 database.dbDump("rebootlog", rebootlog)