#!/usr/bin/python
#
# This script is used to manipulate the operational state of nodes in
# different node groups.  These are basically set operations on nodes via the
# PLC API.
#
# Take the nodegroup name as an argument, then optionally:
#  * get a list of nodes in the given nodegroup.
#  * set some or all nodes in the set to 'rins'.
#  * do something else to them all.
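#
# Example invocations (illustrative only -- the selection flags come from
# the 'nodesets' parser module, which the code below implies provides
# --nodegroup, --site, --node, --nodelist and --nodeselect; the script
# name here is a placeholder):
#
#   ./thisscript.py --nodegroup Alpha --rins --reboot
#   ./thisscript.py --nodelist bad.txt --reboot --timewait 5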
import os, sys, time, traceback
from optparse import OptionParser

import plc
import database
import util.file
import const    # assumed source of the TECHEMAIL template used below

from nodecommon import *
from nodequery import verify, query_to_dict, node_select
from unified_model import *    # Log, LogRoll, PersistFlags, PersistMessage
import parser as parsermodule

import bootman        # debug nodes
import reboot         # down nodes without pcu
import mailmonitor    # down nodes with pcu
from emailTxt import mailtxt

api = plc.getAuthAPI()
class Reboot(object):
    def __init__(self, fbnode):
        self.fbnode = fbnode

    def _send_pcunotice(self, host):
        args = {}
        args['hostname'] = host
        try:
            args['pcu_id'] = plc.getpcu(host)['pcu_id']
        except:
            args['pcu_id'] = host   # no PCU record; keep the template happy
        m = PersistMessage(host, mailtxt.pcudown_one[0] % args,
                                 mailtxt.pcudown_one[1] % args, True, db='pcu_persistmessages')
        loginbase = plc.siteId(host)
        m.send([const.TECHEMAIL % loginbase])
    def pcu(self, host):
        # TODO: It should be possible to diagnose the various conditions of
        #       the PCU here, and send different messages as appropriate.
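        # For instance (a sketch only -- these condition strings and any
        # condition-specific mail templates are hypothetical; only
        # mailtxt.pcudown_one is known from this file):
        #   for cond in ("NetDown", "LoginFailed"):
        #       if cond in self.fbnode['pcu']:
        #           ... pick a condition-specific message ...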
        print "'%s'" % self.fbnode['pcu']
        if self.fbnode['pcu'] == "PCU" or "PCUOK" in self.fbnode['pcu']:
            self.action = "reboot.reboot('%s')" % host

            # remember recent PCU attempts for two days
            pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags')
            #pflags.resetRecentFlag('pcutried')
            if not pflags.getRecentFlag('pcutried'):
                try:
                    print "CALLING REBOOT!!!"
                    ret = reboot.reboot(host)
                    pflags.setRecentFlag('pcutried')
                    return ret
                except Exception, e:
                    traceback.print_exc()   # print_exc() writes the traceback itself
                    print e
                    # NOTE: this failure could be an implementation issue on
                    #       our end.  So, extra notices are confusing...
                    # self._send_pcunotice(host)
                    pflags.setRecentFlag('pcufailed')
                    return False
            elif not pflags.getRecentFlag('pcu_rins_tried'):
                try:
                    # set node to 'rins' boot state before power-cycling.
                    print "CALLING REBOOT +++ RINS"
                    plc.nodeBootState(host, 'rins')
                    ret = reboot.reboot(host)
                    pflags.setRecentFlag('pcu_rins_tried')
                    return ret
                except Exception, e:
                    traceback.print_exc()
                    print e
                    # NOTE: this failure could be an implementation issue on
                    #       our end.  So, extra notices are confusing...
                    # self._send_pcunotice(host)
                    pflags.setRecentFlag('pcufailed')
                    return False
            else:
                # we've tried the pcu recently, but it didn't work,
                # so did we send a message about it recently?
                if not pflags.getRecentFlag('pcumessagesent'):
                    self._send_pcunotice(host)
                    pflags.setRecentFlag('pcumessagesent')
                # This will result in mail() being called next, to try to
                # engage the technical contact to take care of it also.
                print "RETURNING FALSE"
                return False
        else:
            # no usable PCU on record; fall through to mail().
            self.action = "None"
            return False
    def mail(self, host):
        # Reset every 4 weeks or so (27 days)
        pflags = PersistFlags(host, 27*60*60*24, db='mail_persistflags')
        if not pflags.getRecentFlag('endrecord'):
            node_end_record(host)
            pflags.setRecentFlag('endrecord')

        # Then in either case, run mailmonitor.reboot()
        self.action = "mailmonitor.reboot('%s')" % host
        try:
            return mailmonitor.reboot(host)
        except Exception, e:
            email_exception(host)
            traceback.print_exc()
            print e
            return False
class RebootDebug(Reboot):
    def direct(self, host):
        self.action = "bootman.reboot('%s', config, None)" % host
        return bootman.reboot(host, config, None)

class RebootBoot(Reboot):
    def direct(self, host):
        self.action = "bootman.reboot('%s', config, 'reboot')" % host
        return bootman.reboot(host, config, 'reboot')

class RebootDown(Reboot):
    def direct(self, host):
        self.action = "None"
        return False    # this always fails, since the node will be down.
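
# The Reboot subclasses above are driven through one escalation chain in
# the main loop below: direct() first (via bootman), then pcu(), then
# mail().  A minimal sketch of that chain, assuming a findbad record
# 'fbnode' for 'host':
#
#   o = RebootDebug(fbnode)
#   if not (o.direct(host) or o.pcu(host) or o.mail(host)):
#       print "ALL METHODS OF RESTARTING %s FAILED" % host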
def set_node_to_rins(host, fb):

    node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
    record = {'observation' : node[0],
              'model' : 'USER_REQUEST',
              'action' : 'api.UpdateNode(%s, {"boot_state" : "rins"})' % host,
              'time' : time.time()}
    l = Log(host, record)

    ret = api.UpdateNode(host, {'boot_state' : 'rins'})
    if ret:
        # it's nice to see the current status rather than the previous status on the console
        node = api.GetNodes(host)[0]
        # 'i' is the global host counter maintained by the main loop below.
        print "%-2d" % (i-1), nodegroup_display(node, fb)
        return l
    else:
        print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
        return None
try:
    rebootlog = database.dbLoad("rebootlog")
except:
    # no saved log yet; start fresh.
    rebootlog = LogRoll()
parser = parsermodule.getParser(['nodesets'])
parser.set_defaults( timewait=0,
                     skip=0,
                     rins=False,
                     reboot=False,
                     findbad=False,
                     force=False,
                     nosetup=False,
                     verbose=False,
                     stopselect=None)

parser.add_option("", "--stopselect", dest="stopselect", metavar="",
                    help="The select string that must evaluate to true for the node to be considered 'done'")
parser.add_option("", "--findbad", dest="findbad", action="store_true",
                    help="Re-run findbad on the nodes we're going to check before acting.")
parser.add_option("", "--force", dest="force", action="store_true",
                    help="Force action regardless of previous actions/logs.")
parser.add_option("", "--rins", dest="rins", action="store_true",
                    help="Set the boot_state to 'rins' for all nodes.")
parser.add_option("", "--reboot", dest="reboot", action="store_true",
                    help="Actively try to reboot the nodes, keeping a log of actions.")
parser.add_option("", "--verbose", dest="verbose", action="store_true",
                    help="Extra debug output messages.")
parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
                    help="Do not perform the ordinary setup phase.")
parser.add_option("", "--skip", dest="skip",
                    help="Number of machines to skip on the input queue.")
parser.add_option("", "--timewait", dest="timewait",
                    help="Minutes to wait between iterations of 10 nodes.")
parser = parsermodule.getParser(['defaults'], parser)
config = parsermodule.parse_args(parser)
# COLLECT nodegroups, nodes and node lists
if config.nodegroup:
    ng = api.GetNodeGroups({'name' : config.nodegroup})
    nodelist = api.GetNodes(ng[0]['node_ids'])
    hostnames = [ n['hostname'] for n in nodelist ]

if config.site:
    site = api.GetSites(config.site)
    l_nodes = api.GetNodes(site[0]['node_ids'], ['hostname'])
    hostnames = [ n['hostname'] for n in l_nodes ]

if config.node or config.nodelist:
    if config.node: hostnames = [ config.node ]
    else:           hostnames = util.file.getListFromFile(config.nodelist)
fb = database.dbLoad("findbad")

if config.nodeselect:
    hostnames = node_select(config.nodeselect, fb['nodes'].keys(), fb)
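# An illustrative select string (hypothetical -- the exact grammar is
# whatever nodequery.node_select accepts):
#   --nodeselect 'state=DOWN&&boot_state=boot'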
if config.findbad:
    # re-run findbad on the given nodes to refresh their records.
    file = "findbad_scratch.txt"    # scratch filename; original value elided
    util.file.setFileFromList(file, hostnames)
    os.system("./findbad.py --cachenodes --increment --nodelist %s" % file)
    # TODO: shouldn't we reload the node list now?

l_blacklist = database.if_cached_else(1, "l_blacklist", lambda : [])
i = 1
count = 0
#print "hosts: %s" % hostnames
for host in hostnames:
    #if 'echo' in host or 'hptest-1' in host: continue
    try:
        try:
            node = api.GetNodes(host)[0]
        except:
            traceback.print_exc()
            print "FAILED GETNODES for host: %s" % host
            continue

        print "%-2d" % i, nodegroup_display(node, fb)
        i += 1

        if i-1 <= int(config.skip): continue
        if host in l_blacklist:
            print "%s is blacklisted. Skipping." % host
            continue
        if config.stopselect:
            dict_query = query_to_dict(config.stopselect)
            fbnode = fb['nodes'][host]['values']
            observed_state = get_current_state(fbnode)

            if verify(dict_query, fbnode) and observed_state != "dbg ":
                # evaluates to true, therefore skip.
                print "%s evaluates true for %s ; skipping..." % ( config.stopselect, host )
                try:
                    # todo: clean up act_all record here.
                    # todo: send thank you, etc.
                    mailmonitor.reboot(host)
                except Exception, e:
                    traceback.print_exc()
                    print e
                continue
            #else:
            #    print "%s failed to match %s: -%s-" % ( host, dict_query, observed_state )
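        # Illustrative 'done' condition (hypothetical grammar, as with
        # --nodeselect above): --stopselect 'state=BOOT' would stop
        # acting on a node once findbad observes it booted.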
        if not config.force and rebootlog.find(host, {'action' : ".*reboot"}, 60*60*2):
            print "recently rebooted %s. skipping... " % host
            continue
        if config.reboot:
            fbnode = fb['nodes'][host]['values']
            observed_state = get_current_state(fbnode)

            if observed_state == "dbg ":
                o = RebootDebug(fbnode)
            elif observed_state == "boot":
                if config.rins:
                    l = set_node_to_rins(host, fb)
                    if l: rebootlog.add(l)
                o = RebootBoot(fbnode)
            elif observed_state == "down":
                if config.rins:
                    l = set_node_to_rins(host, fb)
                    if l: rebootlog.add(l)
                o = RebootDown(fbnode)
            else:
                # unknown state; without this guard 'o' could be unbound below.
                print "unexpected state '%s' for %s; skipping" % (observed_state, host)
                continue
            if o.direct(host):
                record = {'observation' : "DIRECT_SUCCESS: %s" % observed_state,
                          'action' : o.action,
                          'time' : time.time()}
            elif o.pcu(host):
                record = {'observation' : "PCU_SUCCESS: %s" % observed_state,
                          'action' : o.action,
                          'time' : time.time()}
            elif o.mail(host):
                record = {'observation' : "MAIL_SUCCESS: %s" % observed_state,
                          'action' : o.action,
                          'time' : time.time()}
            else:
                record = {'observation' : "REBOOT_FAILED: %s" % observed_state,
                          'action' : "log failure",
                          'time' : time.time()}

                print "ALL METHODS OF RESTARTING %s FAILED" % host
                args = {}
                args['hostname'] = host
                #m = PersistMessage(host, "ALL METHODS FAILED for %(hostname)s" % args,
                #                         "CANNOT CONTACT", False, db='suspect_persistmessages')
                #m.send(['monitor-list@lists.planet-lab.org'])

            l = Log(host, record)
            rebootlog.add(l)
    except KeyboardInterrupt:
        print "Killed by interrupt"
        sys.exit(0)
    except:
        traceback.print_exc()
        print "Continuing..."
    count += 1
    if count % 10 == 0:
        print "Saving rebootlog"
        database.dbDump("rebootlog", rebootlog)
        wait_time = int(config.timewait)
        print "Sleeping %d minutes" % wait_time
        print "Minutes slept: ",
        ti = 0
        while ti < wait_time:
            time.sleep(60)
            ti += 1
            print "%d" % ti,
print "Saving rebootlog"
database.dbDump("rebootlog", rebootlog)