3 # This script is used to manipulate the operational state of nodes in
4 # different node groups. These are basically set operations on nodes via the
7 # Takes the nodegroup name as an argument.
9 # * get a list of nodes in the given nodegroup.
10 # * set some or all in the set to rins.
12 # * do something else to them all.
16 api = plc.getAuthAPI()
20 from config import config as cfg
22 from optparse import OptionParser
24 from nodecommon import *
25 from nodequery import verify,query_to_dict,node_select
27 from unified_model import *
31 import parser as parsermodule
34 import bootman # debug nodes
35 import monitor # down nodes with pcu
36 import reboot # down nodes without pcu
37 from emailTxt import mailtxt
# NOTE(review): this excerpt is missing lines (the embedded numbering skips),
# so method bodies below are incomplete — confirm against the full file.
# Constructor: stores the findbad record for this node (body not visible here).
42 def __init__(self, fbnode):
# Send a "PCU down" notice for `host` to the site's tech contact,
# deduplicated via PersistMessage.
45 def _send_pcunotice(self, host):
# `args` is presumably initialized on the missing line 46 — verify.
47 args['hostname'] = host
49 args['pcu_id'] = plc.getpcu(host)['pcu_id']
# Build the persistent message from the pcudown_one mail template
# (subject/body pair), filled in with the args mapping.
53 m = PersistMessage(host, mailtxt.pcudown_one[0] % args,
54 mailtxt.pcudown_one[1] % args, True, db='pcu_persistmessages')
# Resolve the site login base for the host and mail its tech alias.
56 loginbase = plc.siteId(host)
57 m.send([policy.TECHEMAIL % loginbase])
# NOTE(review): the enclosing `def` line for this method is not visible in
# this excerpt (original lines 58-59 are missing) — presumably the PCU reboot
# attempt method of the Reboot class; confirm against the full file.
60 # TODO: It should be possible to diagnose the various conditions of
61 # the PCU here, and send different messages as appropriate.
# Only attempt a PCU reboot when findbad reports a working PCU entry.
62 if self.fbnode['pcu'] == "PCU":
63 self.action = "reboot.reboot('%s')" % host
# Rate-limit PCU attempts: flags persist for 2 days (2*60*60*24 seconds).
65 pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags')
66 if not pflags.getRecentFlag('pcutried'):
67 pflags.setRecentFlag('pcutried')
# Attempt the actual PCU-driven reboot (missing lines likely wrap
# this in try/except — the print below reads like an except body).
69 ret = reboot.reboot(host)
75 print traceback.print_exc(); print e
77 # NOTE: this failure could be an implementation issue on
78 # our end. So, extra notices are confusing...
79 # self._send_pcunotice(host)
81 pflags.setRecentFlag('pcufailed')
85 # we've tried the pcu recently, but it didn't work,
86 # so did we send a message about it recently?
87 if not pflags.getRecentFlag('pcumessagesent'):
# Notify the site once, then flag so we don't re-send within the window.
89 self._send_pcunotice(host)
91 pflags.setRecentFlag('pcumessagesent')
93 # NOTE: this will result in just one message sent at a time.
# Last-resort strategy: close out the node's record and fall back to
# monitor.reboot().  NOTE(review): lines are missing from this excerpt;
# the trailing print reads like an except body — confirm the try/except
# structure against the full file.
102 def mail(self, host):
104 # Reset every 4 weeks or so
# Flags persist 27 days (27*60*60*24 seconds), i.e. roughly 4 weeks.
105 pflags = PersistFlags(host, 27*60*60*24, db='mail_persistflags')
106 if not pflags.getRecentFlag('endrecord'):
# End the node's current record once per window, then flag it.
107 node_end_record(host)
108 pflags.setRecentFlag('endrecord')
111 # Then in either case, run monitor.reboot()
112 self.action = "monitor.reboot('%s')" % host
114 return monitor.reboot(host)
116 print traceback.print_exc(); print e
class RebootDebug(Reboot):
    """Reboot strategy for nodes observed in the 'dbg' boot state."""

    def direct(self, host):
        # Record the equivalent command string for the action log,
        # then actually perform the bootman-driven reboot.
        command = "bootman.reboot('%s', config, None)" % host
        self.action = command
        return bootman.reboot(host, config, None)
class RebootBoot(Reboot):
    """Reboot strategy for nodes observed in the 'boot' state."""

    def direct(self, host):
        # Record the equivalent command string for the action log,
        # then perform the bootman-driven reboot into the 'reboot' state.
        command = "bootman.reboot('%s', config, 'reboot')" % host
        self.action = command
        return bootman.reboot(host, config, 'reboot')
# Strategy for unreachable nodes: a direct reboot can never succeed.
# NOTE(review): original line 134 is missing from this excerpt (siblings
# assign self.action there) — confirm against the full file.
131 class RebootDown(Reboot):
133 def direct(self, host):
135 return False # this always fails, since the node will be down.
# Set `host`'s boot_state to 'rins' via the PLC API and build a Log record
# of the change.  NOTE(review): this excerpt is missing lines — the final
# print reads like an except body, and the `i` referenced below is not a
# local (presumably the caller's loop counter leaking in as a global);
# confirm both against the full file.
137 def set_node_to_rins(host, fb):
# Snapshot the node's current state as the "observation" for the log.
139 node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
140 record = {'observation' : node[0],
141 'model' : 'USER_REQUEST',
142 'action' : 'api.UpdateNode(%s, {"boot_state" : "rins"})' % host,
143 'time' : time.time()}
144 l = Log(host, record)
146 ret = api.UpdateNode(host, {'boot_state' : 'rins'})
148 # it's nice to see the current status rather than the previous status on the console
149 node = api.GetNodes(host)[0]
151 print "%-2d" % (i-1), nodegroup_display(node, fb)
154 print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
# Load the persistent action log, falling back to an empty LogRoll.
# NOTE(review): missing lines here presumably wrap dbLoad in try/except.
159 rebootlog = database.dbLoad("rebootlog")
161 rebootlog = LogRoll()
# Build the command-line parser on top of the shared 'nodesets' options.
# NOTE(review): the set_defaults(...) call is truncated in this excerpt.
163 parser = parsermodule.getParser(['nodesets'])
164 parser.set_defaults( timewait=0,
177 parser.add_option("", "--stopselect", dest="stopselect", metavar="",
178 help="The select string that must evaluate to true for the node to be considered 'done'")
179 parser.add_option("", "--findbad", dest="findbad", action="store_true",
180 help="Re-run findbad on the nodes we're going to check before acting.")
181 parser.add_option("", "--force", dest="force", action="store_true",
182 help="Force action regardless of previous actions/logs.")
183 parser.add_option("", "--rins", dest="rins", action="store_true",
184 help="Set the boot_state to 'rins' for all nodes.")
185 parser.add_option("", "--reboot", dest="reboot", action="store_true",
186 help="Actively try to reboot the nodes, keeping a log of actions.")
188 parser.add_option("", "--verbose", dest="verbose", action="store_true",
189 help="Extra debug output messages.")
# NOTE(review): "orginary" below is a typo for "ordinary" in the user-facing
# help string; fix in the full file (a doc pass must not alter runtime strings).
190 parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
191 help="Do not perform the orginary setup phase.")
192 parser.add_option("", "--skip", dest="skip",
193 help="Number of machines to skip on the input queue.")
194 parser.add_option("", "--timewait", dest="timewait",
195 help="Minutes to wait between iterations of 10 nodes.")
# Layer the shared 'defaults' options on and parse argv into `config`.
197 parser = parsermodule.getParser(['defaults'], parser)
198 config = parsermodule.parse_args(parser)
200 # COLLECT nodegroups, nodes and node lists
# Default host set: every node in the named nodegroup.
202 ng = api.GetNodeGroups({'name' : config.nodegroup})
203 nodelist = api.GetNodes(ng[0]['node_ids'])
204 hostnames = [ n['hostname'] for n in nodelist ]
# Explicit --node / --nodelist overrides the nodegroup-derived set.
206 if config.node or config.nodelist:
207 if config.node: hostnames = [ config.node ]
208 else: hostnames = config.getListFromFile(config.nodelist)
# --nodeselect overrides both of the above.
210 if config.nodeselect:
211 hostnames = node_select(config.nodeselect)
214 # rerun findbad with the nodes in the given nodes.
# NOTE(review): `file` is not defined in this excerpt (missing line 215?) —
# presumably a temp-file path handed to findbad.py; confirm in the full file.
216 util.file.setFileFromList(file, hostnames)
217 os.system("./findbad.py --cachenodes --debug=0 --dbname=findbad --increment --nodelist %s" % file)
# Load the (freshly updated) findbad database for the loop below.
219 fb = database.dbLoad("findbad")
# Main per-host loop: display status, honor the various skip conditions,
# optionally flip boot_state to 'rins', then try reboot strategies in order.
# NOTE(review): this excerpt is missing many lines (try/else/continue
# framing, the loop counter `i`, the success/failure branch conditions
# around the record literals) — confirm control flow in the full file.
223 for host in hostnames:
225 #if 'echo' in host or 'hptest-1' in host: continue
228 node = api.GetNodes(host)[0]
230 print traceback.print_exc();
231 print "FAILED GETNODES for host: %s" % host
234 print "%-2d" % i, nodegroup_display(node, fb)
# Honor --skip: fast-forward past the first N entries of the queue.
236 if i < int(config.skip): continue
# --stopselect: skip hosts whose findbad record already satisfies the
# "done" predicate (unless they're sitting in debug).
238 if config.stopselect:
239 dict_query = query_to_dict(config.stopselect)
240 fbnode = fb['nodes'][host]['values']
241 observed_state = get_current_state(fbnode)
243 if verify(dict_query, fbnode) and observed_state != "dbg ":
244 # evaluates to true, therefore skip.
245 print "%s evaluates true for %s ; skipping..." % ( config.stopselect, host )
# Alternative stop condition: a key/value match in the findbad record.
248 if config.stopkey and config.stopvalue:
249 fbnode = fb['nodes'][host]['values']
250 observed_state = get_current_state(fbnode)
252 if config.stopkey in fbnode:
253 if config.stopvalue in fbnode[config.stopkey] and observed_state != "dbg ":
254 print "%s has stopvalue; skipping..." % host
257 print "stopkey %s not in fbnode record for %s; skipping..." % (config.stopkey, host)
# Don't re-reboot a host acted on within the last 2 hours (60*60*2 s),
# unless --force.
261 if not config.force and rebootlog.find(host, {'action' : ".*reboot"}, 60*60*2):
262 print "recently rebooted %s. skipping... " % host
# Pick a reboot strategy object based on the observed findbad state.
267 fbnode = fb['nodes'][host]['values']
268 observed_state = get_current_state(fbnode)
270 if observed_state == "dbg ":
271 o = RebootDebug(fbnode)
273 elif observed_state == "boot" :
# For 'boot' and 'down' states, request reinstall first and log it.
275 l = set_node_to_rins(host, fb)
276 if l: rebootlog.add(l)
278 o = RebootBoot(fbnode)
280 elif observed_state == "down":
282 l = set_node_to_rins(host, fb)
283 if l: rebootlog.add(l)
285 o = RebootDown(fbnode)
# Build a log record describing which escalation step succeeded
# (direct -> pcu -> mail); the branch conditions are not visible here.
289 record = {'observation' : "DIRECT_SUCCESS: %s" % observed_state,
292 'time' : time.time()}
294 record = {'observation' : "PCU_SUCCESS: %s" % observed_state,
297 'time' : time.time()}
299 record = {'observation' : "MAIL_SUCCESS: %s" % observed_state,
302 'time' : time.time()}
304 record = {'observation' : "REBOOT_FAILED: %s" % observed_state,
305 'action' : "log failure",
307 'time' : time.time()}
# Every strategy failed: announce it and mail the monitor list.
309 print "ALL METHODS OF RESTARTING %s FAILED" % host
311 args['hostname'] = host
312 m = PersistMessage(host, "ALL METHODS FAILED for %(hostname)s" % args,
313 "CANNOT CONTACT", False, db='suspect_persistmessages')
315 m.send(['monitor-list@lists.planet-lab.org'])
# Persist whichever record was built above into the reboot log.
317 l = Log(host, record)
320 except KeyboardInterrupt:
321 print "Killed by interrupt"
324 print traceback.print_exc();
325 print "Continuing..."
# Persist the action log, then optionally sleep --timewait minutes between
# batches.  NOTE(review): the sleep-loop body and `ti`'s initialization are
# missing from this excerpt; the log is dumped again afterwards.
329 print "Saving rebootlog"
330 database.dbDump("rebootlog", rebootlog)
331 wait_time = int(config.timewait)
332 print "Sleeping %d minutes" % wait_time
334 print "Minutes slept: ",
336 while ti < wait_time:
344 print "Saving rebootlog"
345 database.dbDump("rebootlog", rebootlog)