# This script is used to manipulate the operational state of nodes in
# different node groups. These are basically set operations on nodes via the
# PLC API.
#
# Take the nodegroup name as an argument, then:
# * get a list of nodes in the given nodegroup.
# * set some or all nodes in the set to 'rins'.
# * do something else to them all.
# Authenticated PLC API session used throughout this script.
# NOTE(review): 'plc' and 'auth' are imported/defined on lines missing
# from this chunk.
api = plc.PLC(auth.auth, auth.plc)

from config import config as cfg
import config as configmodule
from optparse import OptionParser

from nodecommon import *
from nodequery import verify,query_to_dict,node_select

from unified_model import *

import bootman # debug nodes
import monitor # down nodes with pcu
import reboot # down nodes without pcu
from emailTxt import mailtxt
    def __init__(self, fbnode):
        """Remember the findbad record for this node.

        NOTE(review): part of this constructor body is missing from the
        visible chunk; presumably it stores fbnode as self.fbnode, which
        the methods below read -- confirm against the full file.
        """

    def _send_pcunotice(self, host):
        # Send a "PCU down" notice for this host to the owning site's
        # technical contact, de-duplicated via PersistMessage.
        # NOTE(review): 'args' is initialized on a line missing from this
        # chunk (presumably args = {}).
        args['hostname'] = host
        # Include the PCU id so the mail template can reference it.
        args['pcu_id'] = plc.getpcu(host)['pcu_id']
        # PersistMessage persists what was sent so repeated runs do not
        # re-send the same notice for the same host.
        m = PersistMessage(host, mailtxt.pcudown_one[0] % args,
                mailtxt.pcudown_one[1] % args, True, db='pcu_persistmessages')
        loginbase = plc.siteId(host)
        m.send([policy.TECHEMAIL % loginbase])

    # ------------------------------------------------------------------
    # NOTE(review): the 'def' line enclosing the statements below is
    # missing from this chunk; from context this is the PCU-based reboot
    # attempt (it sets self.action and calls reboot.reboot()).
    # ------------------------------------------------------------------
        # TODO: It should be possible to diagnose the various conditions of
        # the PCU here, and send different messages as appropriate.
        if self.fbnode['pcu'] == "PCU":
            self.action = "reboot.reboot('%s')" % host

            # Flags expire after 2 days, so at most one PCU attempt is made
            # per host per two-day window.
            pflags = PersistFlags(host, 2*60*60*24, db='pcu_persistflags')
            if not pflags.getRecentFlag('pcutried'):
                pflags.setRecentFlag('pcutried')
                # NOTE(review): the 'try:'/'except' lines around this call
                # are missing from this chunk; the print below is the
                # exception handler.
                ret = reboot.reboot(host)
                print traceback.print_exc(); print e
                # NOTE: this failure could be an implementation issue on
                # our end. So, extra notices are confusing...
                # self._send_pcunotice(host)
                pflags.setRecentFlag('pcufailed')

            # we've tried the pcu recently, but it didn't work,
            # so did we send a message about it recently?
            if not pflags.getRecentFlag('pcumessagesent'):
                self._send_pcunotice(host)
                pflags.setRecentFlag('pcumessagesent')

            # NOTE: this will result in just one message sent at a time.
    def mail(self, host):
        """Escalation strategy: close out the node's availability record
        (at most once per ~4-week window) and delegate to monitor.reboot(),
        which presumably handles the mail escalation.

        NOTE(review): the 'try:'/'except' lines of this method are missing
        from this chunk; the final print is the exception handler.
        """
        # Reset every 4 weeks or so
        pflags = PersistFlags(host, 27*60*60*24, db='mail_persistflags')
        if not pflags.getRecentFlag('endrecord'):
            # Only end the record once per flag window.
            node_end_record(host)
            pflags.setRecentFlag('endrecord')

        # Then in either case, run monitor.reboot()
        self.action = "monitor.reboot('%s')" % host
        return monitor.reboot(host)
        # NOTE(review): exception handler from the missing try/except.
        print traceback.print_exc(); print e
class RebootDebug(Reboot):
    """Reboot strategy for nodes observed in the 'dbg' boot state."""

    def direct(self, host):
        # Remember exactly what we ran, then hand off to bootman.
        command = "bootman.reboot('%s', config, None)" % host
        self.action = command
        return bootman.reboot(host, config, None)
class RebootBoot(Reboot):
    """Reboot strategy for nodes observed in the 'boot' state."""

    def direct(self, host):
        # Remember exactly what we ran, then hand off to bootman with the
        # explicit 'reboot' directive.
        command = "bootman.reboot('%s', config, 'reboot')" % host
        self.action = command
        return bootman.reboot(host, config, 'reboot')
class RebootDown(Reboot):
    """Reboot strategy for nodes that are down: no direct path can work."""

    def direct(self, host):
        # The node is offline and unreachable, so a direct reboot attempt
        # can never succeed; callers fall through to the other strategies.
        return False # this always fails, since the node will be down.
def set_node_to_rins(host, fb):
    # Set the node's boot_state to 'rins' (reinstall) via the PLC API and
    # build a Log entry recording the previous observed state.
    # NOTE(review): the 'try:'/'except' lines of this function and its
    # 'return' statement are missing from this chunk; callers do
    # 'l = set_node_to_rins(...); if l: ...', so presumably it returns
    # the Log on success and a falsy value on failure -- confirm.
    node = api.GetNodes(host, ['boot_state', 'last_contact', 'last_updated', 'date_created'])
    record = {'observation' : node[0],
              'model' : 'USER_REQUEST',
              'action' : 'api.UpdateNode(%s, {"boot_state" : "rins"})' % host,
              'time' : time.time()}
    l = Log(host, record)

    ret = api.UpdateNode(host, {'boot_state' : 'rins'})
    # it's nice to see the current status rather than the previous status on the console
    node = api.GetNodes(host)[0]
    # NOTE(review): 'i' is presumably the main loop's host counter (a
    # global) -- confirm against the caller.
    print "%-2d" % (i-1), nodegroup_display(node, fb)
    # NOTE(review): exception handler from the missing try/except.
    print "FAILED TO UPDATE NODE BOOT STATE : %s" % host
# Load the persisted log of past reboot actions so repeated runs remember
# what was already done.
# NOTE(review): the 'try:'/'except' wrapper is missing from this chunk;
# the second assignment is presumably the fallback when no saved log exists.
rebootlog = database.dbLoad("rebootlog")
rebootlog = LogRoll()
# --------------------------------------------------------------------------
# Command-line options.
# --------------------------------------------------------------------------
parser = OptionParser()
# NOTE(review): the remaining keyword defaults of set_defaults() and its
# closing paren are on lines missing from this chunk.
parser.set_defaults(nodegroup=None,

# --- which nodes to operate on ---
parser.add_option("", "--node", dest="node", metavar="nodename.edu",
        help="A single node name to add to the nodegroup")
parser.add_option("", "--nodelist", dest="nodelist", metavar="list.txt",
        help="Use all nodes in the given file for operation.")
parser.add_option("", "--nodegroup", dest="nodegroup", metavar="NodegroupName",
        help="Specify a nodegroup to perform actions on")
parser.add_option("", "--nodeselect", dest="nodeselect", metavar="querystring",
        help="Specify a query to perform on findbad db")

# --- verbosity / setup ---
parser.add_option("", "--verbose", dest="verbose", action="store_true",
        help="Extra debug output messages.")
# NOTE(review): "orginary" is a typo for "ordinary" in the user-visible
# help string; left as-is here since this edit changes comments only.
parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
        help="Do not perform the orginary setup phase.")

# --- pacing of the main loop ---
parser.add_option("", "--skip", dest="skip",
        help="Number of machines to skip on the input queue.")
parser.add_option("", "--timewait", dest="timewait",
        help="Minutes to wait between iterations of 10 nodes.")

# --- stop conditions ---
parser.add_option("", "--stopselect", dest="stopselect", metavar="",
        help="The select string that must evaluate to true for the node to be considered 'done'")
# NOTE(review): the help= arguments and closing parens of the next two
# options are on lines missing from this chunk.
parser.add_option("", "--stopkey", dest="stopkey", metavar="",
parser.add_option("", "--stopvalue", dest="stopvalue", metavar="",

# --- actions ---
parser.add_option("", "--findbad", dest="findbad", action="store_true",
        help="Re-run findbad on the nodes we're going to check before acting.")
parser.add_option("", "--force", dest="force", action="store_true",
        help="Force action regardless of previous actions/logs.")
parser.add_option("", "--rins", dest="rins", action="store_true",
        help="Set the boot_state to 'rins' for all nodes.")
parser.add_option("", "--reboot", dest="reboot", action="store_true",
        help="Actively try to reboot the nodes, keeping a log of actions.")
#config = config(parser)
# COLLECT nodegroups, nodes and node lists
# NOTE(review): the guard (presumably 'if config.nodegroup:') around the
# next three lines is missing from this chunk.
ng = api.GetNodeGroups({'name' : config.nodegroup})
nodelist = api.GetNodes(ng[0]['node_ids'])
hostnames = [ n['hostname'] for n in nodelist ]

# Explicit --node / --nodelist selections override the nodegroup list.
if config.node or config.nodelist:
    if config.node: hostnames = [ config.node ]
    else: hostnames = config.getListFromFile(config.nodelist)

# A --nodeselect query selects hosts from the findbad database instead.
if config.nodeselect:
    hostnames = node_select(config.nodeselect)

# rerun findbad with the nodes in the given nodes.
# NOTE(review): the enclosing guard (presumably 'if config.findbad:') and
# the definition of 'file' are on lines missing from this chunk.
configmodule.setFileFromList(file, hostnames)
os.system("./findbad.py --cachenodes --debug=0 --dbname=findbad --increment --nodelist %s" % file)

# Load the (possibly just refreshed) findbad observations.
fb = database.dbLoad("findbad")
# MAIN LOOP: decide and perform a reboot strategy for each selected host.
# NOTE(review): several structural lines inside this loop are missing from
# this chunk: the 'i' counter initialization/increment, the 'try:' lines
# matching the except handlers below, the 'continue' statements after the
# skip messages, and the calls (o.direct/o.pcu/o.mail, presumably) whose
# outcomes select which 'record' assignment runs.
for host in hostnames:
    #if 'echo' in host or 'hptest-1' in host: continue
    node = api.GetNodes(host)[0]
    # NOTE(review): exception handler from a missing try/except around the
    # GetNodes call above.
    print traceback.print_exc();
    print "FAILED GETNODES for host: %s" % host

    print "%-2d" % i, nodegroup_display(node, fb)

    # Honor --skip: fast-forward past the first N hosts of the queue.
    if i < int(config.skip): continue

    # --stopselect: skip hosts whose findbad record already satisfies the
    # "done" query (unless they sit in debug state).
    if config.stopselect:
        dict_query = query_to_dict(config.stopselect)
        fbnode = fb['nodes'][host]['values']
        observed_state = get_current_state(fbnode)

        if verify(dict_query, fbnode) and observed_state != "dbg ":
            # evaluates to true, therefore skip.
            print "%s evaluates true for %s ; skipping..." % ( config.stopselect, host )

    # --stopkey/--stopvalue: alternate "done" test on a single findbad field.
    if config.stopkey and config.stopvalue:
        fbnode = fb['nodes'][host]['values']
        observed_state = get_current_state(fbnode)

        if config.stopkey in fbnode:
            if config.stopvalue in fbnode[config.stopkey] and observed_state != "dbg ":
                print "%s has stopvalue; skipping..." % host
        print "stopkey %s not in fbnode record for %s; skipping..." % (config.stopkey, host)

    # Without --force, do not act on a host rebooted in the last two hours.
    if not config.force and rebootlog.find(host, {'action' : ".*reboot"}, 60*60*2):
        print "recently rebooted %s. skipping... " % host

    # Choose a strategy object based on the node's observed state.
    fbnode = fb['nodes'][host]['values']
    observed_state = get_current_state(fbnode)

    if observed_state == "dbg ":
        o = RebootDebug(fbnode)

    elif observed_state == "boot" :
        # Mark for reinstall first, then try to kick the node over.
        l = set_node_to_rins(host, fb)
        if l: rebootlog.add(l)
        o = RebootBoot(fbnode)

    elif observed_state == "down":
        l = set_node_to_rins(host, fb)
        if l: rebootlog.add(l)
        o = RebootDown(fbnode)

    # Build the log record for whichever method succeeded (or none).
    # NOTE(review): the branch conditions selecting among these four
    # assignments, and the 'action' entries of the first three, are on
    # lines missing from this chunk.
    record = {'observation' : "DIRECT_SUCCESS: %s" % observed_state,
              'time' : time.time()}
    record = {'observation' : "PCU_SUCCESS: %s" % observed_state,
              'time' : time.time()}
    record = {'observation' : "MAIL_SUCCESS: %s" % observed_state,
              'time' : time.time()}
    record = {'observation' : "REBOOT_FAILED: %s" % observed_state,
              'action' : "log failure",
              'time' : time.time()}

    # Every strategy failed: notify the monitor mailing list.
    # NOTE(review): 'args' is initialized on a line missing from this chunk.
    print "ALL METHODS OF RESTARTING %s FAILED" % host
    args['hostname'] = host
    m = PersistMessage(host, "ALL METHODS FAILED for %(hostname)s" % args,
            "CANNOT CONTACT", False, db='suspect_persistmessages')
    m.send(['monitor-list@lists.planet-lab.org'])

    l = Log(host, record)

    # NOTE(review): the 'try:' matching these handlers is missing from
    # this chunk; Ctrl-C stops the run, any other error continues.
    except KeyboardInterrupt:
        print "Killed by interrupt"
    print traceback.print_exc();
    print "Continuing..."
# Periodic checkpoint: save the reboot log and optionally sleep between
# batches (per --timewait).
# NOTE(review): the surrounding control flow -- the every-10-nodes batch
# condition and the 'while' body that initializes/advances 'ti' -- is on
# lines missing from this chunk.
print "Saving rebootlog"
database.dbDump("rebootlog", rebootlog)
wait_time = int(config.timewait)
print "Sleeping %d minutes" % wait_time
print "Minutes slept: ",
while ti < wait_time:

# Final save after all hosts have been processed.
print "Saving rebootlog"
database.dbDump("rebootlog", rebootlog)