# Attempt to reboot a node in debug state.

api = plc.PLC(auth.auth, auth.plc)

from getsshkeys import SSHKnownHosts

import ssh.pxssh as pxssh
import ssh.fdpexpect as fdpexpect
import ssh.pexpect as pexpect
from unified_model import *
from emailTxt import mailtxt

class Sopen(subprocess.Popen):
    def kill(self, signal = signal.SIGTERM):
        os.kill(self.pid, signal)

#from Rpyc import SocketConnection, Async
from Rpyc import SocketConnection, Async
from Rpyc.Utils import *
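
# get_fbnode(): load the cached "findbad" database and return the record for the given node.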
    fb = database.dbLoad("findbad")
    fbnode = fb['nodes'][node]['values']

    def __init__(self, connection, node, config):

    def get_boot_state(self):
        if self.c.modules.os.path.exists('/tmp/source'):
        elif self.c.modules.os.path.exists('/vservers'):
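
    # get_dmesg(): dump the kernel ring buffer on the node, copy it to
    # log/dmesg.<node>.log locally, and return an open file handle on it.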
        self.c.modules.os.system("dmesg > /var/log/dmesg.bm.log")
        download(self.c, "/var/log/dmesg.bm.log", "log/dmesg.%s.log" % self.node)
        log = open("log/dmesg.%s.log" % self.node, 'r')
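
    # get_bootmanager_log(): copy the gzip'd /tmp/bm.log off the node, unpack it
    # under log/, and return an open file handle on it.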
    def get_bootmanager_log(self):
        download(self.c, "/tmp/bm.log", "log/bm.%s.log.gz" % self.node)
        os.system("zcat log/bm.%s.log.gz > log/bm.%s.log" % (self.node, self.node))
        log = open("log/bm.%s.log" % self.node, 'r')
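
    # dump_plconf_file(): run the BootManager InitializeBootManager and
    # ReadNodeConfiguration steps (from /tmp/source) over the Rpyc connection
    # and print the resulting configuration variables.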
    def dump_plconf_file(self):
        self.c.modules.sys.path.append("/tmp/source/")
        self.c.modules.os.chdir('/tmp/source')

        log = c.modules.BootManager.log('/tmp/new.log')
        bm = c.modules.BootManager.BootManager(log,'boot')

        BootManagerException = c.modules.Exceptions.BootManagerException
        InitializeBootManager = c.modules.BootManager.InitializeBootManager
        ReadNodeConfiguration = c.modules.BootManager.ReadNodeConfiguration

        InitializeBootManager.Run(bm.VARS, bm.LOG)
        try: ReadNodeConfiguration.Run(bm.VARS, bm.LOG)
            print " Possibly unable to find a valid configuration file"

        if bm_continue and self.config and not self.config.quiet:
            for key in bm.VARS.keys():
                print key, " == ", bm.VARS[key]

            if self.config and not self.config.quiet: print " Unable to read Node Configuration"
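
    # compare_and_repair_nodekeys(): compare the NODE_KEY that BootManager reads
    # on the node with the key PLC has on record; if they disagree, push the
    # node's key back to PLC with UpdateNode().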
    def compare_and_repair_nodekeys(self):
        self.c.modules.sys.path.append("/tmp/source/")
        self.c.modules.os.chdir('/tmp/source')

        log = c.modules.BootManager.log('/tmp/new.log')
        bm = c.modules.BootManager.BootManager(log,'boot')

        BootManagerException = c.modules.Exceptions.BootManagerException
        InitializeBootManager = c.modules.BootManager.InitializeBootManager
        ReadNodeConfiguration = c.modules.BootManager.ReadNodeConfiguration

        plcnode = api.GetNodes({'hostname': self.node}, None)[0]

        InitializeBootManager.Run(bm.VARS, bm.LOG)
        try: ReadNodeConfiguration.Run(bm.VARS, bm.LOG)
            print " Possibly unable to find a valid configuration file"

        print " NODE: %s" % bm.VARS['NODE_KEY']
        print " PLC : %s" % plcnode['key']

        if bm.VARS['NODE_KEY'] == plcnode['key']:

        if api.UpdateNode(self.node, {'key': bm.VARS['NODE_KEY']}):
            print " Successfully updated NODE_KEY with PLC"

        #for key in bm.VARS.keys():
        #    print key, " == ", bm.VARS[key]

        print " Unable to retrieve NODE_KEY"
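
    # bootmanager_running(): /tmp/BM_RUNNING is created for the duration of a
    # BootManager run (see restart_bootmanager below), so its presence means a
    # run is already in progress.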
    def bootmanager_running(self):
        if self.c.modules.os.path.exists('/tmp/BM_RUNNING'):

    def set_nodestate(self, state='boot'):
        return api.UpdateNode(self.node, {'boot_state' : state})
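
    # restart_node(): record the new boot_state at PLC and reboot the node. The
    # first attempt within 24 hours (the 'gentlekill' flag) kills all slice
    # processes and schedules "shutdown -r"; a repeat attempt forces an
    # immediate reboot through /proc/sysrq-trigger.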
    def restart_node(self, state='boot'):
        api.UpdateNode(self.node, {'boot_state' : state})

        pflags = PersistFlags(self.node, 1*60*60*24, db='restart_persistflags')
        if not pflags.getRecentFlag('gentlekill'):
            print " Killing all slice processes... : %s" % self.node
            cmd_slicekill = "ls -d /proc/virtual/[0-9]* | awk -F '/' '{print $4}' | xargs -I{} /usr/sbin/vkill -s 9 --xid {} -- 0"
            self.c.modules.os.system(cmd_slicekill)
            cmd = """ shutdown -r +1 & """
            print " Restarting %s : %s" % ( self.node, cmd)
            self.c.modules.os.system(cmd)

            pflags.setRecentFlag('gentlekill')

            print " Restarting with sysrq 'sub' %s" % self.node
            cmd = """ (sleep 5; echo 's' > /proc/sysrq-trigger; echo 'u' > /proc/sysrq-trigger; echo 'b' > /proc/sysrq-trigger ) & """
            self.c.modules.os.system(cmd)
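
    # restart_bootmanager(): re-run BootManager.py from /tmp/source with the
    # given forced state, using /tmp/BM_RUNNING as a lock file so that only one
    # run happens at a time.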
    def restart_bootmanager(self, forceState):

        self.c.modules.os.chdir('/tmp/source')
        if self.c.modules.os.path.exists('/tmp/BM_RUNNING'):
            print " BootManager is already running: try again soon..."

            print " Starting 'BootManager.py %s' on %s " % (forceState, self.node)
            cmd = "( touch /tmp/BM_RUNNING ; " + \
                  " python ./BootManager.py %s &> server.log < /dev/null ; " + \
                  " rm -f /tmp/BM_RUNNING " + \

            cmd = cmd % forceState
            self.c.modules.os.system(cmd)
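
# PlanetLabSession: wraps everything needed to run code on a node remotely:
# rsync the Rpyc sources over, start the Rpyc server there, and keep an ssh
# tunnel from a unique local port to that server.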
class PlanetLabSession:
    globalport = 22000 + int(random.random()*1000)

    def __init__(self, node, nosetup, verbose):
        self.verbose = verbose
        self.nosetup = nosetup

    def get_connection(self, config):
        return NodeConnection(SocketConnection("localhost", self.port), self.node, config)
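
    # setup_host(): copy the Rpyc sources to the node, (re)start the forking
    # Rpyc server there, and open an ssh tunnel from self.port on this host to
    # port 18812 on the node.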
    def setup_host(self):
        self.port = PlanetLabSession.globalport
        PlanetLabSession.globalport = PlanetLabSession.globalport + 1

        args['port'] = self.port
        args['user'] = 'root'
        args['hostname'] = self.node
        args['monitordir'] = monitorconfig.MONITOR_SCRIPT_ROOT

            print "Skipping setup"

        # COPY Rpyc files to host
        cmd = "rsync -qv -az -e ssh %(monitordir)s/Rpyc/ %(user)s@%(hostname)s:Rpyc 2> /dev/null" % args
        if self.verbose: print cmd

        localos = moncommands.CMD()

        ret = localos.system(cmd, timeout)

            print "\tUNKNOWN SSH KEY FOR %s; making an exception" % self.node
            #print "MAKE EXPLICIT EXCEPTION FOR %s" % self.node
            k = SSHKnownHosts(); k.updateDirect(self.node); k.write(); del k
            ret = localos.system(cmd, timeout)

                print "\tFAILED TWICE"
                raise Exception("Failed twice trying to login with updated ssh host key")

        # KILL any already running servers.
        ssh = moncommands.SSH(args['user'], args['hostname'], ssh_port)
        (ov,ev) = ssh.run_noexcept2("""<<\EOF
echo "kill server" >> out.log
ps ax | grep Rpyc | grep -v grep | awk '{print $1}' | xargs kill 2> /dev/null ;
echo "export" >> out.log
export PYTHONPATH=$HOME ;
echo "start server" >> out.log
python Rpyc/Servers/forking_server.py &> server.log &
echo "done" >> out.log

        #cmd = """ssh %(user)s@%(hostname)s """ + \
        #      """'ps ax | grep Rpyc | grep -v grep | awk "{print \$1}" | xargs kill 2> /dev/null' """
        #if self.verbose: print cmd
        #print localos.system(cmd,timeout)

        ## START a new rpyc server.
        #cmd = """ssh -n %(user)s@%(hostname)s "export PYTHONPATH=\$HOME; """ + \
        #      """python Rpyc/Servers/forking_server.py &> server.log < /dev/null &" """
        #if self.verbose: print cmd
        #print localos.system(cmd,timeout)

        # This was tricky to make synchronous. The combination of ssh-clients-4.7p1
        # and the following options seems to work well.
        cmd = """ssh -o ExitOnForwardFailure=yes -o BatchMode=yes """ + \
              """-o PermitLocalCommand=yes -o LocalCommand='echo "READY"' """ + \
              """-o ConnectTimeout=120 """ + \
              """-n -N -L %(port)s:localhost:18812 """ + \
              """%(user)s@%(hostname)s"""

        if self.verbose: print cmd
        self.command = Sopen(cmd, shell=True, stdout=subprocess.PIPE)
        # TODO: the read() here may block indefinitely; a better approach would
        # include a timeout.
        #ret = self.command.stdout.read(5)
        ret = moncommands.read_t(self.command.stdout, 5)

        # NOTE: There is still a slight race for machines that are slow...
        self.timeout = 2*(t2-t1)
        print "Sleeping for %s sec" % self.timeout
        time.sleep(self.timeout)

        if self.command.returncode is not None:
            print "Failed to establish tunnel!"
            raise Exception("SSH Tunnel exception : %s %s" % (self.node, self.command.returncode))

        raise Exception("Unknown SSH Tunnel Exception: still running, but did not report 'READY'")

        if self.verbose: print "Killing SSH session %s" % self.port
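
# Helpers for the pexpect step tables below: steps_to_list() extracts the
# patterns to hand to expect(), and index_to_id() maps a match index back to
# its step identifier.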
def steps_to_list(steps):
    for (id,label) in steps:
        ret_list.append(label)

def index_to_id(steps,index):
    if index < len(steps):
        return steps[index][0]
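
# reboot(): the main entry point. Check that the BootCD is recent enough, set
# up a session on the node, classify the errors found in dmesg and bm.log, and
# apply the matching recovery action (restart BootManager, reinstall, repair
# node keys, or notify the site).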
def reboot(hostname, config=None, forced_action=None):

    # NOTE: Nothing works if the bootcd is REALLY old.
    # So, this is the first step.
    fbnode = get_fbnode(hostname)
    if fbnode['category'] == "OLDBOOTCD":
        print "...NOTIFY OWNER TO UPDATE BOOTCD!!!"

        args['hostname_list'] = " %s" % hostname

        m = PersistMessage(hostname, "Please Update Boot Image for %s" % hostname,
                           mailtxt.newbootcd_one[1] % args, True, db='bootcd_persistmessages')

        loginbase = plc.siteId(hostname)
        m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])

        print "\tDisabling %s due to out-of-date BOOTCD" % hostname
        api.UpdateNode(hostname, {'boot_state' : 'disable'})

    print "Creating session for %s" % node
    # update known_hosts file (in case the node has rebooted since last run)
    if config and not config.quiet: print "...updating known_hosts ssh-rsa key for %s" % node

        k = SSHKnownHosts(); k.update(node); k.write(); del k

        import traceback; print traceback.print_exc()

            session = PlanetLabSession(node, False, True)

            session = PlanetLabSession(node, config.nosetup, config.verbose)

        print "ERROR setting up session for %s" % hostname
        import traceback; print traceback.print_exc()

        conn = session.get_connection(config)

        # NOTE: sometimes the wait in setup_host() is not long enough.
        # So, here we try to wait a little longer before giving up entirely.
            time.sleep(session.timeout*4)
            conn = session.get_connection(config)

            import traceback; print traceback.print_exc()

    if forced_action == "reboot":
        conn.restart_node('rins')

    boot_state = conn.get_boot_state()
    if boot_state == "boot":
        print "...Boot state of %s already completed : skipping..." % node

    elif boot_state == "unknown":
        print "...Unknown bootstate for %s : skipping..." % node

    if conn.bootmanager_running():
        print "...BootManager is currently running. Skipping host %s" % node

    #       conn.restart_bootmanager(config.force)

    # Read persistent flags, tagged on three-day intervals.
    pflags = PersistFlags(hostname, 3*60*60*24, db='debug_persistflags')

    if config and not config.quiet: print "...downloading dmesg from %s" % node
    dmesg = conn.get_dmesg()
    child = fdpexpect.fdspawn(dmesg)
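
    # dmesg patterns that indicate failing disks or other hardware trouble; the
    # set of patterns matched decides below whether to continue with the node
    # or notify the site of a hardware problem.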
        ('scsierror' , 'SCSI error : <\d+ \d+ \d+ \d+> return code = 0x\d+'),
        ('ioerror' , 'end_request: I/O error, dev sd\w+, sector \d+'),
        ('ccisserror' , 'cciss: cmd \w+ has CHECK CONDITION byte \w+ = \w+'),
        ('buffererror' , 'Buffer I/O error on device dm-\d, logical block \d+'),
        ('atareadyerror' , 'ata\d+: status=0x\d+ { DriveReady SeekComplete Error }'),
        ('atacorrecterror' , 'ata\d+: error=0x\d+ { UncorrectableError }'),
        ('sdXerror' , 'sd\w: Current: sense key: Medium Error'),
        ('ext3error' , 'EXT3-fs error (device dm-\d+): ext3_find_entry: reading directory #\d+ offset \d+'),
        ('floppytimeout' , 'floppy0: floppy timeout called'),
        ('floppyerror' , 'end_request: I/O error, dev fd\w+, sector \d+'),

        # floppy0: floppy timeout called
        # end_request: I/O error, dev fd0, sector 0

        # Buffer I/O error on device dm-2, logical block 8888896
        # ata1: status=0x51 { DriveReady SeekComplete Error }
        # ata1: error=0x40 { UncorrectableError }
        # SCSI error : <0 0 0 0> return code = 0x8000002
        # sda: Current: sense key: Medium Error
        #      Additional sense: Unrecovered read error - auto reallocate failed

        # SCSI error : <0 2 0 0> return code = 0x40001
        # end_request: I/O error, dev sda, sector 572489600

        id = index_to_id(steps, child.expect( steps_to_list(steps) + [ pexpect.EOF ]))

    if config and not config.quiet: print "\tSET: ", s

        print "...Potential drive errors on %s" % node
        if len(s) == 2 and 'floppyerror' in s:
            print "...Should investigate. Continuing with node."

            print "...Should investigate. Skipping node."
            # TODO: send message related to these errors.

            args['hostname'] = hostname
            args['log'] = conn.get_dmesg().read()

            m = PersistMessage(hostname, mailtxt.baddisk[0] % args,
                               mailtxt.baddisk[1] % args, True, db='hardware_persistmessages')

            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])
            conn.set_nodestate('diag')

    print "...Downloading bm.log from %s" % node
    log = conn.get_bootmanager_log()
    child = fdpexpect.fdspawn(log)

    if config.collect: return True

    if config and not config.quiet: print "...Scanning bm.log for errors"
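
    # (id, pattern) pairs covering both the normal BootManager stages and known
    # failure messages. The ids of the lines matched in bm.log are joined into
    # a '-'-separated "sequence" string, which is classified further below.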
        ('bminit' , 'Initializing the BootManager.'),
        ('cfg' , 'Reading node configuration file.'),
        ('auth' , 'Authenticating node with PLC.'),
        ('getplc' , 'Retrieving details of node from PLC.'),
        ('update' , 'Updating node boot state at PLC.'),
        ('hardware' , 'Checking if hardware requirements met.'),
        ('installinit' , 'Install: Initializing.'),
        ('installdisk' , 'Install: partitioning disks.'),
        ('installbootfs', 'Install: bootstrapfs tarball.'),
        ('installcfg' , 'Install: Writing configuration files.'),
        ('installstop' , 'Install: Shutting down installer.'),
        ('update2' , 'Updating node boot state at PLC.'),
        ('installinit2' , 'Install: Initializing.'),
        ('validate' , 'Validating node installation.'),
        ('rebuildinitrd', 'Rebuilding initrd'),
        ('netcfg' , 'Install: Writing Network Configuration files.'),
        ('update3' , 'Updating node configuration.'),
        ('disk' , 'Checking for unused disks to add to LVM.'),
        ('update4' , 'Sending hardware configuration to PLC.'),
        ('debug' , 'Starting debug mode'),
        ('bmexceptmount', 'BootManagerException during mount'),
        ('bmexceptvgscan', 'BootManagerException during vgscan/vgchange'),
        ('bmexceptrmfail', 'Unable to remove directory tree: /tmp/mnt'),
        ('exception' , 'Exception'),
        ('nocfg' , 'Found configuration file planet.cnf on floppy, but was unable to parse it.'),
        ('protoerror' , 'XML RPC protocol error'),
        ('nodehostname' , 'Configured node hostname does not resolve'),
        ('implementerror', 'Implementation Error'),
        ('readonlyfs' , '[Errno 30] Read-only file system'),
        ('noinstall' , 'notinstalled'),
        ('bziperror' , 'bzip2: Data integrity error when decompressing.'),
        ('noblockdev' , "No block devices detected."),
        ('downloadfail' , 'Unable to download main tarball /boot/bootstrapfs-planetlab-i386.tar.bz2 from server.'),
        ('disktoosmall' , 'The total usable disk size of all disks is insufficient to be usable as a PlanetLab node.'),
        ('hardwarerequirefail' , 'Hardware requirements not met'),
        ('mkfsfail' , 'while running: Running mkfs.ext2 -q -m 0 -j /dev/planetlab/vservers failed'),
        ('nofilereference', "No such file or directory: '/tmp/mnt/sysimg//vservers/.vref/planetlab-f8-i386/etc/hosts'"),
        ('chrootfail' , 'Running chroot /tmp/mnt/sysimg'),
        ('modulefail' , 'Unable to get list of system modules'),
        ('writeerror' , 'write error: No space left on device'),
        ('nospace' , "No space left on device"),
        ('nonode' , 'Failed to authenticate call: No such node'),
        ('authfail' , 'Failed to authenticate call: Call could not be authenticated'),
        ('bootcheckfail' , 'BootCheckAuthentication'),
        ('bootupdatefail' , 'BootUpdateNode'),

        list = steps_to_list(steps)
        index = child.expect( list + [ pexpect.EOF ])
        id = index_to_id(steps,index)

        if id == "exception":
            if config and not config.quiet: print "...Found An Exception!!!"
        elif index == len(list):

    s = "-".join(sequence)
    print " FOUND SEQUENCE: ", s

    # NOTE: We get or set the flag based on the current sequence identifier.
    # By using the sequence identifier, we guarantee that there will be no
    # frequent loops. I'm guessing there is a better way to track loops,
    if not config.force and pflags.getRecentFlag(s):
        pflags.setRecentFlag(s)

        print "... flag is set or it has already run recently. Skipping %s" % node
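
    # Map the known bm.log sequences to a recovery action. A sequence that is
    # not listed here is mailed to monitor-list and retried with a plain 'boot'
    # (see the "UNKNOWN SEQUENCE" branch below).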
    # restart_bootmanager_boot
    for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-protoerror-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-update3-implementerror-bootupdatefail-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-exception-protoerror-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-disk-update4-update3-exception-chrootfail-update-debug-done",
              "bminit-cfg-auth-getplc-update-debug-done",
              "bminit-cfg-auth-getplc-exception-protoerror-update-protoerror-debug-done",
              "bminit-cfg-auth-protoerror-exception-update-protoerror-debug-done",
              "bminit-cfg-auth-protoerror-exception-update-bootupdatefail-authfail-debug-done",
              "bminit-cfg-auth-protoerror-exception-update-debug-done",
              "bminit-cfg-auth-getplc-implementerror-update-debug-done",
        sequences.update({n : "restart_bootmanager_boot"})

    # conn.restart_bootmanager('rins')
    for n in [ "bminit-cfg-auth-getplc-installinit-validate-exception-modulefail-update-debug-done",
               "bminit-cfg-auth-getplc-update-installinit-validate-exception-modulefail-update-debug-done",
               "bminit-cfg-auth-getplc-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
               "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
               "bminit-cfg-auth-getplc-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
               "bminit-cfg-auth-getplc-update-installinit-validate-exception-noinstall-update-debug-done",
               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-bziperror-exception-update-debug-done",
               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
               "bminit-cfg-auth-getplc-update-installinit-validate-bmexceptvgscan-exception-noinstall-update-debug-done",
               "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-update-debug-done",
               "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
               "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-exception-mkfsfail-update-debug-done",
               "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
               "bminit-cfg-auth-getplc-installinit-validate-exception-noinstall-update-debug-done",
        sequences.update({n : "restart_bootmanager_rins"})

    sequences.update({"bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-done": "repair_node_keys"})

    # conn.restart_node('rins')
    for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
              "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-exception-chrootfail-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-exception-chrootfail-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-writeerror-exception-chrootfail-update-debug-done",
              "bminit-cfg-auth-getplc-update-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-installinit-exception-bmexceptrmfail-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-update3-implementerror-bootupdatefail-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-readonlyfs-update-debug-done",
              "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-nospace-exception-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
              "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-update-debug-done",
              "bminit-cfg-auth-getplc-update-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
        sequences.update({n : "restart_node_rins"})

    for n in ["bminit-cfg-auth-getplc-implementerror-bootupdatefail-update-debug-done",
              "bminit-cfg-auth-implementerror-bootcheckfail-update-debug-done",
              "bminit-cfg-auth-implementerror-bootcheckfail-update-implementerror-bootupdatefail-done",
              "bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nospace-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-exception-downloadfail-update-debug-done",
        sequences.update({n: "restart_node_boot"})

    # update_node_config_email
    for n in ["bminit-cfg-exception-nocfg-update-bootupdatefail-nonode-debug-done",
              "bminit-cfg-exception-update-bootupdatefail-nonode-debug-done",
        sequences.update({n : "update_node_config_email"})

    for n in [ "bminit-cfg-exception-nodehostname-update-debug-done", ]:
        sequences.update({n : "nodenetwork_email"})

    # update_bootcd_email
    for n in ["bminit-cfg-auth-getplc-update-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-exception-noblockdev-hardwarerequirefail-update-debug-done",
              "bminit-cfg-auth-getplc-update-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-noblockdev-exception-hardwarerequirefail-update-debug-done",
              "bminit-cfg-auth-getplc-hardware-exception-hardwarerequirefail-update-debug-done",
        sequences.update({n : "update_bootcd_email"})

    for n in [ "bminit-cfg-auth-getplc-installinit-validate-rebuildinitrd-netcfg-update3-implementerror-nofilereference-update-debug-done",
        sequences.update({n: "suspect_error_email"})

    # update_hardware_email
    sequences.update({"bminit-cfg-auth-getplc-hardware-exception-disktoosmall-hardwarerequirefail-update-debug-done" : "update_hardware_email"})
    sequences.update({"bminit-cfg-auth-getplc-hardware-disktoosmall-exception-hardwarerequirefail-update-debug-done" : "update_hardware_email"})

    # broken_hardware_email
    sequences.update({"bminit-cfg-auth-getplc-update-hardware-exception-hardwarerequirefail-update-debug-done" : "broken_hardware_email"})

    if s not in sequences:
        print " HOST %s" % hostname
        print " UNKNOWN SEQUENCE: %s" % s

        args['hostname'] = hostname

        args['bmlog'] = conn.get_bootmanager_log().read()
        m = PersistMessage(hostname, mailtxt.unknownsequence[0] % args,
                           mailtxt.unknownsequence[1] % args, False, db='unknown_persistmessages')

        m.send(['monitor-list@lists.planet-lab.org'])

        conn.restart_bootmanager('boot')

        # NOTE: Do not set the pflags value for this sequence if it's unknown.
        # This way, we can check it again after we've fixed it.
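
        # The sequence is known: carry out the recovery action it maps to.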
        if sequences[s] == "restart_bootmanager_boot":
            if config and not config.quiet: print "...Restarting BootManager.py on %s " % node
            conn.restart_bootmanager('boot')
        elif sequences[s] == "restart_bootmanager_rins":
            if config and not config.quiet: print "...Restarting BootManager.py on %s " % node
            conn.restart_bootmanager('rins')
        elif sequences[s] == "restart_node_rins":
            conn.restart_node('rins')
        elif sequences[s] == "restart_node_boot":
            conn.restart_node('boot')
        elif sequences[s] == "repair_node_keys":
            if conn.compare_and_repair_nodekeys():
                # the keys either are in sync or were forced in sync.
                # so try to reboot the node again.
                conn.restart_bootmanager('rins')

                # there was some failure to synchronize the keys.
                print "...Unable to repair node keys on %s" % node

        elif sequences[s] == "suspect_error_email":

            args['hostname'] = hostname

            args['bmlog'] = conn.get_bootmanager_log().read()
            m = PersistMessage(hostname, "Suspicious error from BootManager on %s" % hostname,
                               mailtxt.unknownsequence[1] % args, False, db='suspect_persistmessages')

            m.send(['monitor-list@lists.planet-lab.org'])

            conn.restart_bootmanager('boot')

        elif sequences[s] == "update_node_config_email":
            print "...Sending message to UPDATE NODE CONFIG"

            args['hostname'] = hostname
            m = PersistMessage(hostname, mailtxt.plnode_cfg[0] % args, mailtxt.plnode_cfg[1] % args,
                               True, db='nodeid_persistmessages')
            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])
            conn.dump_plconf_file()
            conn.set_nodestate('diag')

        elif sequences[s] == "nodenetwork_email":
            print "...Sending message to LOOK AT NODE NETWORK"

            args['hostname'] = hostname
            args['bmlog'] = conn.get_bootmanager_log().read()
            m = PersistMessage(hostname, mailtxt.plnode_network[0] % args, mailtxt.plnode_network[1] % args,
                               True, db='nodenet_persistmessages')
            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])
            conn.dump_plconf_file()
            conn.set_nodestate('diag')

        elif sequences[s] == "update_bootcd_email":
            print "...NOTIFY OWNER TO UPDATE BOOTCD!!!"

            args.update(getconf.getconf(hostname)) # NOTE: Generates boot images for the user:
            args['hostname_list'] = "%s" % hostname

            m = PersistMessage(hostname, "Please Update Boot Image for %s" % hostname,
                               mailtxt.newalphacd_one[1] % args, True, db='bootcd_persistmessages')

            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])

            print "\tDisabling %s due to out-of-date BOOTCD" % hostname
            conn.set_nodestate('disable')

        elif sequences[s] == "broken_hardware_email":
            # MAKE An ACTION record that this host has failed hardware. May
            # require either an exception "/minhw" or other manual intervention.
            # Definitely need to send out some more EMAIL.
            print "...NOTIFYING OWNERS OF BROKEN HARDWARE on %s!!!" % hostname
            # TODO: email notice of broken hardware

            args['hostname'] = hostname
            args['log'] = conn.get_dmesg().read()
            m = PersistMessage(hostname, mailtxt.baddisk[0] % args,
                               mailtxt.baddisk[1] % args, True, db='hardware_persistmessages')

            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])
            conn.set_nodestate('disable')

        elif sequences[s] == "update_hardware_email":
            print "...NOTIFYING OWNERS OF MINIMAL HARDWARE FAILURE on %s!!!" % hostname

            args['hostname'] = hostname
            args['bmlog'] = conn.get_bootmanager_log().read()
            m = PersistMessage(hostname, mailtxt.minimalhardware[0] % args,
                               mailtxt.minimalhardware[1] % args, True, db='minhardware_persistmessages')

            loginbase = plc.siteId(hostname)
            m.send([policy.PIEMAIL % loginbase, policy.TECHEMAIL % loginbase])
            conn.set_nodestate('disable')
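
        # Remember that this sequence was just handled, so the getRecentFlag()
        # check above will skip it if the same sequence shows up again soon.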
        pflags.setRecentFlag(s)

# MAIN -------------------------------------------------------------------

from config import config
from optparse import OptionParser
parser = OptionParser()
parser.set_defaults(node=None, nodelist=None, child=False, collect=False, nosetup=False, verbose=False, force=None, quiet=False)
parser.add_option("", "--child", dest="child", action="store_true",
                  help="This is the child mode of this process.")
parser.add_option("", "--force", dest="force", metavar="boot_state",
                  help="Force a boot state passed to BootManager.py.")
parser.add_option("", "--quiet", dest="quiet", action="store_true",
                  help="Extra quiet output messages.")
parser.add_option("", "--verbose", dest="verbose", action="store_true",
                  help="Extra debug output messages.")
parser.add_option("", "--collect", dest="collect", action="store_true",
                  help="No action; just collect dmesg and bm.log.")
parser.add_option("", "--nosetup", dest="nosetup", action="store_true",
                  help="Do not perform the ordinary setup phase.")
parser.add_option("", "--node", dest="node", metavar="nodename.edu",
                  help="A single node name to try to bring out of debug mode.")
parser.add_option("", "--nodelist", dest="nodelist", metavar="nodelist.txt",
                  help="A list of nodes to bring out of debug mode.")
config = config(parser)

    nodes = config.getListFromFile(config.nodelist)

    nodes = [ config.node ]

if __name__ == "__main__":